Dataset schema (one row per source file):

| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 209 |
| max_stars_repo_name | string | lengths 5 to 121 |
| max_stars_repo_head_hexsha | string | lengths 40 to 40 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 4 to 209 |
| max_issues_repo_name | string | lengths 5 to 121 |
| max_issues_repo_head_hexsha | string | lengths 40 to 40 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 4 to 209 |
| max_forks_repo_name | string | lengths 5 to 121 |
| max_forks_repo_head_hexsha | string | lengths 40 to 40 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

Each record below lists its metadata fields, followed by the file content and per-file statistics.
hexsha: d98d62c09e7085ca98fceecdd882e9c44ffbf35e | size: 2,340 | ext: py | lang: Python
max_stars: tests/test_model.py in datopian/babbage @ 3cffb4415b8b29503ab2939917e2aadc9474bb2b, licenses ["MIT"], count 27, events 2015-12-05T10:51:46.000Z to 2021-09-06T23:46:10.000Z
max_issues: tests/test_model.py in dfrankow/babbage @ 9e03efe62e0be0cceabafd4de2a09cb8ec794b92, licenses ["MIT"], count 29, events 2015-12-04T09:54:22.000Z to 2021-11-22T12:26:08.000Z
max_forks: tests/test_model.py in dfrankow/babbage @ 9e03efe62e0be0cceabafd4de2a09cb8ec794b92, licenses ["MIT"], count 12, events 2016-04-04T11:32:07.000Z to 2021-11-23T16:29:11.000Z
content:
import pytest
class TestModel(object):
def test_model_concepts(self, simple_model):
concepts = list(simple_model.concepts)
assert len(concepts) == 11, len(concepts)
def test_model_match(self, simple_model):
concepts = list(simple_model.match('foo'))
assert len(concepts) == 1, len(concepts)
def test_model_match_invalid(self, simple_model):
concepts = list(simple_model.match('fooxx'))
assert len(concepts) == 0, len(concepts)
def test_model_aggregates(self, simple_model):
aggregates = list(simple_model.aggregates)
assert len(aggregates) == 2, aggregates
def test_model_fact_table(self, simple_model):
assert simple_model.fact_table_name == 'simple'
assert 'simple' in repr(simple_model), repr(simple_model)
def test_model_hierarchies(self, simple_model):
hierarchies = list(simple_model.hierarchies)
assert len(hierarchies) == 1
def test_model_dimension_hierarchies(self, simple_model):
bar = simple_model.match('bar')[0]
baz = simple_model.match('baz')[0]
assert bar.ref.startswith('bar.')
assert baz.alias.startswith('bazwaz.')
def test_deref(self, simple_model):
assert simple_model['foo'].name == 'foo'
assert simple_model['foo.key'].name == 'key'
assert simple_model['amount'].name == 'amount'
assert 'amount' in simple_model
assert 'amount.sum' in simple_model
assert '_count' in simple_model
assert 'yabba' not in simple_model
assert 'foo.key' in simple_model
def test_repr(self, simple_model):
assert 'amount' in repr(simple_model['amount'])
assert 'amount.sum' in repr(simple_model['amount.sum'])
assert 'foo.key' in repr(simple_model['foo.key'])
assert 'foo' in repr(simple_model['foo'])
assert 'foo' in str(simple_model['foo'])
assert simple_model['foo'] == 'foo'
def test_to_dict(self, simple_model):
data = simple_model.to_dict()
assert 'measures' in data
assert 'amount' in data['measures']
assert 'amount.sum' in data['aggregates']
assert 'ref' in data['measures']['amount']
assert 'dimensions' in data
assert 'hierarchies' in data
assert 'foo' in data['dimensions']
avg_line_length: 37.741935 | max_line_length: 65 | alphanum_fraction: 0.654274
hexsha: a8ac6d5947fac6dfab8f59519afd1af30dc05fb3 | size: 627 | ext: py | lang: Python
max_stars: lecture8/tests/question-8_2.py in ggorman/Introduction-Python-programming-2018 @ 739b864c1499ccdbf9010d8fe774087a07bb09ee, licenses ["CC-BY-3.0"], count 1, events 2019-01-12T12:43:24.000Z to 2019-01-12T12:43:24.000Z
max_issues: lecture8/tests/question-8_2.py in ggorman/Introduction-Python-programming-2018 @ 739b864c1499ccdbf9010d8fe774087a07bb09ee, licenses ["CC-BY-3.0"], count null, event dates null
max_forks: lecture8/tests/question-8_2.py in ggorman/Introduction-Python-programming-2018 @ 739b864c1499ccdbf9010d8fe774087a07bb09ee, licenses ["CC-BY-3.0"], count 3, events 2019-05-16T21:08:48.000Z to 2022-02-21T06:54:57.000Z
content:
test = {
'name': 'question 8.2',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> s1.i==64
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> s2.i=='HelloHelloHelloHello'
True
""",
'hidden': False,
'locked': False
},
],
'scored': True,
'setup': """
s1 = Simple(4)
for i in range(4):
s1.double()
s2 = Simple('Hello')
for i in range(2):
s2.double()
""",
'teardown': '',
'type': 'doctest'
}
]
}
avg_line_length: 16.076923 | max_line_length: 42 | alphanum_fraction: 0.354067
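The ok-style test above exercises a `Simple` class whose implementation is not part of this row. A minimal sketch of a class that satisfies both test cases, assuming `Simple(value)` stores the value on attribute `i` and `double()` doubles it in place (only the names come from the tests; the implementation itself is an assumption):

```python
class Simple:
    """Minimal stand-in for the class exercised by question 8.2 (assumed implementation)."""

    def __init__(self, i):
        self.i = i  # works for ints and strings alike

    def double(self):
        # doubling an int multiplies it by 2; doubling a str concatenates it with itself
        self.i = self.i * 2


# Mirrors the 'setup' block of the test suite above
s1 = Simple(4)
for _ in range(4):
    s1.double()          # 4 -> 8 -> 16 -> 32 -> 64
assert s1.i == 64

s2 = Simple('Hello')
for _ in range(2):
    s2.double()          # 'Hello' -> 'HelloHello' -> 'HelloHelloHelloHello'
assert s2.i == 'HelloHelloHelloHello'
```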
hexsha: f96176f12553a6387a351f352ffc49d63f6514b4 | size: 1,805 | ext: py | lang: Python
max_stars: iexdownload/downloader.py in shawnlinxl/iexdownload @ 9d4934e4342005b3d07b69a21acba26f8bf5502d, licenses ["MIT"], count null, event dates null
max_issues: iexdownload/downloader.py in shawnlinxl/iexdownload @ 9d4934e4342005b3d07b69a21acba26f8bf5502d, licenses ["MIT"], count null, event dates null
max_forks: iexdownload/downloader.py in shawnlinxl/iexdownload @ 9d4934e4342005b3d07b69a21acba26f8bf5502d, licenses ["MIT"], count null, event dates null
content:
import pandas as pd
from typing import List
from iexcloud import Stock
class Downloader(object):
def __init__(self):
self.symbol = set()
self.stock = dict()
self.writer = list()
def add_writer(self, writer):
self.writer.append(writer)
def add_symbol(self, symbol: List[str]):
if type(symbol) is not list:
symbol = [symbol]
for s in symbol:
self.symbol.add(s)
if s not in self.stock:
self.stock[s] = Stock(s, output="pandas")
def download_price(self, time_range: str = "5d") -> None:
for stock in self.stock.values():
data = stock.get_price(time_range=time_range)
if data.shape[0] > 0:
for writer in self.writer:
writer.write(data, f"price/{stock.symbol}.csv")
def download_dividend(self, time_range: str = "1m") -> None:
for stock in self.stock.values():
data = stock.get_dividend(time_range=time_range)
if data.shape[0] > 0:
for writer in self.writer:
writer.write(data, f"dividend/{stock.symbol}.csv")
def download_split(self, time_range: str = "1m") -> None:
for stock in self.stock.values():
data = stock.get_split(time_range=time_range)
if data.shape[0] > 0:
for writer in self.writer:
writer.write(data, f"split/{stock.symbol}.csv")
def download_profile(self) -> None:
for stock in self.stock.values():
data = stock.get_profile()
data = pd.DataFrame.from_dict(data)
if data.shape[0] > 0:
for writer in self.writer:
writer.write(data, f"profile/{stock.symbol}.csv")
avg_line_length: 27.769231 | max_line_length: 70 | alphanum_fraction: 0.559557
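`Downloader` hands every fetched DataFrame to its registered writer objects, so a writer only needs a `write(data, path)` method. A minimal sketch of such a writer under that assumption (the `CsvWriter` name and its output directory are hypothetical, not part of the iexdownload package):

```python
import os

import pandas as pd


class CsvWriter:
    """Hypothetical writer satisfying the write(data, path) contract used by Downloader."""

    def __init__(self, base_dir: str = "output"):
        self.base_dir = base_dir

    def write(self, data: pd.DataFrame, path: str) -> None:
        # path is a relative name such as "price/AAPL.csv"
        full_path = os.path.join(self.base_dir, path)
        os.makedirs(os.path.dirname(full_path), exist_ok=True)
        data.to_csv(full_path, index=False)


# Left commented out because it needs the iexcloud package and IEX Cloud credentials:
# downloader = Downloader()
# downloader.add_writer(CsvWriter())
# downloader.add_symbol(["AAPL", "MSFT"])
# downloader.download_price(time_range="5d")
```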
hexsha: 8da7f46bff0e0df728132ec0ae017fcb6417a14f | size: 271 | ext: py | lang: Python
max_stars: exercicios/exercicio014.py in callebbs/curosemvideo-python @ 63c2aac1f671973e92b753ec487e82c8871a3ded, licenses ["MIT"], count null, event dates null
max_issues: exercicios/exercicio014.py in callebbs/curosemvideo-python @ 63c2aac1f671973e92b753ec487e82c8871a3ded, licenses ["MIT"], count null, event dates null
max_forks: exercicios/exercicio014.py in callebbs/curosemvideo-python @ 63c2aac1f671973e92b753ec487e82c8871a3ded, licenses ["MIT"], count null, event dates null
content:
'''
Write a program that reads a temperature in degrees Celsius and converts it to degrees Fahrenheit.
'''
temp = float(input('What is the current temperature? '))
print('The temperature of {}°C converted to Fahrenheit is: {}°F'.format(temp, (temp * 9/5) + 32))
avg_line_length: 38.714286 | max_line_length: 109 | alphanum_fraction: 0.715867
hexsha: 611cc4ac7c2419481febb54b71ff94bfc1b3b5b5 | size: 22,300 | ext: py | lang: Python
max_stars: tensor2tensor/mesh_tensorflow/mtf_layers.py in Zhangyantsing/tensor2tensor @ b6abf28a1a903c91eb75d7a102945a780899d6e9, licenses ["Apache-2.0"], count 1, events 2018-12-12T18:50:28.000Z to 2018-12-12T18:50:28.000Z
max_issues: tensor2tensor/mesh_tensorflow/mtf_layers.py in Zhangyantsing/tensor2tensor @ b6abf28a1a903c91eb75d7a102945a780899d6e9, licenses ["Apache-2.0"], count null, event dates null
max_forks: tensor2tensor/mesh_tensorflow/mtf_layers.py in Zhangyantsing/tensor2tensor @ b6abf28a1a903c91eb75d7a102945a780899d6e9, licenses ["Apache-2.0"], count null, event dates null
content:
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers for mesh tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.mesh_tensorflow import mesh_tensorflow as mtf
import tensorflow as tf
def dense(x, output_dim, reduced_dims=None, expert_dims=None,
use_bias=True, activation=None, name=None):
"""Dense layer doing (kernel*x + bias) computation.
Args:
x: a mtf.Tensor of shape [..., reduced_dims].
output_dim: a mtf.Dimension
reduced_dims: an optional list of mtf.Dimensions of x to be reduced. If
omitted, we reduce the last dimension.
expert_dims: an optional list of mtf.Dimension which represent different
experts. Different experts get different weights.
use_bias: a boolean, whether to add bias.
activation: an optional function from mtf.Tensor to mtf.Tensor
name: a string. variable scope.
Returns:
a mtf.Tensor of shape [..., output_dim].
"""
if expert_dims is None:
expert_dims = []
if reduced_dims is None:
reduced_dims = x.shape.dims[-1:]
w_shape = mtf.Shape(expert_dims + reduced_dims + [output_dim])
output_shape = mtf.Shape(
[d for d in x.shape.dims if d not in reduced_dims] + [output_dim])
with tf.variable_scope(name, default_name="dense"):
stddev = mtf.list_product(d.size for d in reduced_dims) ** -0.5
w = mtf.get_variable(
x.mesh,
"kernel",
w_shape,
initializer=tf.random_normal_initializer(stddev=stddev),
activation_dtype=x.dtype)
y = mtf.matmul(x, w, output_shape=output_shape)
if use_bias:
b = mtf.get_variable(
x.mesh,
"bias",
mtf.Shape(expert_dims + [output_dim]),
initializer=tf.zeros_initializer(),
activation_dtype=x.dtype)
y += b
if activation is not None:
y = activation(y)
return y
def layer_norm(x, dim, epsilon=1e-6, name="layer_prepostprocess"):
"""Layer normalization over dimension dim.
Args:
x: a mtf.Tensor whose shape contains dim.
dim: a mtf.Dimension
epsilon: a floating point number
name: a string. variable scope.
Returns:
a mtf.Tensor with same shape as x.
"""
with tf.variable_scope(name + "/layer_norm"):
scale = mtf.get_variable(
x.mesh,
"layer_norm_scale",
mtf.Shape([dim]),
initializer=tf.ones_initializer(),
activation_dtype=x.dtype)
bias = mtf.get_variable(
x.mesh,
"layer_norm_bias",
mtf.Shape([dim]),
initializer=tf.zeros_initializer(),
activation_dtype=x.dtype)
reduced_shape = x.shape - dim
mean = mtf.reduce_mean(x, output_shape=reduced_shape)
variance = mtf.reduce_mean(mtf.square(x - mean), output_shape=reduced_shape)
norm_x = (x - mean) * mtf.rsqrt(variance + epsilon)
return norm_x * scale + bias
def softmax_cross_entropy_with_logits(logits, targets, vocab_dim):
"""Per-example softmax loss.
Args:
logits: a mtf.Tensor whose shape contains vocab_dim
targets: a mtf.Tensor with the same shape as logits
vocab_dim: a mtf.Dimension
Returns:
a mtf.Tensor whose shape is equal to logits.shape - vocab_dim
Raises:
ValueError: if the shapes do not match.
"""
if logits.shape != targets.shape:
raise ValueError(
"logits shape must equal targets shape"
"logits=%s targets=%s" % (logits.to_string, targets.to_string))
if vocab_dim not in logits.shape.dims:
raise ValueError("vocab_dim must be in logits.shape.dims")
log_softmax = mtf.log_softmax(logits, vocab_dim)
return mtf.negative(
mtf.reduce_sum(log_softmax * targets, reduced_dim=vocab_dim))
def weights_nonzero(targets, dtype=tf.float32):
def my_fn(x):
return tf.cast(tf.not_equal(x, 0), dtype)
return mtf.cwise(my_fn, [targets], output_dtype=dtype, name="weights_nonzero")
def dense_relu_dense(x,
hidden_channels,
dropout=0.0,
dropout_broadcast_dims=None,
name=None):
"""Hidden layer with ReLU activation followed by linear projection.
The output has the same number of channels as the input.
Args:
x: a mtf.Tensor
hidden_channels: a mtf.Dimension - channels in the hidden layer
dropout: an optional float
dropout_broadcast_dims: an optional list of mtf.Dimension
name: an optional string
Returns:
a mtf.Tensor with the same shape as x.
"""
with tf.variable_scope(name, default_name="dense_relu_dense"):
io_channels = x.shape.dims[-1]
stddev = (hidden_channels.size * io_channels.size) ** -0.25
io = mtf.Dimension("io", 2)
w = mtf.get_variable(
x.mesh,
"kernel",
mtf.Shape([io, io_channels, hidden_channels]),
initializer=tf.random_normal_initializer(stddev=stddev),
activation_dtype=x.dtype)
wi, wo = mtf.unstack(w, io)
h = mtf.relu(mtf.einsum([x, wi]))
if dropout != 0.0:
h = mtf.dropout(h, 1.0 - dropout,
noise_shape=h.shape - dropout_broadcast_dims)
return mtf.einsum([h, wo])
def masked_local_attention_1d(query_antecedent,
memory_antecedent,
kv_channels,
heads,
block_length=128,
name=None):
"""Attention to the source position and a neighborhood to the left of it.
The sequence is divided into blocks of length block_size.
Attention for a given query position can only see memory positions
less than or equal to the query position, in the corresponding block
and the previous block.
Args:
query_antecedent: a mtf.Tensor with shape [batch, query_length, io_channels]
memory_antecedent: a mtf.Tensor with shape
[batch, memory_length, io_channels] (optional). Currently, memory_length
must have the same size as query_length, but a different name.
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
block_length: an integer, representing receptive fields for attention.
name: an optional string.
Returns:
a Tensor of shape [batch, query_length, io_channels]
Raises:
ValueError: if channels or depth don't match.
"""
with tf.variable_scope(
name, default_name="multihead_attention",
values=[query_antecedent, memory_antecedent]):
batch, query_length, io_channels = query_antecedent.shape.dims
q_var, k_var, v_var, o_var = multihead_attention_vars(
query_antecedent.mesh, heads, io_channels, kv_channels,
query_antecedent.dtype)
if memory_antecedent is None:
memory_antecedent = rename_length_to_memory_length(
query_antecedent, query_length.name)
memory_batch, memory_length, memory_channels = memory_antecedent.shape.dims
if memory_batch != batch:
raise ValueError("memory batch must equal query batch")
if memory_channels != io_channels:
raise ValueError("memory channels must equal query channels")
# Get query q, keys k and values v.
q = mtf.einsum(
[query_antecedent, q_var],
mtf.Shape([batch, heads, query_length, kv_channels]))
k = mtf.einsum(
[memory_antecedent, k_var],
mtf.Shape([batch, heads, memory_length, kv_channels]))
v = mtf.einsum(
[memory_antecedent, v_var],
mtf.Shape([batch, heads, memory_length, kv_channels]))
# Let's assume for now we don't have padding and the block length equally
# divides the memory length.
block_length = (query_length.size
if query_length.size < block_length * 2 else block_length)
blength = mtf.Dimension("block_length", block_length)
mlength = mtf.Dimension("mem_block_length", block_length)
num_blocks = mtf.Dimension("num_blocks", query_length.size // block_length)
q = mtf.reshape(
q, mtf.Shape([batch, heads, num_blocks, blength, kv_channels]))
k = mtf.reshape(
k, mtf.Shape([batch, heads, num_blocks, mlength, kv_channels]))
v = mtf.reshape(
v, mtf.Shape([batch, heads, num_blocks, mlength, kv_channels]))
# compute attention for the first query block.
def first_block_attention():
"""Compute attention for the first block."""
first_q = mtf.slice(q, 0, 1, num_blocks.name)
first_k = mtf.slice(k, 0, 1, num_blocks.name)
first_v = mtf.slice(v, 0, 1, num_blocks.name)
first_output = dot_product_attention(first_q,
first_k,
first_v,
mask=None)
return first_output
# Attention for first block, since query_length = key_length.
first_output = first_block_attention()
# Concatenate two adjacent blocks to compute the overlapping memory block.
def local(x):
"""Helper function to get memory blocks."""
prev_block = mtf.slice(x, 0, num_blocks.size-1, num_blocks.name)
cur_block = mtf.slice(x, 1, num_blocks.size-1, num_blocks.name)
local_block = mtf.concat([prev_block, cur_block], mlength.name)
return local_block
local_k = local(k)
local_v = local(v)
# Calculate the causal mask to avoid peeking into the future. We compute
# this once and reuse it for all blocks since the block_size is known.
mlength = local_k.shape.dims[3]
mask = attention_bias_local_block(query_antecedent.mesh,
blength, mlength)
# Remove the first block from q since we already computed that.
tail_q = mtf.slice(q, 1, num_blocks.size-1, num_blocks.name)
tail_output = dot_product_attention(tail_q,
local_k,
local_v,
mask=mask)
# Now concatenate the first and rest of the blocks.
final_output = mtf.concat([first_output, tail_output], num_blocks.name)
final_output = mtf.reshape(final_output, mtf.Shape(
[batch, heads, query_length, kv_channels]))
return mtf.einsum([final_output, o_var],
mtf.Shape([batch, query_length, io_channels]))
def rename_length_to_memory_length(
x, length_name="length", memory_length_name="memory_length"):
return mtf.rename_dimension(x, length_name, memory_length_name)
def multihead_attention_vars(
mesh, heads, io_channels, kv_channels, activation_dtype):
"""Create Parameters for Multihead Attention.
Args:
mesh: a Mesh
heads: a Dimension
io_channels: a Dimension
kv_channels: a Dimension
activation_dtype: a tf.dtype
Returns:
q_var: a Tensor with shape [heads, io_channels, kv_channels]
k_var: a Tensor with shape [heads, io_channels, kv_channels]
v_var: a Tensor with shape [heads, io_channels, kv_channels]
o_var: a Tensor with shape [heads, io_channels, kv_channels]
"""
qkvo = mtf.Dimension("qkvo", 4)
qk_stddev = (io_channels.size ** -0.5) * (kv_channels.size ** -0.25)
v_stddev = io_channels.size ** -0.5
o_stddev = (io_channels.size * heads.size) ** -0.5
def qkvo_initializer(shape,
dtype=None,
partition_info=None,
verify_shape=None):
del partition_info, verify_shape
return tf.random_normal(shape, dtype=dtype) * tf.reshape(
[qk_stddev, qk_stddev, v_stddev, o_stddev], [4, 1, 1, 1])
var = mtf.get_variable(
mesh, "qkvo", mtf.Shape([qkvo, heads, io_channels, kv_channels]),
initializer=qkvo_initializer, activation_dtype=activation_dtype)
q_var, k_var, v_var, o_var = mtf.unstack(var, qkvo)
return q_var, k_var, v_var, o_var
def dot_product_attention(q,
k,
v,
mask,
dropout=0.0,
dropout_broadcast_dims=None):
"""Dot-product attention.
Args:
q: Tensor with shape [...., length_q, depth_k]. Typically leading dimensions
are [batch, heads].
k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
match with q.
v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must
match with q.
mask: mask Tensor (see attention_mask())
dropout: a float.
dropout_broadcast_dims: an optional list of mtf.Dimension
Returns:
Tensor with shape [..., length_q, depth_v].
"""
length_kv = k.shape.dims[-2]
logits_shape = mtf.Shape(q.shape.dims[:-1] + [length_kv])
logits = mtf.einsum([q, k], logits_shape)
if mask is not None:
logits += mask
weights = mtf.softmax(logits, length_kv)
if dropout != 0.0:
weights = mtf.dropout(
weights, 1.0 - dropout,
noise_shape=weights.shape - dropout_broadcast_dims)
depth_v = v.shape.dims[-1]
outputs_shape = mtf.Shape(q.shape.dims[:-1] + [depth_v])
outputs = mtf.einsum([weights, v], outputs_shape)
return outputs
def multihead_attention(query_antecedent,
memory_antecedent,
mask,
kv_channels,
heads,
dropout=0.0,
dropout_broadcast_dims=None,
name="multihead_attention"):
"""Multihead scaled-dot-product attention with input/output transformations.
In order to use only one variable containing the four weight matrices
packed together, we insist that the query and memory antecedents have the
same dimensionality (io_channels) and that the keys and values have the
same dimensionality (kv_channels).
Args:
query_antecedent: a mtf.Tensor with shape
[<batch_dims>, query_length, io_channels]
memory_antecedent: a mtf.Tensor with shape
[batch, memory_length, io_channels] (optional)
mask: mask Tensor (see attention_mask())
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
dropout: a floating point value
dropout_broadcast_dims: an optional list of mtf.Dimension
name: an optional string.
Returns:
A mtf.Tensor with shape [batch, query_length, io_channels]
Raises:
ValueError: if the dimensions do not match.
"""
batch_dims = query_antecedent.shape.dims[:-2]
query_length, io_channels = query_antecedent.shape.dims[-2:]
with tf.variable_scope(name,
default_name="multihead_attention",
values=[query_antecedent, memory_antecedent]):
q_var, k_var, v_var, o_var = multihead_attention_vars(
query_antecedent.mesh, heads, io_channels, kv_channels,
query_antecedent.dtype)
if memory_antecedent is None:
memory_antecedent = rename_length_to_memory_length(
query_antecedent, query_length.name)
memory_batch_dims = memory_antecedent.shape.dims[:-2]
memory_length, memory_channels = memory_antecedent.shape.dims[-2:]
if memory_batch_dims != batch_dims:
raise ValueError("memory batch must equal query batch")
if memory_channels != io_channels:
raise ValueError("memory channels must equal query channels")
q = mtf.einsum(
[query_antecedent, q_var],
mtf.Shape(batch_dims + [heads, query_length, kv_channels]))
k = mtf.einsum(
[memory_antecedent, k_var],
mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
v = mtf.einsum(
[memory_antecedent, v_var],
mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
o = dot_product_attention(
q, k, v, mask, dropout, dropout_broadcast_dims)
return mtf.einsum(
[o, o_var], mtf.Shape(batch_dims + [query_length, io_channels]))
def multihead_self_attention_incremental(query_antecedent,
prev_k,
prev_v,
step_num,
name="multihead_attention"):
"""Incremental self-attention (one decode step).
In order to use only one variable containing the four weight matrices
packed together, we insist that the query and memory antecedents have the
same dimensionality (io_channels) and that the keys and values have the
same dimensionality (kv_channels).
Args:
query_antecedent: a mtf.Tensor with shape [batch..., io_channels]
prev_k: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
prev_v: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
step_num: mtf Scalar with dtype tf.int32
name: an optional string.
Returns:
y: A mtf.Tensor with shape [batch..., io_channels]
new_k: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
new_v: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
Raises:
ValueError: if the dimensions do not match.
"""
batch_dims = query_antecedent.shape.dims[:-1]
io_channels = query_antecedent.shape.dims[-1]
heads, memory_length, kv_channels = prev_k.shape.dims[-3:]
with tf.variable_scope(name, default_name="multihead_attention"):
q_var, k_var, v_var, o_var = multihead_attention_vars(
query_antecedent.mesh, heads, io_channels, kv_channels,
query_antecedent.dtype)
memory_antecedent = query_antecedent
q = mtf.einsum(
[query_antecedent, q_var],
mtf.Shape(batch_dims + [heads, kv_channels]))
k = mtf.einsum(
[memory_antecedent, k_var],
mtf.Shape(batch_dims + [heads, kv_channels]))
v = mtf.einsum(
[memory_antecedent, v_var],
mtf.Shape(batch_dims + [heads, kv_channels]))
k = prev_k + mtf.multiply(
k, mtf.one_hot(step_num, memory_length), output_shape=prev_k.shape)
v = prev_v + mtf.multiply(
v, mtf.one_hot(step_num, memory_length), output_shape=prev_v.shape)
mask = mtf.to_float(mtf.greater(mtf.range(
query_antecedent.mesh, memory_length, dtype=tf.int32), step_num)
) * -1e9
o = dot_product_attention(q, k, v, mask)
y = mtf.einsum([o, o_var], query_antecedent.shape)
return y, k, v
def multihead_encdec_attention_incremental(query_antecedent,
q_var, o_var, k, v,
mask,
name="multihead_attention"):
"""Incremental attention over encoder (one decode step).
In order to use only one variable containing the four weight matrices
packed together, we insist that the query and memory antecedents have the
same dimensionality (io_channels) and that the keys and values have the
same dimensionality (kv_channels).
memory_dims is a subset of query_dims
Args:
query_antecedent: a mtf.Tensor with shape query_dims + [io_channels]
q_var: a mtf.Tensor with shape [heads, io_channels, kv_channels]
o_var: a mtf.Tensor with shape [heads, io_channels, kv_channels]
k: memory_dims + [heads, memory_length, kv_channels]
v: memory_dims + [heads, memory_length, kv_channels]
mask: mask Tensor (see attention_mask())
name: an optional string.
Returns:
A mtf.Tensor with shape [batch, qlen, io_channels]
"""
heads, _, kv_channels = k.shape.dims[-3:]
query_dims = query_antecedent.shape.dims[:-1]
with tf.variable_scope(name, default_name="multihead_attention"):
q = mtf.einsum(
[query_antecedent, q_var],
mtf.Shape(query_dims + [heads, kv_channels]))
o = dot_product_attention(q, k, v, mask)
return mtf.einsum([o, o_var], query_antecedent.shape)
def attention_mask_ignore_padding(inputs, dtype=tf.float32):
"""Bias for encoder-decoder attention.
Args:
inputs: a mtf.Tensor with shape [..., length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., memory_length_dim]
"""
inputs = rename_length_to_memory_length(inputs)
return mtf.cast(mtf.equal(inputs, 0), dtype) * -1e9
def attention_mask_autoregressive(query_pos, dtype=tf.float32):
"""Bias for self-attention where attention to the right is disallowed.
Args:
query_pos: a mtf.Tensor with shape [..., length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., length_dim, memory_length_dim]
"""
memory_pos = rename_length_to_memory_length(query_pos)
return mtf.cast(mtf.less(query_pos, memory_pos), dtype) * -1e9
def attention_mask_same_segment(
query_segment, memory_segment=None, dtype=tf.float32):
"""Bias for attention where attention between segments is disallowed.
Args:
query_segment: a mtf.Tensor with shape [..., length_dim]
memory_segment: a mtf.Tensor with shape [..., memory_length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., length_dim, memory_length_dim]
"""
memory_segment = rename_length_to_memory_length(
memory_segment or query_segment)
return mtf.cast(mtf.not_equal(query_segment, memory_segment), dtype) * -1e9
def attention_bias_local_block(mesh, block_length, memory_length,
dtype=tf.int32):
"""Bias for attention for local blocks where attention to right is disallowed.
Args:
mesh: a MeshTensorflow object
block_length: a mtf.Dimension
memory_length: a mtf.Dimension
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [rows, cols]
"""
mask = mtf.cast(mtf.less(mtf.range(mesh, block_length, dtype=dtype),
mtf.range(mesh, memory_length, dtype=dtype)),
dtype=dtype)
mask = mtf.cast(mask, dtype=tf.float32) * -1e9
return mask
avg_line_length: 37.732657 | max_line_length: 80 | alphanum_fraction: 0.665202
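The attention masks in the file above are additive biases: disallowed positions receive -1e9 so they effectively vanish after the softmax. A standalone NumPy sketch of the same numerics for a causal (autoregressive) mask; it does not touch the mesh_tensorflow API and is only an illustration:

```python
import numpy as np

block_length = 4    # analogous to the block_length dimension above
memory_length = 4   # analogous to the memory_length dimension

query_pos = np.arange(block_length)[:, None]    # shape [block_length, 1]
memory_pos = np.arange(memory_length)[None, :]  # shape [1, memory_length]

# Disallow attention to the future: query position < memory position gets -1e9
mask = (query_pos < memory_pos).astype(np.float32) * -1e9
print(mask)
# Row i has 0.0 for allowed positions (<= i) and -1e9 for future positions,
# which is what gets added to the logits before the softmax.
```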
hexsha: dc55c03f4a5e5a4732334eb58e2e7e4f3c83f97e | size: 5,710 | ext: py | lang: Python
max_stars: tsx/counterfactuals/moc.py in MatthiasJakobs/tsx @ 8a686ffd0af2f9f826d9ce11349e0fa0e883e897, licenses ["MIT"], count null, event dates null
max_issues: tsx/counterfactuals/moc.py in MatthiasJakobs/tsx @ 8a686ffd0af2f9f826d9ce11349e0fa0e883e897, licenses ["MIT"], count null, event dates null
max_forks: tsx/counterfactuals/moc.py in MatthiasJakobs/tsx @ 8a686ffd0af2f9f826d9ce11349e0fa0e883e897, licenses ["MIT"], count null, event dates null
content:
import numpy as np
from itertools import permutations
from random import shuffle
from tsx.utils import NSGA2
from tsx.distances import euclidian, dtw
from tsx.utils import to_numpy, sigmoid
from sklearn.neighbors import KNeighborsClassifier
# Implements the approach by Dandl et al. (2020), but for time series data
# https://arxiv.org/abs/2004.11165
class MOC(NSGA2):
# TODO: Only for time series data of same length
def __init__(self, model, X, y, **kwargs):
self.model = model
self.X = to_numpy(X)
self.y = to_numpy(y)
self.nr_classes = len(np.unique(y))
self.knn = KNeighborsClassifier(n_neighbors=3, algorithm="ball_tree", metric=dtw, n_jobs=4)
self.knn.fit(self.X, self.y)
self.mutation_rate = 0.5 # how many individuals to mutate (on average)
self.mutation_strength = 0.4 # how many points in each individual to mutate (on average)
super().__init__(**kwargs)
def generate(self, x_star, target=None):
x_star = to_numpy(x_star)
if target is None:
# untargeted attack
# TODO
raise NotImplementedError()
criterias = [
self.generate_obj_1(self.nr_classes, target, True), #TODO
self.generate_obj_2(x_star, dtw),
self.generate_obj_3(x_star),
self.generate_obj_4()
]
self.set_criterias(criterias)
fitness, xs = self.run(guide=x_star)
true_counterfactual_indices = fitness[:, 0] == 0
return fitness[true_counterfactual_indices], xs[true_counterfactual_indices]
def _random_individuals(self, n, guide=None):
# assumption: zero mean ts
ind_length = len(self.X[0])
# just copy initial point
if guide is not None:
if guide.ndim == 1:
return self.mutation(np.tile(np.expand_dims(guide, 0), (n, 1)), override_mutation_rate=1.0)
if guide.ndim == 2 and guide.shape[0] == 1:
return self.mutation(np.tile(guide, (n, 1)), override_mutation_rate=1.0)
raise Exception("guide has unsupported shape {}".format(guide.shape))
# cumsum
return np.cumsum(np.random.normal(size=(n, ind_length)), axis=1)
# naive random
# return np.random.rand(n * ind_length).reshape(n, -1)
def recombination(self, x):
# 1 point crossover (naive)
recomb_combinations = list(permutations(np.arange(len(x)), 2))
shuffle(recomb_combinations)
recomb_combinations = recomb_combinations[:self.offspring_size]
to_return = np.zeros((self.offspring_size, len(x[0])))
for i, (s, t) in enumerate(recomb_combinations):
crossover_point = np.random.randint(1, len(x[0]))
to_return[i][:crossover_point] = x[s][:crossover_point]
to_return[i][crossover_point:] = x[t][crossover_point:]
return to_return
def mutation(self, x, override_mutation_rate=None):
ind_length = len(self.X[0])
assert self.parent_size == self.offspring_size
# mask to choose individuals to mutate
if override_mutation_rate is not None:
rate_mask = np.random.binomial(1, p=override_mutation_rate, size=len(x))
else:
rate_mask = np.random.binomial(1, p=self.mutation_rate, size=len(x))
for idx in rate_mask.nonzero()[0]:
# mask to choose features to mutate
strength_mask = np.random.binomial(1, p=self.mutation_strength, size=ind_length)
x[idx] = x[idx] + strength_mask * np.random.normal(size=ind_length) * 0.6
return x
# prediction close to desired outcome
def generate_obj_1(self, nr_classes, out_class_index, targeted):
threshold = 1.0 / nr_classes
def obj_1(x):
predictions = self.model.proba(x)
# weird edge case for binary sklearn classifier without proba
if self.nr_classes == 2:
predictions_class_one = np.expand_dims(sigmoid(predictions), 1)
predictions = np.concatenate((1-predictions_class_one, predictions_class_one), axis=1)
assert np.max(predictions) <= 1 and np.min(predictions) >= 0, print(predictions_class_one)
predictions = predictions[:, out_class_index]
mask = predictions > threshold
inv_mask = np.logical_not(mask)
predictions[mask] = 0
predictions[inv_mask] = np.abs(predictions[inv_mask]-threshold)
return predictions.squeeze()
return obj_1
# distance close to initial point
# x_star is initial point
def generate_obj_2(self, x_star, distance):
def obj_2(x):
n_elements = x.shape[0]
distances = np.zeros(n_elements)
for i in range(n_elements):
distances[i] = distance(np.expand_dims(x[i], 0), x_star)
return distances
return obj_2
# differs from x_star only in few dimensions
def generate_obj_3(self, x_star, threshold=0.01):
def obj_3(x):
# l_0 norm
# TODO: Use np.isclose ?
difference = np.abs(x-x_star)
difference[difference < threshold] = 0.0
difference[difference >= threshold] = 1.0
return np.sum(difference, axis=1)
return obj_3
# plausible datapoint
# use mean of DTW between k nearest neighbours (analog to Dandl paper, but with DTW)
def generate_obj_4(self, k=3):
def obj_4(x):
neighbors_distance, _ = self.knn.kneighbors(x, k, True)
return np.mean(neighbors_distance, axis=1)
return obj_4
avg_line_length: 36.139241 | max_line_length: 107 | alphanum_fraction: 0.624694
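`MOC.recombination` above is a plain one-point crossover over equal-length series. A standalone NumPy illustration of a single crossover between two parents, mirroring that logic without the tsx `NSGA2` machinery:

```python
import numpy as np

rng = np.random.default_rng(0)

parent_a = np.zeros(8)
parent_b = np.ones(8)

# One-point crossover: take the head of one parent and the tail of the other,
# split at a random interior index (same idea as MOC.recombination).
crossover_point = rng.integers(1, len(parent_a))
child = np.empty_like(parent_a)
child[:crossover_point] = parent_a[:crossover_point]
child[crossover_point:] = parent_b[crossover_point:]

print(crossover_point, child)  # e.g. 6 [0. 0. 0. 0. 0. 0. 1. 1.]
```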
hexsha: 940158cb8fdc541a98688ee3d82414e3fb22ab30 | size: 840 | ext: py | lang: Python
max_stars: boopy-master/booloader.py in Zerolang-in/Zero @ 0e147c93eda85b9cfbed42239d7fccc3566b7e31, licenses ["MIT"], count null, event dates null
max_issues: boopy-master/booloader.py in Zerolang-in/Zero @ 0e147c93eda85b9cfbed42239d7fccc3566b7e31, licenses ["MIT"], count null, event dates null
max_forks: boopy-master/booloader.py in Zerolang-in/Zero @ 0e147c93eda85b9cfbed42239d7fccc3566b7e31, licenses ["MIT"], count null, event dates null
content:
import logging
from boolexer import BooLexer
from booparser import BooParser
from booeval import BooEval
class BooLoader:
def __init__(self, filename):
prog = Program(open(filename, "r"))
prog.Run()
class Program:
def __init__(self, fh):
self.code = fh
self.tokens = []
def Run(self):
for line in self.code:
#print(f"line: {line}")
lexer = BooLexer()
lexer.Run(line)
for token in lexer.tokens:
self.tokens.append(token)
#print(f"tokens: {self.tokens}")
parser = BooParser(self.tokens)
parser.Run()
#print(f"vars: {parser.variables}")
#print(f"ast: {parser.ast}")
evaluator = BooEval(parser.ast)
evaluator.Run()
print(f"ouput: {evaluator.output}")
avg_line_length: 22.105263 | max_line_length: 43 | alphanum_fraction: 0.569048
hexsha: ae6242bc2b2fda1a3e3478eb145234b84165bc44 | size: 3,103 | ext: py | lang: Python
max_stars: huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/restore_instance_response.py in huaweicloud/huaweicloud-sdk-python-v3 @ 7a6270390fcbf192b3882bf763e7016e6026ef78, licenses ["Apache-2.0"], count 64, events 2020-06-12T07:05:07.000Z to 2022-03-30T03:32:50.000Z
max_issues: huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/restore_instance_response.py in huaweicloud/huaweicloud-sdk-python-v3 @ 7a6270390fcbf192b3882bf763e7016e6026ef78, licenses ["Apache-2.0"], count 11, events 2020-07-06T07:56:54.000Z to 2022-01-11T11:14:40.000Z
max_forks: huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/restore_instance_response.py in huaweicloud/huaweicloud-sdk-python-v3 @ 7a6270390fcbf192b3882bf763e7016e6026ef78, licenses ["Apache-2.0"], count 24, events 2020-06-08T11:42:13.000Z to 2022-03-04T06:44:08.000Z
content:
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RestoreInstanceResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'job_id': 'str'
}
attribute_map = {
'job_id': 'job_id'
}
def __init__(self, job_id=None):
"""RestoreInstanceResponse - a model defined in huaweicloud sdk"""
super(RestoreInstanceResponse, self).__init__()
self._job_id = None
self.discriminator = None
if job_id is not None:
self.job_id = job_id
@property
def job_id(self):
"""Gets the job_id of this RestoreInstanceResponse.
The ID of the asynchronous task that restores data to the current instance.
:return: The job_id of this RestoreInstanceResponse.
:rtype: str
"""
return self._job_id
@job_id.setter
def job_id(self, job_id):
"""Sets the job_id of this RestoreInstanceResponse.
The ID of the asynchronous task that restores data to the current instance.
:param job_id: The job_id of this RestoreInstanceResponse.
:type: str
"""
self._job_id = job_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RestoreInstanceResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
avg_line_length: 26.982609 | max_line_length: 79 | alphanum_fraction: 0.557203
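A small sketch of how the generated model behaves on its own, assuming only the class defined above and an installed `huaweicloudsdkcore`; no live DDS call is made and the job id value is made up:

```python
# Hypothetical value, used only to exercise the model class
response = RestoreInstanceResponse(job_id="hypothetical-job-id")

print(response.job_id)     # hypothetical-job-id
print(response.to_dict())  # {'job_id': 'hypothetical-job-id'}
print(response)            # JSON string via to_str()/__repr__()
```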
hexsha: 3fba5e51eaa2f57eda17accc59bef585f894e54a | size: 790 | ext: py | lang: Python
max_stars: aiohttp__asyncio__examples/aiohttp_client.py in gil9red/SimplePyScripts @ c191ce08fbdeb29377639184579e392057945154, licenses ["CC-BY-4.0"], count 117, events 2015-12-18T07:18:27.000Z to 2022-03-28T00:25:54.000Z
max_issues: aiohttp__asyncio__examples/aiohttp_client.py in gil9red/SimplePyScripts @ c191ce08fbdeb29377639184579e392057945154, licenses ["CC-BY-4.0"], count 8, events 2018-10-03T09:38:46.000Z to 2021-12-13T19:51:09.000Z
max_forks: aiohttp__asyncio__examples/aiohttp_client.py in gil9red/SimplePyScripts @ c191ce08fbdeb29377639184579e392057945154, licenses ["CC-BY-4.0"], count 28, events 2016-08-02T17:43:47.000Z to 2022-03-21T08:31:12.000Z
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import asyncio
# pip install aiohttp
import aiohttp
async def main():
url = 'https://python.org'
async with aiohttp.ClientSession() as session:
async with session.get(url) as rs:
print("Status:", rs.status)
print("Content-type:", rs.headers['Content-Type'])
html = await rs.text()
print("Body:", html[:15], "...")
print()
async with aiohttp.request("GET", url) as rs:
print("Status:", rs.status)
print("Content-type:", rs.headers['Content-Type'])
html = await rs.text()
print("Body:", html[:15], "...")
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
avg_line_length: 21.351351 | max_line_length: 62 | alphanum_fraction: 0.578481
hexsha: e38f556c06ad6207dfc3c7ca1c861370b8d7593a | size: 481 | ext: py | lang: Python
max_stars: examples/video_stream_try.py in b07901064/drone_0 @ 7cd6be039bc03a9d6286e6dfbcf13dbbb020b697, licenses ["MIT"], count null, event dates null
max_issues: examples/video_stream_try.py in b07901064/drone_0 @ 7cd6be039bc03a9d6286e6dfbcf13dbbb020b697, licenses ["MIT"], count null, event dates null
max_forks: examples/video_stream_try.py in b07901064/drone_0 @ 7cd6be039bc03a9d6286e6dfbcf13dbbb020b697, licenses ["MIT"], count null, event dates null
content:
from djitellopy import Tello
from cv2 import cv2
import time
tello =Tello()
tello.connect()
#tello.takeoff()
tello.streamon()
count = 0
vidcap = cv2.VideoCapture(tello.get_udp_video_address())
success,image = vidcap.read()
success = True
while success:
vidcap.set(cv2.CAP_PROP_POS_MSEC,(count*1000))
success,image = vidcap.read()
print ('Read a new frame: ', success)
cv2.imwrite("./img/frame%d.jpg" % count, image) # save frame as JPEG file
count = count + 5
avg_line_length: 25.315789 | max_line_length: 77 | alphanum_fraction: 0.713098
hexsha: 9fbc0a7dc9fec3ea79b8a74e3472b43ecea0a8bb | size: 2,065 | ext: py | lang: Python
max_stars: examples/hacker_news/hacker_news_tests/test_resources/test_snowflake_io_manager.py in JBrVJxsc/dagster @ 680aa23387308335eb0eccfa9241b26d10a2d627, licenses ["Apache-2.0"], count 1, events 2021-07-03T09:05:58.000Z to 2021-07-03T09:05:58.000Z
max_issues: examples/hacker_news/hacker_news_tests/test_resources/test_snowflake_io_manager.py in JBrVJxsc/dagster @ 680aa23387308335eb0eccfa9241b26d10a2d627, licenses ["Apache-2.0"], count null, event dates null
max_forks: examples/hacker_news/hacker_news_tests/test_resources/test_snowflake_io_manager.py in JBrVJxsc/dagster @ 680aa23387308335eb0eccfa9241b26d10a2d627, licenses ["Apache-2.0"], count null, event dates null
content:
import os
import uuid
from contextlib import contextmanager
from dagster import InputContext, OutputContext
from hacker_news.resources.snowflake_io_manager import ( # pylint: disable=E0401
SnowflakeIOManager,
connect_snowflake,
)
from pandas import DataFrame
def generate_snowflake_config():
return {
"account": os.getenv("SNOWFLAKE_ACCOUNT"),
"user": os.getenv("SNOWFLAKE_USER"),
"password": os.getenv("SNOWFLAKE_PASSWORD"),
"database": "DEMO_DB",
"warehouse": "TINY_WAREHOUSE",
}
@contextmanager
def temporary_snowflake_table(contents: DataFrame):
snowflake_config = generate_snowflake_config()
table_name = "a" + str(uuid.uuid4()).replace("-", "_")
with connect_snowflake(snowflake_config) as con:
contents.to_sql(name=table_name, con=con, index=False, schema="public")
try:
yield table_name
finally:
with connect_snowflake(snowflake_config) as conn:
conn.execute(f"drop table public.{table_name}")
def test_handle_output_then_load_input():
snowflake_config = generate_snowflake_config()
snowflake_manager = SnowflakeIOManager()
contents1 = DataFrame([{"col1": "a", "col2": 1}]) # just to get the types right
contents2 = DataFrame([{"col1": "b", "col2": 2}]) # contents we will insert
with temporary_snowflake_table(contents1) as temp_table_name:
metadata = {
"table": f"public.{temp_table_name}",
}
output_context = OutputContext(
step_key="a",
name="result",
pipeline_name="fake_pipeline",
metadata=metadata,
resource_config=snowflake_config,
)
list(snowflake_manager.handle_output(output_context, contents2)) # exhaust the iterator
input_context = InputContext(
upstream_output=output_context, resource_config=snowflake_config
)
input_value = snowflake_manager.load_input(input_context)
assert input_value.equals(contents2), f"{input_value}\n\n{contents2}"
avg_line_length: 33.852459 | max_line_length: 96 | alphanum_fraction: 0.680872
hexsha: 9b47497a72abfaf155d443dc0afd15c9a8b621fa | size: 2,147 | ext: py | lang: Python
max_stars: data/process_data.py in asxd-10/Disaster-Response-Pipeline @ 2277e5bb90fa96425a6d7f221572fc8ea537c455, licenses ["MIT"], count null, event dates null
max_issues: data/process_data.py in asxd-10/Disaster-Response-Pipeline @ 2277e5bb90fa96425a6d7f221572fc8ea537c455, licenses ["MIT"], count null, event dates null
max_forks: data/process_data.py in asxd-10/Disaster-Response-Pipeline @ 2277e5bb90fa96425a6d7f221572fc8ea537c455, licenses ["MIT"], count null, event dates null
content:
import sys
import pandas as pd
# ETL pipeline
#Program to load and clean data before moving it to ML pipeline
def load_data(messages_filepath, categories_filepath):
messages = pd.read_csv(messages_filepath)
messages.head()
categories = pd.read_csv(categories_filepath)
categories.head()
df = pd.merge(messages,categories,on='id')
df.head()
return df
def clean_data(df):
categories = df.categories.str.split(";",expand=True)
row = categories.iloc[0,:]
category_colnames = list((row.apply(lambda x : x[:-2])).values)
print(category_colnames)
categories.columns = category_colnames
for column in categories:
categories[column] = pd.to_numeric(categories[column].apply(lambda x : x[-1]))
df.drop('categories',axis=1,inplace=True)
df = pd.concat([df, categories], axis = 1)
df.drop_duplicates(subset=['id'],inplace=True)
return df
from sqlalchemy import create_engine
def save_data(df, database_filename):
engine = create_engine('sqlite:///'+database_filename)
df.to_sql('dataset', engine, index=False)
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
avg_line_length: 33.030769 | max_line_length: 86 | alphanum_fraction: 0.654401
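The heart of `clean_data` above is splitting the semicolon-separated `categories` column into one numeric column per category. A toy illustration of that transformation on a two-row frame (standalone pandas; the category names and values here are made up, not the real disaster-response data):

```python
import pandas as pd

df = pd.DataFrame({
    "id": [1, 2],
    "categories": ["related-1;request-0;offer-0", "related-0;request-1;offer-0"],
})

categories = df.categories.str.split(";", expand=True)
# Column names come from the first row, with the trailing "-<digit>" stripped
categories.columns = categories.iloc[0].apply(lambda x: x[:-2]).values
# Keep only the trailing digit of each cell as the numeric value
for column in categories:
    categories[column] = pd.to_numeric(categories[column].apply(lambda x: x[-1]))

print(pd.concat([df.drop("categories", axis=1), categories], axis=1))
#    id  related  request  offer
# 0   1        1        0      0
# 1   2        0        1      0
```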
hexsha: 6af549228b5620f0218a2e05133e232404a969c5 | size: 2,753 | ext: py | lang: Python
max_stars: python/test/clienttest.py in cjh1/molequeue @ 37dffbfe9cdbd2d18fd2c2c9eab6690006f18074, licenses ["BSD-3-Clause"], count null, event dates null
max_issues: python/test/clienttest.py in cjh1/molequeue @ 37dffbfe9cdbd2d18fd2c2c9eab6690006f18074, licenses ["BSD-3-Clause"], count null, event dates null
max_forks: python/test/clienttest.py in cjh1/molequeue @ 37dffbfe9cdbd2d18fd2c2c9eab6690006f18074, licenses ["BSD-3-Clause"], count null, event dates null
content:
import unittest
from functools import partial
import time
import molequeue
class TestClient(unittest.TestCase):
def test_submit_job_request(self):
client = molequeue.Client()
client.connect_to_server('MoleQueue')
job_request = molequeue.JobRequest()
job_request.queue = 'salix'
job_request.program = 'sleep (testing)'
molequeue_id = client.submit_job_request(job_request)
print "MoleQueue ID: ", molequeue_id
self.assertTrue(isinstance(molequeue_id, int))
client.disconnect()
def test_notification_callback(self):
client = molequeue.Client()
client.connect_to_server('MoleQueue')
self.callback_count = 0
def callback_counter(testcase, msg):
testcase.callback_count +=1
callback = partial(callback_counter, self)
client.register_notification_callback(callback)
client.register_notification_callback(callback)
job_request = molequeue.JobRequest()
job_request.queue = 'salix'
job_request.program = 'sleep (testing)'
molequeue_id = client.submit_job_request(job_request)
# wait for notification
time.sleep(1)
self.assertIs(self.callback_count, 2)
client.disconnect()
def test_wait_for_response_timeout(self):
client = molequeue.Client()
# Fake up the request
client._request_response_map[1] = None
start = time.time()
response = client._wait_for_response(1, 3)
end = time.time()
self.assertEqual(response, None)
self.assertEqual(int(end - start), 3)
def test_lookup_job(self):
client = molequeue.Client()
client.connect_to_server('MoleQueue')
expected_job_request = molequeue.JobRequest()
expected_job_request.queue = 'salix'
expected_job_request.program = 'sleep (testing)'
expected_job_request.description = 'This is a test job'
expected_job_request.hide_from_gui = True
expected_job_request.popup_on_state_change = False
molequeue_id = client.submit_job_request(expected_job_request)
jobrequest = client.lookup_job(molequeue_id)
self.assertEqual(molequeue_id, jobrequest.molequeue_id())
self.assertEqual(jobrequest.job_state(), molequeue.JobState.ACCEPTED)
self.assertTrue(isinstance(jobrequest.queue_id(), int))
self.assertEqual(jobrequest.queue, expected_job_request.queue)
self.assertEqual(jobrequest.program, expected_job_request.program)
self.assertEqual(jobrequest.description, expected_job_request.description)
self.assertEqual(jobrequest.hide_from_gui,
expected_job_request.hide_from_gui)
self.assertEqual(jobrequest.popup_on_state_change,
expected_job_request.popup_on_state_change)
client.disconnect()
if __name__ == '__main__':
unittest.main()
avg_line_length: 29.602151 | max_line_length: 78 | alphanum_fraction: 0.739557
hexsha: 79db706529e5b2d5665272cc3c4b8d9e2be7e5c6 | size: 522 | ext: py | lang: Python
max_stars: service/AudioFile.py in rv8flyboy/pyrobotlab @ 4e04fb751614a5cb6044ea15dcfcf885db8be65a, licenses ["Apache-2.0"], count 63, events 2015-02-03T18:49:43.000Z to 2022-03-29T03:52:24.000Z
max_issues: service/AudioFile.py in hirwaHenryChristian/pyrobotlab @ 2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9, licenses ["Apache-2.0"], count 16, events 2016-01-26T19:13:29.000Z to 2018-11-25T21:20:51.000Z
max_forks: service/AudioFile.py in hirwaHenryChristian/pyrobotlab @ 2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9, licenses ["Apache-2.0"], count 151, events 2015-01-03T18:55:54.000Z to 2022-03-04T07:04:23.000Z
content:
#########################################
# AudioFile.py
# more info @: http://myrobotlab.org/service/AudioFile
#########################################
# start the services
audiocapture = Runtime.start("audiocapture","AudioCapture")
audiofile = Runtime.start("audiofile", "AudioFile")
# it starts capturing audio
audiocapture.captureAudio()
# it will record for 5 seconds
sleep(5)
# save last capture
audiocapture.save("mycapture.wav");
# audiofile.playFile("c:/sounds/beep.mp3")
audiofile.playFile("mycapture.wav")
avg_line_length: 27.473684 | max_line_length: 59 | alphanum_fraction: 0.637931
hexsha: ce8331ad4ad79eb03f9e67aec3c3ebb64cbc8769 | size: 574 | ext: py | lang: Python
max_stars: Lesson13_UsingDictionaries/2-GetInvalidKey.py in StyvenSoft/degree-python @ 644953608948f341f5a20ceb9a02976a128b472b, licenses ["MIT"], count null, event dates null
max_issues: Lesson13_UsingDictionaries/2-GetInvalidKey.py in StyvenSoft/degree-python @ 644953608948f341f5a20ceb9a02976a128b472b, licenses ["MIT"], count null, event dates null
max_forks: Lesson13_UsingDictionaries/2-GetInvalidKey.py in StyvenSoft/degree-python @ 644953608948f341f5a20ceb9a02976a128b472b, licenses ["MIT"], count null, event dates null
content:
building_heights = {"Burj Khalifa": 828, "Shanghai Tower": 632, "Abraj Al Bait": 601, "Ping An": 599, "Lotte World Tower": 554.5, "One World Trade": 541.3}
key_to_check = "Shanghai Tower"
if key_to_check in building_heights:
print(building_heights["Shanghai Tower"])
# 632
zodiac_elements = {"water": ["Cancer", "Scorpio", "Pisces"], "fire": ["Aries", "Leo", "Sagittarius"], "earth": ["Taurus", "Virgo", "Capricorn"], "air":["Gemini", "Libra", "Aquarius"]}
zodiac_elements["energy"] = "Not a Zodiac element"
print(zodiac_elements["energy"])
# Not a Zodiac element
avg_line_length: 31.888889 | max_line_length: 183 | alphanum_fraction: 0.6777
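The snippet above guards against missing keys with an `in` check and by inserting a fallback entry. Consistent with the lesson topic, the same behaviour can be had with `dict.get()` and a default value; a small sketch that is not part of the original file:

```python
zodiac_elements = {"water": ["Cancer", "Scorpio", "Pisces"], "fire": ["Aries", "Leo", "Sagittarius"]}

# .get returns the default instead of raising KeyError for a missing key
print(zodiac_elements.get("energy", "Not a Zodiac element"))
# Not a Zodiac element
print(zodiac_elements.get("fire", "Not a Zodiac element"))
# ['Aries', 'Leo', 'Sagittarius']
```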
hexsha: 08ef7eb5109dfcb6a6344f54cbb0b7f7b90abfdb | size: 1,915 | ext: py | lang: Python
max_stars: Tutorials/python/dash-web-app/resources/home/dnanexus/my_app.py in dnanexus/dnanexus-example-applets @ 04a6abebfef3039416cede58f39a8c3542468d3b, licenses ["Apache-2.0"], count 15, events 2015-02-24T06:11:31.000Z to 2021-09-23T15:35:48.000Z
max_issues: Tutorials/python/dash-web-app/resources/home/dnanexus/my_app.py in dnanexus/dnanexus-example-applets @ 04a6abebfef3039416cede58f39a8c3542468d3b, licenses ["Apache-2.0"], count 10, events 2017-11-10T21:55:44.000Z to 2022-02-14T19:58:34.000Z
max_forks: Tutorials/python/dash-web-app/resources/home/dnanexus/my_app.py in dnanexus/dnanexus-example-applets @ 04a6abebfef3039416cede58f39a8c3542468d3b, licenses ["Apache-2.0"], count 11, events 2015-09-28T00:33:17.000Z to 2020-10-22T14:42:37.000Z
content:
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
def create_app():
app = dash.Dash(__name__)
df = pd.read_csv('gdp-life-exp-2007.csv')
app.layout = html.Div(children=[
html.H1(children='Dash works on DNAnexus!'),
dcc.Graph(
id='life-exp-vs-gdp',
figure={
'data': [
go.Scatter(
x=df[df['continent'] == i]['gdp per capita'],
y=df[df['continent'] == i]['life expectancy'],
text=df[df['continent'] == i]['country'],
mode='markers',
opacity=0.7,
marker={
'size': 15,
'line': {'width': 0.5, 'color': 'white'}
},
name=i
) for i in df.continent.unique()
],
'layout': go.Layout(
xaxis={'type': 'log', 'title': 'GDP Per Capita'},
yaxis={'title': 'Life Expectancy'},
legend={'x': 0, 'y': 1},
hovermode='closest',
title="As proof, here is an interactive Gapminder-style scatter plot"
)
}
),
dcc.Markdown('''
You can also write in Markdown, so we can easily write documentation straight into the interface. This is how you make an applet open up HTTPS by the way. Just add this to the dxapp.json:
```
"httpsApp": {"ports":[443], "shared_access": "VIEW"},
```
And then your web app should output on port 443.
'''),
dcc.Markdown('''
For more information on what you can build with Dash, see the Dash [tutorial](https://dash.plot.ly/).
''')
])
return app
avg_line_length: 36.132075 | max_line_length: 187 | alphanum_fraction: 0.48094
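`create_app()` above only builds the layout; serving it is left to the caller. A minimal sketch of a local entry point, assuming Dash's standard `run_server` method and that `gdp-life-exp-2007.csv` sits in the working directory (the DNAnexus applet wrapper is not shown in this row):

```python
# Hypothetical entry point for running the app defined above.
if __name__ == "__main__":
    app = create_app()
    # Port 443 matches the httpsApp note in the layout; any free port works locally,
    # and binding 443 normally needs elevated privileges.
    app.run_server(host="0.0.0.0", port=443)
```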
hexsha: b7b6e802dfae9d92ddfe1ead8915dcc75e051589 | size: 2,406 | ext: py | lang: Python
max_stars: mopidy_headless/input.py in avanc/mopidy-headless @ 339af3d3697556be342bbd18d48d7c6102638af9, licenses ["Apache-2.0"], count 6, events 2020-08-25T06:37:06.000Z to 2021-11-14T17:25:17.000Z
max_issues: mopidy_headless/input.py in avanc/mopidy-headless @ 339af3d3697556be342bbd18d48d7c6102638af9, licenses ["Apache-2.0"], count null, event dates null
max_forks: mopidy_headless/input.py in avanc/mopidy-headless @ 339af3d3697556be342bbd18d48d7c6102638af9, licenses ["Apache-2.0"], count 1, events 2020-12-21T20:24:05.000Z to 2020-12-21T20:24:05.000Z
content:
from __future__ import unicode_literals
import logging
import threading
from evdev import ecodes, InputDevice, list_devices, categorize
from select import select
logger = logging.getLogger(__name__)
class InputThread(threading.Thread):
def __init__(self):
super(InputThread, self).__init__()
self.name="Input Thread"
self._stop=threading.Event()
self.devices_by_fn={}
self.devices_by_fd={}
self.handlers_by_fd={}
def stop(self):
self._stop.set()
logger.debug("Thread {0} is asked to stop".format(self.name))
def registerHandler(self, handler):
if (handler.device_fn in self.devices_by_fn):
device=self.devices_by_fn[handler.device_fn]
else:
device=InputDevice(handler.device_fn)
self.devices_by_fn[handler.device_fn]=device
self.devices_by_fd[device.fd]=device
self.handlers_by_fd[device.fd]=[]
#Check if device has needed event
capabilities= device.capabilities()
if handler.event_type in capabilities:
if (handler.event_code in capabilities[handler.event_type]):
self.handlers_by_fd[device.fd].append(handler)
return True
else:
logger.warning('Event {0} not found in input device "{1}"'.format(ecodes.bytype[handler.event_type][handler.event_code], device.name))
else:
logger.warning('Input device "{1}" has no capability {0}'.format(ecodes.EV[handler.event_type], device.name))
return False
def run(self):
while not self._stop.isSet():
r,w,x = select(self.devices_by_fd, [], [], 10)
for fd in r:
for event in self.devices_by_fd[fd].read():
for handler in self.handlers_by_fd[fd]:
handler.check(event)
logger.debug("Thread {0} stopped".format(self.name))
class Handler(object):
def __init__(self, device_fn, event_type, event_code):
self.device_fn=device_fn
if (event_type in ecodes.ecodes):
self.event_type=ecodes.ecodes[event_type]
else:
logger.error('Event type {0} unknown'.format(event_type))
if (event_code in ecodes.ecodes):
self.event_code=ecodes.ecodes[event_code]
else:
logger.error('Event {0} not found for {1} events'.format(event_code, event_type))
def check(self, event):
if self.event_type == event.type:
if self.event_code == event.code:
self.handle(event)
def handle(self, event):
pass
avg_line_length: 27.976744 | max_line_length: 144 | alphanum_fraction: 0.686201
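`Handler` above is meant to be subclassed: `check()` filters events by type and code, and `handle()` is the hook. A sketch of wiring a handler into `InputThread`, assuming it lives alongside the classes above and that an evdev node such as `/dev/input/event0` exposes `KEY_PLAYPAUSE` (both the device path and the key are assumptions):

```python
class PrintHandler(Handler):
    def handle(self, event):
        # Called for every matching event; event.value is 1 on key press, 0 on release
        print("play/pause event, value={0}".format(event.value))


# Hypothetical device node and key code
handler = PrintHandler("/dev/input/event0", "EV_KEY", "KEY_PLAYPAUSE")

thread = InputThread()
if thread.registerHandler(handler):
    thread.start()
    # ... later, on shutdown:
    # thread.stop()
```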
hexsha: 5291c1522565aa82514608dba1886b28edb834a8 | size: 1,164 | ext: py | lang: Python
max_stars: boto3_exceptions/cloudformation.py in siteshen/boto3_exceptions @ d6174c2577c9d4b17a09a89cd0e4bd1fe555b26b, licenses ["MIT"], count 2, events 2021-06-22T00:00:35.000Z to 2021-07-15T03:25:52.000Z
max_issues: boto3_exceptions/cloudformation.py in siteshen/boto3_exceptions @ d6174c2577c9d4b17a09a89cd0e4bd1fe555b26b, licenses ["MIT"], count null, event dates null
max_forks: boto3_exceptions/cloudformation.py in siteshen/boto3_exceptions @ d6174c2577c9d4b17a09a89cd0e4bd1fe555b26b, licenses ["MIT"], count null, event dates null
content:
import boto3
exceptions = boto3.client('cloudformation').exceptions
AlreadyExistsException = exceptions.AlreadyExistsException
ChangeSetNotFoundException = exceptions.ChangeSetNotFoundException
CreatedButModifiedException = exceptions.CreatedButModifiedException
InsufficientCapabilitiesException = exceptions.InsufficientCapabilitiesException
InvalidChangeSetStatusException = exceptions.InvalidChangeSetStatusException
InvalidOperationException = exceptions.InvalidOperationException
LimitExceededException = exceptions.LimitExceededException
NameAlreadyExistsException = exceptions.NameAlreadyExistsException
OperationIdAlreadyExistsException = exceptions.OperationIdAlreadyExistsException
OperationInProgressException = exceptions.OperationInProgressException
OperationNotFoundException = exceptions.OperationNotFoundException
StackInstanceNotFoundException = exceptions.StackInstanceNotFoundException
StackSetNotEmptyException = exceptions.StackSetNotEmptyException
StackSetNotFoundException = exceptions.StackSetNotFoundException
StaleRequestException = exceptions.StaleRequestException
TokenAlreadyExistsException = exceptions.TokenAlreadyExistsException
avg_line_length: 55.428571 | max_line_length: 80 | alphanum_fraction: 0.919244
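These names re-export the botocore error classes that boto3 generates for the CloudFormation client, so they can be caught like ordinary exceptions. A short sketch, assuming the module is importable as `boto3_exceptions.cloudformation` (per the repo path above) and that AWS credentials and a region are configured; the stack name and template body are placeholders:

```python
import boto3
from boto3_exceptions.cloudformation import AlreadyExistsException

client = boto3.client("cloudformation")
try:
    # Placeholder arguments; a real call needs a valid CloudFormation template
    client.create_stack(StackName="my-stack", TemplateBody="{...}")
except AlreadyExistsException:
    print("Stack already exists, skipping creation")
```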
hexsha: 8a53ef5b3e2272e0c5961944b446d846491bf8db | size: 13,780 | ext: py | lang: Python
max_stars: cirq-core/cirq/sim/clifford/stabilizer_state_ch_form.py in thomascherickal/Cirq @ 43b37fa56311f19ca9848f4dc4e162bc8a1bbc83, licenses ["Apache-2.0"], count null, event dates null
max_issues: cirq-core/cirq/sim/clifford/stabilizer_state_ch_form.py in thomascherickal/Cirq @ 43b37fa56311f19ca9848f4dc4e162bc8a1bbc83, licenses ["Apache-2.0"], count null, event dates null
max_forks: cirq-core/cirq/sim/clifford/stabilizer_state_ch_form.py in thomascherickal/Cirq @ 43b37fa56311f19ca9848f4dc4e162bc8a1bbc83, licenses ["Apache-2.0"], count null, event dates null
content:
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Sequence, Union
import numpy as np
import cirq
from cirq import protocols, qis, value
from cirq.value import big_endian_int_to_digits
@value.value_equality
class StabilizerStateChForm(qis.StabilizerState):
r"""A representation of stabilizer states using the CH form,
$|\psi> = \omega U_C U_H |s>$
This representation keeps track of overall phase.
Reference: https://arxiv.org/abs/1808.00128
"""
def __init__(self, num_qubits: int, initial_state: int = 0) -> None:
"""Initializes StabilizerStateChForm
Args:
num_qubits: The number of qubits in the system.
initial_state: The computational basis representation of the
state as a big endian int.
"""
self.n = num_qubits
# The state is represented by a set of binary matrices and vectors.
# See Section IVa of Bravyi et al
self.G = np.eye(self.n, dtype=bool)
self.F = np.eye(self.n, dtype=bool)
self.M = np.zeros((self.n, self.n), dtype=bool)
self.gamma = np.zeros(self.n, dtype=int)
self.v = np.zeros(self.n, dtype=bool)
self.s = np.zeros(self.n, dtype=bool)
self.omega: complex = 1
# Apply X for every non-zero element of initial_state
for (i, val) in enumerate(
big_endian_int_to_digits(initial_state, digit_count=num_qubits, base=2)
):
if val:
self.apply_x(i)
def _json_dict_(self) -> Dict[str, Any]:
return protocols.obj_to_dict_helper(self, ['n', 'G', 'F', 'M', 'gamma', 'v', 's', 'omega'])
@classmethod
def _from_json_dict_(cls, n, G, F, M, gamma, v, s, omega, **kwargs):
copy = StabilizerStateChForm(n)
copy.G = np.array(G)
copy.F = np.array(F)
copy.M = np.array(M)
copy.gamma = np.array(gamma)
copy.v = np.array(v)
copy.s = np.array(s)
copy.omega = omega
return copy
def _value_equality_values_(self) -> Any:
return (self.n, self.G, self.F, self.M, self.gamma, self.v, self.s, self.omega)
def copy(self) -> 'cirq.StabilizerStateChForm':
copy = StabilizerStateChForm(self.n)
copy.G = self.G.copy()
copy.F = self.F.copy()
copy.M = self.M.copy()
copy.gamma = self.gamma.copy()
copy.v = self.v.copy()
copy.s = self.s.copy()
copy.omega = self.omega
return copy
def __str__(self) -> str:
"""Return the state vector string representation of the state."""
return cirq.dirac_notation(self.to_state_vector())
def __repr__(self) -> str:
"""Return the CH form representation of the state."""
return f'StabilizerStateChForm(num_qubits={self.n!r})'
def inner_product_of_state_and_x(self, x: int) -> Union[float, complex]:
"""Returns the amplitude of x'th element of
the state vector, i.e. <x|psi>"""
if type(x) == int:
y = cirq.big_endian_int_to_bits(x, bit_count=self.n)
mu = sum(y * self.gamma)
u = np.zeros(self.n, dtype=bool)
for p in range(self.n):
if y[p]:
u ^= self.F[p, :]
mu += 2 * (sum(self.M[p, :] & u) % 2)
return (
self.omega
* 2 ** (-sum(self.v) / 2)
* 1j ** mu
* (-1) ** sum(self.v & u & self.s)
* np.all(self.v | (u == self.s))
)
def state_vector(self) -> np.ndarray:
wf = np.zeros(2 ** self.n, dtype=complex)
for x in range(2 ** self.n):
wf[x] = self.inner_product_of_state_and_x(x)
return wf
def _S_right(self, q):
r"""Right multiplication version of S gate."""
self.M[:, q] ^= self.F[:, q]
self.gamma[:] = (self.gamma[:] - self.F[:, q]) % 4
def _CZ_right(self, q, r):
r"""Right multiplication version of CZ gate."""
self.M[:, q] ^= self.F[:, r]
self.M[:, r] ^= self.F[:, q]
self.gamma[:] = (self.gamma[:] + 2 * self.F[:, q] * self.F[:, r]) % 4
def _CNOT_right(self, q, r):
r"""Right multiplication version of CNOT gate."""
self.G[:, q] ^= self.G[:, r]
self.F[:, r] ^= self.F[:, q]
self.M[:, q] ^= self.M[:, r]
def update_sum(self, t, u, delta=0, alpha=0):
"""Implements the transformation (Proposition 4 in Bravyi et al)
``i^alpha U_H (|t> + i^delta |u>) = omega W_C W_H |s'>``
"""
if np.all(t == u):
self.s = t
self.omega *= 1 / np.sqrt(2) * (-1) ** alpha * (1 + 1j ** delta)
return
set0 = np.where((~self.v) & (t ^ u))[0]
set1 = np.where(self.v & (t ^ u))[0]
# implement Vc
if len(set0) > 0:
q = set0[0]
for i in set0:
if i != q:
self._CNOT_right(q, i)
for i in set1:
self._CZ_right(q, i)
elif len(set1) > 0:
q = set1[0]
for i in set1:
if i != q:
self._CNOT_right(i, q)
e = np.zeros(self.n, dtype=bool)
e[q] = True
if t[q]:
y = u ^ e
z = u
else:
y = t
z = t ^ e
(omega, a, b, c) = self._H_decompose(self.v[q], y[q], z[q], delta)
self.s = y
self.s[q] = c
self.omega *= (-1) ** alpha * omega
if a:
self._S_right(q)
self.v[q] ^= b ^ self.v[q]
def _H_decompose(self, v, y, z, delta):
"""Determines the transformation
H^v (|y> + i^delta |z>) = omega S^a H^b |c>
where the state represents a single qubit.
Input: v,y,z are boolean; delta is an integer (mod 4)
Outputs: a,b,c are boolean; omega is a complex number
Precondition: y != z"""
if y == z:
raise ValueError('|y> is equal to |z>')
if not v:
omega = (1j) ** (delta * int(y))
delta2 = ((-1) ** y * delta) % 4
c = bool((delta2 >> 1))
a = bool(delta2 & 1)
b = True
else:
if not (delta & 1):
a = False
b = False
c = bool(delta >> 1)
omega = (-1) ** (c & y)
else:
omega = 1 / np.sqrt(2) * (1 + 1j ** delta)
b = True
a = True
c = not ((delta >> 1) ^ y)
return omega, a, b, c
def to_state_vector(self) -> np.ndarray:
arr = np.zeros(2 ** self.n, dtype=complex)
for x in range(len(arr)):
arr[x] = self.inner_product_of_state_and_x(x)
return arr
def _measure(self, q, prng: np.random.RandomState) -> int:
"""Measures the q'th qubit.
Reference: Section 4.1 "Simulating measurements"
Returns: Computational basis measurement as 0 or 1.
"""
w = self.s.copy()
for i, v_i in enumerate(self.v):
if v_i == 1:
w[i] = bool(prng.randint(2))
x_i = sum(w & self.G[q, :]) % 2
# Project the state to the above measurement outcome.
self.project_Z(q, x_i)
return x_i
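# Hedged sketch of driving _measure directly (the RandomState seed is illustrative only):
#   plus = StabilizerStateChForm(1)
#   plus.apply_h(0)                                        # prepare |+>
#   outcome = plus._measure(0, np.random.RandomState(0))   # 0 or 1, each with probability 1/2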
def project_Z(self, q, z):
"""Applies a Z projector on the q'th qubit.
Returns: a normalized state with Z_q |psi> = z |psi>
"""
t = self.s.copy()
u = (self.G[q, :] & self.v) ^ self.s
delta = (2 * sum((self.G[q, :] & (~self.v)) & self.s) + 2 * z) % 4
if np.all(t == u):
self.omega /= np.sqrt(2)
self.update_sum(t, u, delta=delta)
def kron(self, other: 'cirq.StabilizerStateChForm') -> 'cirq.StabilizerStateChForm':
n = self.n + other.n
copy = StabilizerStateChForm(n)
copy.G[: self.n, : self.n] = self.G
copy.G[self.n :, self.n :] = other.G
copy.F[: self.n, : self.n] = self.F
copy.F[self.n :, self.n :] = other.F
copy.M[: self.n, : self.n] = self.M
copy.M[self.n :, self.n :] = other.M
copy.gamma = np.concatenate([self.gamma, other.gamma])
copy.v = np.concatenate([self.v, other.v])
copy.s = np.concatenate([self.s, other.s])
copy.omega = self.omega * other.omega
return copy
def reindex(self, axes: Sequence[int]) -> 'cirq.StabilizerStateChForm':
copy = StabilizerStateChForm(self.n)
copy.G = self.G[axes][:, axes]
copy.F = self.F[axes][:, axes]
copy.M = self.M[axes][:, axes]
copy.gamma = self.gamma[axes]
copy.v = self.v[axes]
copy.s = self.s[axes]
copy.omega = self.omega
return copy
def apply_x(self, axis: int, exponent: float = 1, global_shift: float = 0):
if exponent % 2 != 0:
if exponent % 0.5 != 0.0:
raise ValueError('X exponent must be half integer') # coverage: ignore
self.apply_h(axis)
self.apply_z(axis, exponent)
self.apply_h(axis)
self.omega *= _phase(exponent, global_shift)
def apply_y(self, axis: int, exponent: float = 1, global_shift: float = 0):
if exponent % 0.5 != 0.0:
raise ValueError('Y exponent must be half integer') # coverage: ignore
shift = _phase(exponent, global_shift)
if exponent % 2 == 0:
self.omega *= shift
elif exponent % 2 == 0.5:
self.apply_z(axis)
self.apply_h(axis)
self.omega *= shift * (1 + 1j) / (2 ** 0.5)
elif exponent % 2 == 1:
self.apply_z(axis)
self.apply_h(axis)
self.apply_z(axis)
self.apply_h(axis)
self.omega *= shift * 1j
elif exponent % 2 == 1.5:
self.apply_h(axis)
self.apply_z(axis)
self.omega *= shift * (1 - 1j) / (2 ** 0.5)
def apply_z(self, axis: int, exponent: float = 1, global_shift: float = 0):
if exponent % 2 != 0:
if exponent % 0.5 != 0.0:
raise ValueError('Z exponent must be half integer') # coverage: ignore
effective_exponent = exponent % 2
for _ in range(int(effective_exponent * 2)):
# Prescription for S left multiplication.
# Reference: https://arxiv.org/abs/1808.00128 Proposition 4 end
self.M[axis, :] ^= self.G[axis, :]
self.gamma[axis] = (self.gamma[axis] - 1) % 4
self.omega *= _phase(exponent, global_shift)
def apply_h(self, axis: int, exponent: float = 1, global_shift: float = 0):
if exponent % 2 != 0:
if exponent % 1 != 0:
raise ValueError('H exponent must be integer') # coverage: ignore
# Prescription for H left multiplication
# Reference: https://arxiv.org/abs/1808.00128
# Equations 48, 49 and Proposition 4
t = self.s ^ (self.G[axis, :] & self.v)
u = self.s ^ (self.F[axis, :] & (~self.v)) ^ (self.M[axis, :] & self.v)
alpha = sum(self.G[axis, :] & (~self.v) & self.s) % 2
beta = sum(self.M[axis, :] & (~self.v) & self.s)
beta += sum(self.F[axis, :] & self.v & self.M[axis, :])
beta += sum(self.F[axis, :] & self.v & self.s)
beta %= 2
delta = (self.gamma[axis] + 2 * (alpha + beta)) % 4
self.update_sum(t, u, delta=delta, alpha=alpha)
self.omega *= _phase(exponent, global_shift)
def apply_cz(
self, control_axis: int, target_axis: int, exponent: float = 1, global_shift: float = 0
):
if exponent % 2 != 0:
if exponent % 1 != 0:
raise ValueError('CZ exponent must be integer') # coverage: ignore
# Prescription for CZ left multiplication.
# Reference: https://arxiv.org/abs/1808.00128 Proposition 4 end
self.M[control_axis, :] ^= self.G[target_axis, :]
self.M[target_axis, :] ^= self.G[control_axis, :]
self.omega *= _phase(exponent, global_shift)
def apply_cx(
self, control_axis: int, target_axis: int, exponent: float = 1, global_shift: float = 0
):
if exponent % 2 != 0:
if exponent % 1 != 0:
raise ValueError('CX exponent must be integer') # coverage: ignore
# Prescription for CX left multiplication.
# Reference: https://arxiv.org/abs/1808.00128 Proposition 4 end
self.gamma[control_axis] = (
self.gamma[control_axis]
+ self.gamma[target_axis]
+ 2 * (sum(self.M[control_axis, :] & self.F[target_axis, :]) % 2)
) % 4
self.G[target_axis, :] ^= self.G[control_axis, :]
self.F[control_axis, :] ^= self.F[target_axis, :]
self.M[control_axis, :] ^= self.M[target_axis, :]
self.omega *= _phase(exponent, global_shift)
def apply_global_phase(self, coefficient: value.Scalar):
self.omega *= coefficient
def _phase(exponent, global_shift):
return np.exp(1j * np.pi * global_shift * exponent)
| 35.242967
| 99
| 0.531495
|
80f2ad8b8013070800780b853580c29efacfc7f3
| 91,346
|
py
|
Python
|
test_init_final.py
|
wotjr1466/Test
|
a917055bab94180c511c575e9003e3c6b1c8b7dd
|
[
"Apache-2.0"
] | null | null | null |
test_init_final.py
|
wotjr1466/Test
|
a917055bab94180c511c575e9003e3c6b1c8b7dd
|
[
"Apache-2.0"
] | null | null | null |
test_init_final.py
|
wotjr1466/Test
|
a917055bab94180c511c575e9003e3c6b1c8b7dd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
################ Server V15.1 #####################
import os
import sys
import asyncio
import discord
import datetime
import random
import math
import logging
from discord.ext import commands
from gtts import gTTS
from github import Github, GithubException # GithubException is caught in the save helpers below
import base64
import re # settlement feature
import gspread # settlement feature
from oauth2client.service_account import ServiceAccountCredentials # settlement feature
from io import StringIO
import urllib.request
##################### Logging ###########################
log_stream = StringIO()
logging.basicConfig(stream=log_stream, level=logging.WARNING)
#ilsanglog = logging.getLogger('discord')
#ilsanglog.setLevel(level = logging.WARNING)
#handler = logging.StreamHandler()
#handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
#ilsanglog.addHandler(handler)
#####################################################
basicSetting = []
bossData = []
fixed_bossData = []
bossNum = 0
fixed_bossNum = 0
chkvoicechannel = 0
chkrelogin = 0
chflg = 0
LoadChk = 0
bossTime = []
tmp_bossTime = []
fixed_bossTime = []
bossTimeString = []
bossDateString = []
tmp_bossTimeString = []
tmp_bossDateString = []
bossFlag = []
bossFlag0 = []
fixed_bossFlag = []
fixed_bossFlag0 = []
bossMungFlag = []
bossMungCnt = []
channel_info = []
channel_name = []
channel_id = []
channel_voice_name = []
channel_voice_id = []
channel_type = []
FixedBossDateData = []
indexFixedBossname = []
client = discord.Client()
access_token = os.environ["BOT_TOKEN"]
git_access_token = os.environ["GIT_TOKEN"]
git_access_repo = os.environ["GIT_REPO"]
git_access_repo_restart = os.environ["GIT_REPO_RESTART"]
g = Github(git_access_token)
repo = g.get_repo(git_access_repo)
repo_restart = g.get_repo(git_access_repo_restart)
def init():
global basicSetting
global bossData
global fixed_bossData
global bossNum
global fixed_bossNum
global chkvoicechannel
global chkrelogin
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global fixed_bossFlag
global fixed_bossFlag0
global bossMungFlag
global bossMungCnt
global voice_client1
global channel_info
global channel_name
global channel_voice_name
global channel_voice_id
global channel_id
global channel_type
global LoadChk
global indexFixedBossname
global FixedBossDateData
global endTime
global gc # settlement
global credentials # settlement
global regenembed
global command
global kill_Data
global kill_Time
command = []
tmp_bossData = []
tmp_fixed_bossData = []
FixedBossDateData = []
indexFixedBossname = []
kill_Data = []
tmp_kill_Data = []
f = []
fb = []
fk = []
#print("test")
inidata = repo.get_contents("test_setting.ini")
file_data1 = base64.b64decode(inidata.content)
file_data1 = file_data1.decode('utf-8')
inputData = file_data1.split('\n')
command_inidata = repo.get_contents("command.ini")
file_data4 = base64.b64decode(command_inidata.content)
file_data4 = file_data4.decode('utf-8')
command_inputData = file_data4.split('\n')
boss_inidata = repo.get_contents("boss.ini")
file_data3 = base64.b64decode(boss_inidata.content)
file_data3 = file_data3.decode('utf-8')
boss_inputData = file_data3.split('\n')
fixed_inidata = repo.get_contents("fixed_boss.ini")
file_data2 = base64.b64decode(fixed_inidata.content)
file_data2 = file_data2.decode('utf-8')
fixed_inputData = file_data2.split('\n')
kill_inidata = repo.get_contents("kill_list.ini")
file_data5 = base64.b64decode(kill_inidata.content)
file_data5 = file_data5.decode('utf-8')
kill_inputData = file_data5.split('\n')
for i in range(len(fixed_inputData)):
FixedBossDateData.append(fixed_inputData[i])
index_fixed = 0
for value in FixedBossDateData:
if value.find('bossname') != -1:
indexFixedBossname.append(index_fixed)
index_fixed = index_fixed + 1
for i in range(inputData.count('\r')):
inputData.remove('\r')
for i in range(command_inputData.count('\r')):
command_inputData.remove('\r')
for i in range(boss_inputData.count('\r')):
boss_inputData.remove('\r')
for i in range(fixed_inputData.count('\r')):
fixed_inputData.remove('\r')
for i in range(kill_inputData.count('\r')):
kill_inputData.remove('\r')
del(command_inputData[0])
del(boss_inputData[0])
del(fixed_inputData[0])
del(kill_inputData[0])
############## Boss-timer bot initial settings list #####################
basicSetting.append(inputData[0][11:]) #basicSetting[0] : timezone
basicSetting.append(inputData[6][15:]) #basicSetting[1] : before_alert
basicSetting.append(inputData[8][10:]) #basicSetting[2] : mungChk
basicSetting.append(inputData[7][16:]) #basicSetting[3] : before_alert1
basicSetting.append(inputData[11][14:16]) #basicSetting[4] : restarttime hour
basicSetting.append(inputData[11][17:]) #basicSetting[5] : restarttime minute
basicSetting.append(inputData[1][15:]) #basicSetting[6] : voice channel ID
basicSetting.append(inputData[2][14:]) #basicSetting[7] : text channel ID
basicSetting.append(inputData[3][16:]) #basicSetting[8] : ladder channel ID
basicSetting.append(inputData[10][14:]) #basicSetting[9] : number of entries printed by !ㅂ
basicSetting.append(inputData[14][11:]) #basicSetting[10] : json key file name
basicSetting.append(inputData[4][17:]) #basicSetting[11] : settlement channel ID
basicSetting.append(inputData[13][12:]) #basicSetting[12] : spreadsheet name
basicSetting.append(inputData[12][16:]) #basicSetting[13] : restart interval (days)
basicSetting.append(inputData[15][12:]) #basicSetting[14] : worksheet name
basicSetting.append(inputData[16][12:]) #basicSetting[15] : input cell
basicSetting.append(inputData[17][13:]) #basicSetting[16] : output cell
basicSetting.append(inputData[9][13:]) #basicSetting[17] : auto-mung deletion threshold
basicSetting.append(inputData[5][14:]) #basicSetting[18] : kill channel ID
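# Rough shape of test_setting.ini implied by the fixed-width slicing above. Only 'timezone'
# and 'textchannel' appear verbatim elsewhere in this file; the other key names are inferred
# and should be treated as assumptions:
#   timezone = 9
#   voicechannel = <voice channel ID>
#   textchannel = <text channel ID>
#   ...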
############## Boss-timer bot command list #####################
for i in range(len(command_inputData)):
command.append(command_inputData[i][12:].rstrip('\r')) #command[0] ~ [24] : command strings
################## Kill list ###########################
for i in range(len(kill_inputData)):
tmp_kill_Data.append(kill_inputData[i].rstrip('\r'))
fk.append(tmp_kill_Data[i][:tmp_kill_Data[i].find(' ')])
fk.append(tmp_kill_Data[i][tmp_kill_Data[i].find(' ')+1:tmp_kill_Data[i].find(' ')+2])
kill_Data.append(fk) #kill_Data[0] : target name, kill_Data[1] : death count
fk = []
tmp_killtime = datetime.datetime.now().replace(hour=int(5), minute=int(0), second = int(0))
kill_Time = datetime.datetime.now()
if tmp_killtime < kill_Time :
kill_Time = tmp_killtime + datetime.timedelta(days=int(1))
else:
kill_Time = tmp_killtime
for i in range(len(basicSetting)):
basicSetting[i] = basicSetting[i].strip()
if basicSetting[6] != "":
basicSetting[6] = int(basicSetting[6])
if basicSetting[7] != "":
basicSetting[7] = int(basicSetting[7])
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
if int(basicSetting[13]) == 0 :
endTime = tmp_now.replace(hour=int(basicSetting[4]), minute=int(basicSetting[5]), second = int(0))
endTime = endTime + datetime.timedelta(days=int(1000))
else :
endTime = tmp_now.replace(hour=int(basicSetting[4]), minute=int(basicSetting[5]), second = int(0))
if endTime < tmp_now :
endTime = endTime + datetime.timedelta(days=int(basicSetting[13]))
### Pinned channels (hard-coded IDs, disabled) ###
#basicSetting[6] = int('597781866681991198') # voice channel ID
#basicSetting[7] = int('597782016607649829') # text channel ID
bossNum = int(len(boss_inputData)/5)
fixed_bossNum = int(len(fixed_inputData)/6)
for i in range(bossNum):
tmp_bossData.append(boss_inputData[i*5:i*5+5])
for i in range(fixed_bossNum):
tmp_fixed_bossData.append(fixed_inputData[i*6:i*6+6])
#print (tmp_bossData)
for j in range(bossNum):
for i in range(len(tmp_bossData[j])):
tmp_bossData[j][i] = tmp_bossData[j][i].strip()
for j in range(fixed_bossNum):
for i in range(len(tmp_fixed_bossData[j])):
tmp_fixed_bossData[j][i] = tmp_fixed_bossData[j][i].strip()
############## Regular boss info list #####################
for j in range(bossNum):
tmp_len = tmp_bossData[j][1].find(':')
f.append(tmp_bossData[j][0][11:]) #bossData[0] : boss name
f.append(tmp_bossData[j][1][10:tmp_len]) #bossData[1] : respawn hours
f.append(tmp_bossData[j][2][13:]) #bossData[2] : mung / no-entry flag
f.append(tmp_bossData[j][3][20:]) #bossData[3] : minutes-before alert message
f.append(tmp_bossData[j][4][13:]) #bossData[4] : spawn alert message
f.append(tmp_bossData[j][1][tmp_len+1:]) #bossData[5] : respawn minutes
f.append('') #bossData[6] : memo
bossData.append(f)
f = []
bossTime.append(datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0])))
tmp_bossTime.append(datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0])))
bossTimeString.append('99:99:99')
bossDateString.append('9999-99-99')
tmp_bossTimeString.append('99:99:99')
tmp_bossDateString.append('9999-99-99')
bossFlag.append(False)
bossFlag0.append(False)
bossMungFlag.append(False)
bossMungCnt.append(0)
tmp_fixed_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
############## Fixed boss info list #####################
for j in range(fixed_bossNum):
tmp_fixed_len = tmp_fixed_bossData[j][1].find(':')
tmp_fixedGen_len = tmp_fixed_bossData[j][2].find(':')
fb.append(tmp_fixed_bossData[j][0][11:]) #fixed_bossData[0] : boss name
fb.append(tmp_fixed_bossData[j][1][11:tmp_fixed_len]) #fixed_bossData[1] : hour
fb.append(tmp_fixed_bossData[j][1][tmp_fixed_len+1:]) #fixed_bossData[2] : minute
fb.append(tmp_fixed_bossData[j][4][20:]) #fixed_bossData[3] : minutes-before alert message
fb.append(tmp_fixed_bossData[j][5][13:]) #fixed_bossData[4] : spawn alert message
fb.append(tmp_fixed_bossData[j][2][12:tmp_fixedGen_len]) #fixed_bossData[5] : spawn cycle - hours
fb.append(tmp_fixed_bossData[j][2][tmp_fixedGen_len+1:]) #fixed_bossData[6] : spawn cycle - minutes
fb.append(tmp_fixed_bossData[j][3][12:16]) #fixed_bossData[7] : start date - year
fb.append(tmp_fixed_bossData[j][3][17:19]) #fixed_bossData[8] : start date - month
fb.append(tmp_fixed_bossData[j][3][20:22]) #fixed_bossData[9] : start date - day
fixed_bossData.append(fb)
fb = []
fixed_bossFlag.append(False)
fixed_bossFlag0.append(False)
fixed_bossTime.append(tmp_fixed_now.replace(year = int(fixed_bossData[j][7]), month = int(fixed_bossData[j][8]), day = int(fixed_bossData[j][9]), hour=int(fixed_bossData[j][1]), minute=int(fixed_bossData[j][2]), second = int(0)))
if fixed_bossTime[j] < tmp_fixed_now :
while fixed_bossTime[j] < tmp_fixed_now :
fixed_bossTime[j] = fixed_bossTime[j] + datetime.timedelta(hours=int(fixed_bossData[j][5]), minutes=int(fixed_bossData[j][6]), seconds = int(0))
################# Sort respawn boss times ######################
regenData = []
regenTime = []
regenbossName = []
outputTimeHour = []
outputTimeMin = []
for i in range(bossNum):
f.append(bossData[i][0])
f.append(bossData[i][1] + bossData[i][5])
regenData.append(f)
regenTime.append(bossData[i][1] + bossData[i][5])
f = []
regenTime = sorted(list(set(regenTime)))
for j in range(len(regenTime)):
for i in range(len(regenData)):
if regenTime[j] == regenData[i][1] :
f.append(regenData[i][0])
regenbossName.append(f)
outputTimeHour.append(int(regenTime[j][:2]))
outputTimeMin.append(int(regenTime[j][2:]))
f = []
regenembed = discord.Embed(
title='----- 리스폰 보스 -----',
description= ' ')
for i in range(len(regenTime)):
if outputTimeMin[i] == 0 :
regenembed.add_field(name=str(outputTimeHour[i]) + '시간', value= '```'+ ', '.join(map(str, sorted(regenbossName[i]))) + '```', inline=False)
else :
regenembed.add_field(name=str(outputTimeHour[i]) + '시간' + str(outputTimeMin[i]) + '분', value= '```' + ','.join(map(str, sorted(regenbossName[i]))) + '```', inline=False)
##########################################################
if basicSetting[10] !="":
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive'] # settlement
credentials = ServiceAccountCredentials.from_json_keyfile_name(basicSetting[10], scope) # settlement
init()
channel = ''
async def task():
await client.wait_until_ready()
global channel
global endTime
global basicSetting
global bossData
global fixed_bossData
global bossNum
global fixed_bossNum
global chkvoicechannel
global chkrelogin
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global fixed_bossFlag
global fixed_bossFlag0
global bossMungFlag
global bossMungCnt
global voice_client1
global channel_info
global channel_name
global channel_id
global channel_voice_name
global channel_voice_id
global channel_type
global endTime
global kill_Time
if chflg == 1 :
if voice_client1.is_connected() == False :
voice_client1 = await client.get_channel(basicSetting[6]).connect(reconnect=True)
if voice_client1.is_connected() :
await dbLoad()
await client.get_channel(channel).send( '< 언젠간 복수할테다! >', tts=False)
print("명치복구완료!")
while not client.is_closed():
if chflg == 1 :
if voice_client1.is_connected() == False :
voice_client1 = await client.get_channel(basicSetting[6]).connect(reconnect=True)
if voice_client1.is_connected() :
await dbLoad()
await client.get_channel(channel).send( '< 하지만 극뽁~ >', tts=False)
print("인중복구완료!")
while not client.is_closed():
############ Catch the warning! ############
if log_stream.getvalue().find("Awaiting") != -1:
log_stream.truncate(0)
log_stream.seek(0)
await client.get_channel(channel).send( '< 디코접속에러! 잠깐 나갔다 올께요! >', tts=False)
for i in range(bossNum):
if bossMungFlag[i] == True:
bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
await dbSave()
raise SystemExit
log_stream.truncate(0)
log_stream.seek(0)
##################################
now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
priv0 = now+datetime.timedelta(minutes=int(basicSetting[3]))
priv = now+datetime.timedelta(minutes=int(basicSetting[1]))
aftr = now+datetime.timedelta(minutes=int(0-int(basicSetting[2])))
if channel != '':
################ Boss-timer bot restart ################
if endTime.strftime('%Y-%m-%d ') + endTime.strftime('%H:%M:%S') == now.strftime('%Y-%m-%d ') + now.strftime('%H:%M:%S'):
if basicSetting[2] != '0':
for i in range(bossNum):
if bossMungFlag[i] == True:
bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
await dbSave()
await FixedBossDateSave()
await kill_list_Save()
#await client.get_channel(channel).send('<갑자기 인사해도 놀라지마세요!>', tts=False)
print("보탐봇재시작!")
endTime = endTime + datetime.timedelta(days = int(basicSetting[13]))
await asyncio.sleep(2)
inidata_restart = repo_restart.get_contents("restart.txt")
file_data_restart = base64.b64decode(inidata_restart.content)
file_data_restart = file_data_restart.decode('utf-8')
inputData_restart = file_data_restart.split('\n')
if len(inputData_restart) < 3:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_0", "restart\nrestart\nrestrat\n", contents12.sha)
else:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_1", "", contents12.sha)
################ Kill list reset ################
if kill_Time.strftime('%Y-%m-%d ') + kill_Time.strftime('%H:%M') == now.strftime('%Y-%m-%d ') + now.strftime('%H:%M'):
kill_Time = kill_Time + datetime.timedelta(days=int(1))
await initkill_list()
################ Fixed boss check ################
for i in range(fixed_bossNum):
################ before_alert1 ################
if fixed_bossTime[i] <= priv0 and fixed_bossTime[i] > priv:
if basicSetting[3] != '0':
if fixed_bossFlag0[i] == False:
fixed_bossFlag0[i] = True
await client.get_channel(channel).send("```" + fixed_bossData[i][0] + ' ' + basicSetting[3] + '분 전 ' + fixed_bossData[i][3] +' [' + fixed_bossTime[i].strftime('%H:%M:%S') + ']```', tts=False)
await PlaySound(voice_client1, './sound/' + fixed_bossData[i][0] + '알림1.mp3')
################ before_alert ################
if fixed_bossTime[i] <= priv and fixed_bossTime[i] > now:
if basicSetting[1] != '0' :
if fixed_bossFlag[i] == False:
fixed_bossFlag[i] = True
await client.get_channel(channel).send("```" + fixed_bossData[i][0] + ' ' + basicSetting[1] + '분 전 ' + fixed_bossData[i][3] +' [' + fixed_bossTime[i].strftime('%H:%M:%S') + ']```', tts=False)
await PlaySound(voice_client1, './sound/' + fixed_bossData[i][0] + '알림.mp3')
################ Boss spawn time check ################
if fixed_bossTime[i] <= now :
fixed_bossTime[i] = fixed_bossTime[i]+datetime.timedelta(hours=int(fixed_bossData[i][5]), minutes=int(fixed_bossData[i][6]), seconds = int(0))
fixed_bossFlag0[i] = False
fixed_bossFlag[i] = False
embed = discord.Embed(
description= "```" + fixed_bossData[i][0] + fixed_bossData[i][4] + "```" ,
color=0x00ff00
)
await client.get_channel(channel).send(embed=embed, tts=False)
await PlaySound(voice_client1, './sound/' + fixed_bossData[i][0] + '젠.mp3')
################ Regular boss check ################
for i in range(bossNum):
################ before_alert1 ################
if bossTime[i] <= priv0 and bossTime[i] > priv:
if basicSetting[3] != '0':
if bossFlag0[i] == False:
bossFlag0[i] = True
if bossData[i][6] != '' :
await client.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[3] + '분 전 ' + bossData[i][3] + " [" + bossTimeString[i] + "]" + '\n<' + bossData[i][6] + '>```', tts=False)
else :
await client.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[3] + '분 전 ' + bossData[i][3] + " [" + bossTimeString[i] + "]```", tts=False)
await PlaySound(voice_client1, './sound/' + bossData[i][0] + '알림1.mp3')
################ before_alert ################
if bossTime[i] <= priv and bossTime[i] > now:
if basicSetting[1] != '0' :
if bossFlag[i] == False:
bossFlag[i] = True
if bossData[i][6] != '' :
await client.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[1] + '분 전 ' + bossData[i][3] + " [" + bossTimeString[i] + "]" + '\n<' + bossData[i][6] + '>```', tts=False)
else :
await client.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[1] + '분 전 ' + bossData[i][3] + " [" + bossTimeString[i] + "]```", tts=False)
await PlaySound(voice_client1, './sound/' + bossData[i][0] + '알림.mp3')
################ Boss spawn time check ################
if bossTime[i] <= now :
#print ('if ', bossTime[i])
bossMungFlag[i] = True
tmp_bossTime[i] = bossTime[i]
tmp_bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
tmp_bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
bossTimeString[i] = '99:99:99'
bossDateString[i] = '9999-99-99'
bossTime[i] = now+datetime.timedelta(days=365)
if bossData[i][6] != '' :
embed = discord.Embed(
description= "```" + bossData[i][0] + bossData[i][4] + '\n<' + bossData[i][6] + '>```' ,
color=0x00ff00
)
else :
embed = discord.Embed(
description= "```" + bossData[i][0] + bossData[i][4] + "```" ,
color=0x00ff00
)
await client.get_channel(channel).send(embed=embed, tts=False)
await PlaySound(voice_client1, './sound/' + bossData[i][0] + '젠.mp3')
################ Automatic boss mung (missed spawn) handling ################
if bossMungFlag[i] == True:
if (bossTime[i]+datetime.timedelta(days=-365)) <= aftr:
if basicSetting[2] != '0':
if int(basicSetting[17]) <= bossMungCnt[i] and int(basicSetting[17]) != 0:
bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
tmp_bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
bossTimeString[i] = '99:99:99'
bossDateString[i] = '9999-99-99'
tmp_bossTimeString[i] = '99:99:99'
tmp_bossDateString[i] = '9999-99-99'
bossFlag[i] = (False)
bossFlag0[i] = (False)
bossMungFlag[i] = (False)
bossMungCnt[i] = 0
await client.get_channel(channel).send('```자동 멍처리 횟수 ' + basicSetting[17] + '회 초과! [' + bossData[i][0] + '] 삭제!```', tts=False)
await dbSave()
print ('자동멍처리 횟수초과 <' + bossData[i][0] + ' 삭제완료>')
else:
################ Unrecorded (no-entry) boss ################
if bossData[i][2] == '0':
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = nextTime = tmp_bossTime[i]+datetime.timedelta(hours=int(bossData[i][1]), minutes=int(bossData[i][5]))
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
await client.get_channel(channel).send("```" + bossData[i][0] + ' 미입력 됐습니다.```', tts=False)
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await client.get_channel(channel).send(embed=embed, tts=False)
await PlaySound(voice_client1, './sound/' + bossData[i][0] + '미입력.mp3')
################ Mung (missed spawn) boss ################
else :
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = nextTime = tmp_bossTime[i]+datetime.timedelta(hours=int(bossData[i][1]), minutes=int(bossData[i][5]))
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
await client.get_channel(channel).send("```" + bossData[i][0] + ' 멍 입니다.```')
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await client.get_channel(channel).send(embed=embed, tts=False)
await PlaySound(voice_client1, './sound/' + bossData[i][0] + '멍.mp3')
await asyncio.sleep(1) # main loop tick: runs roughly once per second
#sound file generation function (uses gTTS, male voice)
async def MakeSound(saveSTR, filename):
'''
tts = gTTS(saveSTR, lang = 'ko')
tts.save('./' + filename + '.mp3')
'''
try:
encText = urllib.parse.quote(saveSTR)
urllib.request.urlretrieve("https://clova.ai/proxy/voice/api/tts?text=" + encText + "%0A&voicefont=1&format=wav",filename + '.wav')
except Exception as e:
print (e)
tts = gTTS(saveSTR, lang = 'ko')
tts.save('./' + filename + '.wav')
pass
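# Hedged usage sketch (the call site and file name are illustrative, not from this file):
#   await MakeSound('보스 젠입니다', './sound/보스젠')   # writes ./sound/보스젠.wav via the Clova
#                                                       # proxy URL, falling back to gTTS on error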
#sound file playback function
async def PlaySound(voiceclient, filename):
source = discord.FFmpegPCMAudio(filename)
try:
voiceclient.play(source)
except discord.errors.ClientException:
while voiceclient.is_playing():
await asyncio.sleep(1)
while voiceclient.is_playing():
await asyncio.sleep(1)
voiceclient.stop()
source.cleanup()
#save my_bot.db
async def dbSave():
global bossData
global bossNum
global bossTime
global bossTimeString
global bossDateString
global bossMungCnt
for i in range(bossNum):
for j in range(bossNum):
if bossTimeString[i] != '99:99:99' and bossTimeString[j] != '99:99:99': # compare both entries against the placeholder
if bossTimeString[i] == bossTimeString[j] and i != j:
tmp_time1 = bossTimeString[j][:6]
tmp_time2 = (int(bossTimeString[j][6:]) + 1)%100
if tmp_time2 < 10 :
tmp_time22 = '0' + str(tmp_time2)
elif tmp_time2 == 60 :
tmp_time22 = '00'
else :
tmp_time22 = str(tmp_time2)
bossTimeString[j] = tmp_time1 + tmp_time22
datelist1 = bossTime
datelist = list(set(datelist1))
information1 = '----- 보스탐 정보 -----\n'
for timestring in sorted(datelist):
for i in range(bossNum):
if timestring == bossTime[i]:
if bossTimeString[i] != '99:99:99' :
if bossData[i][2] == '0' :
information1 += ' - ' + bossData[i][0] + '(' + bossData[i][1] + '.' + bossData[i][5] + ') : ' + bossTimeString[i] + ' @ ' + bossDateString[i] + ' (미입력 ' + str(bossMungCnt[i]) + '회)' + ' * ' + bossData[i][6] + '\n'
else :
information1 += ' - ' + bossData[i][0] + '(' + bossData[i][1] + '.' + bossData[i][5] + ') : ' + bossTimeString[i] + ' @ ' + bossDateString[i] + ' (멍 ' + str(bossMungCnt[i]) + '회)' + ' * ' + bossData[i][6] + '\n'
try :
contents = repo.get_contents("my_bot.db")
repo.update_file(contents.path, "bossDB", information1, contents.sha)
except GithubException as e :
print ('save error!!')
print(e.args[1]['message']) # output: This repository is empty.
errortime = datetime.datetime.now()
print (errortime)
pass
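# Shape of one my_bot.db line written by dbSave() above (boss name and memo are placeholders):
#   - SomeBoss(4.30) : 12:34:56 @ 2020-01-01 (멍 1회) * some memo
# dbLoad() below parses these lines back using the '-', '(', ':', '@' and '*' markers.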
#load my_bot.db
async def dbLoad():
global LoadChk
contents1 = repo.get_contents("my_bot.db")
file_data = base64.b64decode(contents1.content)
file_data = file_data.decode('utf-8')
beforeBossData = file_data.split('\n')
if len(beforeBossData) > 1:
for i in range(len(beforeBossData)-1):
for j in range(bossNum):
startPos = beforeBossData[i+1].find('-')
endPos = beforeBossData[i+1].find('(')
if beforeBossData[i+1][startPos+2:endPos] == bossData[j][0] :
#if beforeBossData[i+1].find(bossData[j][0]) != -1 :
tmp_mungcnt = 0
tmp_len = beforeBossData[i+1].find(':')
tmp_datelen = beforeBossData[i+1].find('@')
tmp_msglen = beforeBossData[i+1].find('*')
years1 = beforeBossData[i+1][tmp_datelen+2:tmp_datelen+6]
months1 = beforeBossData[i+1][tmp_datelen+7:tmp_datelen+9]
days1 = beforeBossData[i+1][tmp_datelen+10:tmp_datelen+12]
hours1 = beforeBossData[i+1][tmp_len+2:tmp_len+4]
minutes1 = beforeBossData[i+1][tmp_len+5:tmp_len+7]
seconds1 = beforeBossData[i+1][tmp_len+8:tmp_len+10]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(year = int(years1), month = int(months1), day = int(days1), hour=int(hours1), minute=int(minutes1), second = int(seconds1))
if tmp_now < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[j][1]), minutes = int(bossData[j][5]))
while now2 > tmp_now :
tmp_now = tmp_now + deltaTime
tmp_mungcnt = tmp_mungcnt + 1
now2 = tmp_now
tmp_bossTime[j] = bossTime[j] = now2
tmp_bossTimeString[j] = bossTimeString[j] = bossTime[j].strftime('%H:%M:%S')
tmp_bossDateString[j] = bossDateString[j] = bossTime[j].strftime('%Y-%m-%d')
bossData[j][6] = beforeBossData[i+1][tmp_msglen+2:len(beforeBossData[i+1])]
if beforeBossData[i+1][tmp_msglen-4:tmp_msglen-3] != 0 and beforeBossData[i+1][tmp_msglen-5:tmp_msglen-4] == ' ':
bossMungCnt[j] = int(beforeBossData[i+1][tmp_msglen-4:tmp_msglen-3]) + tmp_mungcnt
elif beforeBossData[i+1][tmp_msglen-5:tmp_msglen-4] != ' ':
bossMungCnt[j] = int(beforeBossData[i+1][tmp_msglen-5:tmp_msglen-4] + beforeBossData[i+1][tmp_msglen-4:tmp_msglen-3]) + tmp_mungcnt
else:
bossMungCnt[j] = 0
LoadChk = 0
print ("<불러오기 완료>")
else:
#await client.get_channel(channel).send('<보스타임 정보가 없습니다.>', tts=False)
LoadChk = 1
print ("보스타임 정보가 없습니다.")
#save fixed boss start dates
async def FixedBossDateSave():
global fixed_bossData
global fixed_bossTime
global fixed_bossNum
global FixedBossDateData
global indexFixedBossname
for i in range(fixed_bossNum):
FixedBossDateData[indexFixedBossname[i] + 3] = 'startDate = '+ fixed_bossTime[i].strftime('%Y-%m-%d') + '\n'
FixedBossDateDataSTR = ""
for j in range(len(FixedBossDateData)):
pos = len(FixedBossDateData[j])
tmpSTR = FixedBossDateData[j][:pos-1] + '\r\n'
FixedBossDateDataSTR += tmpSTR
contents = repo.get_contents("fixed_boss.ini")
repo.update_file(contents.path, "bossDB", FixedBossDateDataSTR, contents.sha)
#join a voice channel
async def JointheVC(VCchannel, TXchannel):
global chkvoicechannel
global voice_client1
if VCchannel is not None:
if chkvoicechannel == 0:
voice_client1 = await VCchannel.connect(reconnect=True)
if voice_client1.is_connected():
await voice_client1.disconnect()
voice_client1 = await VCchannel.connect(reconnect=True)
chkvoicechannel = 1
#await PlaySound(voice_client1, './sound/hello.mp3')
else :
await voice_client1.disconnect()
voice_client1 = await VCchannel.connect(reconnect=True)
#await PlaySound(voice_client1, './sound/hello.mp3')
else:
await TXchannel.send('음성채널에 먼저 들어가주세요.', tts=False)
#ladder (random draw) function
async def LadderFunc(number, ladderlist, channelVal):
if number < len(ladderlist):
result_ladder = random.sample(ladderlist, number)
result_ladderSTR = ','.join(map(str, result_ladder))
embed = discord.Embed(
title = "----- 당첨! -----",
description= '```' + result_ladderSTR + '```',
color=0xff00ff
)
await channelVal.send(embed=embed, tts=False)
else:
await channelVal.send('```추첨인원이 총 인원과 같거나 많습니다. 재입력 해주세요```', tts=False)
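# Example (names are placeholders): await LadderFunc(2, ['alice', 'bob', 'carol'], some_channel)
# announces two distinct winners drawn with random.sample; if the draw count is not smaller than
# the candidate list, the warning branch above asks for re-entry instead.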
#reset kill list
async def initkill_list():
global kill_Data
kill_Data = []
try :
contents = repo.get_contents("kill_list.ini")
repo.update_file(contents.path, "kill list", '-----척살명단-----', contents.sha)
except GithubException as e :
print ('save error!!')
print(e.args[1]['message']) # output: This repository is empty.
errortime = datetime.datetime.now()
print (errortime)
pass
#save kill list
async def kill_list_Save():
global kill_Data
output_kill_list = '-----척살명단-----\n'
for i in range(len(kill_Data)):
if kill_Data[i][0] != '':
output_kill_list += str(kill_Data[i][0]) + ' ' + str(kill_Data[i][1]) + '\n'
try :
contents = repo.get_contents("kill_list.ini")
repo.update_file(contents.path, "kill list", output_kill_list, contents.sha)
except GithubException as e :
print ('save error!!')
print(e.args[1]['message']) # output: This repository is empty.
errortime = datetime.datetime.now()
print (errortime)
pass
## exception handling for forced shutdown
def handle_exit():
#print("Handling")
client.loop.run_until_complete(client.logout())
for t in asyncio.Task.all_tasks(loop=client.loop):
if t.done():
#t.exception()
try:
#print ('try : ', t)
t.exception()
except asyncio.CancelledError:
#print ('cancel : ', t)
continue
continue
t.cancel()
try:
client.loop.run_until_complete(asyncio.wait_for(t, 5, loop=client.loop))
t.exception()
except asyncio.InvalidStateError:
pass
except asyncio.TimeoutError:
pass
except asyncio.CancelledError:
pass
# Code that runs when the bot has started up.
@client.event
async def on_ready():
global channel
global channel_info
global channel_name
global channel_id
global channel_voice_name
global channel_voice_id
global channel_type
global chkvoicechannel
global chflg
global endTime
print("Logged in as ") #화면에 봇의 아이디, 닉네임이 출력됩니다.
print(client.user.name)
print(client.user.id)
print("===========")
#await joinVoiceChannel()
all_channels = client.get_all_channels()
for channel1 in all_channels:
channel_type.append(str(channel1.type))
channel_info.append(channel1)
for i in range(len(channel_info)):
if channel_type[i] == "text":
channel_name.append(str(channel_info[i].name))
channel_id.append(str(channel_info[i].id))
for i in range(len(channel_info)):
if channel_type[i] == "voice":
channel_voice_name.append(str(channel_info[i].name))
channel_voice_id.append(str(channel_info[i].id))
await dbLoad()
if basicSetting[6] != "" and basicSetting[7] != "" :
#print ('join channel')
await JointheVC(client.get_channel(basicSetting[6]), client.get_channel(basicSetting[7]))
channel = basicSetting[7]
chflg = 1
print('< 텍스트채널 [' + client.get_channel(basicSetting[7]).name + '] 접속완료>')
print('< 음성채널 [' + client.get_channel(basicSetting[6]).name + '] 접속완료>')
if basicSetting[8] != "":
print('< 사다리채널 [' + client.get_channel(int(basicSetting[8])).name + '] 접속완료>')
if basicSetting[11] != "":
print('< 정산채널 [' + client.get_channel(int(basicSetting[11])).name + '] 접속완료>')
if basicSetting[18] != "":
print('< 척살채널 [' + client.get_channel(int(basicSetting[18])).name + '] 접속완료>')
if int(basicSetting[13]) != 0 :
print('< 보탐봇 재시작 시간 ' + endTime.strftime('%Y-%m-%d ') + endTime.strftime('%H:%M:%S') + ' >')
print('< 보탐봇 재시작 주기 ' + basicSetting[13] + '일 >')
else :
print('< 보탐봇 재시작 설정안됨 >')
# Discord can show which game a user is currently playing.
# We use that feature to display the bot's status in a simple way.
await client.change_presence(status=discord.Status.dnd, activity=discord.Game(name="!메뉴", type=1), afk=False)
while True:
# Code that runs when the bot receives a new message.
@client.event
async def on_message(msg):
if msg.author.bot: # if the message author is a bot
return None # do nothing and ignore it
global channel
global basicSetting
global bossData
global fixed_bossData
global bossNum
global fixed_bossNum
global chkvoicechannel
global chkrelogin
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global bossMungFlag
global bossMungCnt
global voice_client1
global channel_info
global channel_name
global channel_id
global channel_voice_name
global channel_voice_id
global channel_type
global chflg
global LoadChk
global indexFixedBossname
global FixedBossDateData
global gc #정산
global credentials #정산
global regenembed
global command
global kill_Data
id = msg.author.id # id holds the ID of the user who sent the message
if chflg == 0 :
channel = int(msg.channel.id) # channel holds the ID of the channel the message was received in
if basicSetting[7] == "":
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i] == 'textchannel = \r':
inputData_textCH[i] = 'textchannel = ' + str(channel) + '\r'
basicSetting[7] = channel
#print ('======', inputData_text[i])
result_textCH = '\n'.join(inputData_textCH)
#print (result_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print('< 텍스트채널 [' + client.get_channel(channel).name + '] 접속완료>')
if basicSetting[6] != "":
#print ('join channel')
await JointheVC(client.get_channel(basicSetting[6]), channel)
print('< 음성채널 [' + client.get_channel(basicSetting[6]).name + '] 접속완료>')
if basicSetting[8] != "":
print('< 사다리채널 [' + client.get_channel(int(basicSetting[8])).name + '] 접속완료>')
if basicSetting[11] != "":
print('< 정산채널 [' + client.get_channel(int(basicSetting[11])).name + '] 접속완료>')
if basicSetting[18] != "":
print('< 척살채널 [' + client.get_channel(int(basicSetting[18])).name + '] 접속완료>')
if int(basicSetting[13]) != 0 :
print('< 보탐봇 재시작 시간 ' + endTime.strftime('%Y-%m-%d ') + endTime.strftime('%H:%M:%S') + ' >')
print('< 보탐봇 재시작 주기 ' + basicSetting[13] + '일 >')
else :
print('< 보탐봇 재시작 설정안됨 >')
chflg = 1
if client.get_channel(channel) != msg.channel:
################ Ladder draws in their dedicated channel ################
if basicSetting[8] != "":
if msg.channel.id == int(basicSetting[8]): #### the ladder channel ID goes here
message = await msg.channel.fetch_message(msg.id)
if message.content.startswith(command[11]):
ladder = []
ladder = message.content[len(command[11])+1:].split(" ")
num_cong = int(ladder[0])
del(ladder[0])
await LadderFunc(num_cong, ladder, msg.channel)
################ Settlement in its dedicated channel ################
if basicSetting[11] != "":
if msg.channel.id == int(basicSetting[11]) : #### the settlement channel ID goes here
message = await msg.channel.fetch_message(msg.id)
################ Settlement lookup ################
if message.content.startswith(command[12]):
if basicSetting[10] !="" and basicSetting[12] !="" and basicSetting[14] !="" and basicSetting[15] !="" and basicSetting[16] !="" :
SearchID = message.content[len(command[12])+1:]
gc = gspread.authorize(credentials)
wks = gc.open(basicSetting[12]).worksheet(basicSetting[14])
wks.update_acell(basicSetting[15], SearchID)
result = wks.acell(basicSetting[16]).value
embed = discord.Embed(
description= '```' + SearchID + ' 님이 받을 다이야는 ' + result + ' 다이야 입니다.```',
color=0xff00ff
)
await msg.channel.send(embed=embed, tts=False)
################ Kill tracking in its dedicated channel ################
if basicSetting[18] != "":
if msg.channel.id == int(basicSetting[18]) : #### the kill channel ID goes here
message = await msg.channel.fetch_message(msg.id)
################ Kill list reset ################
if message.content == command[22]:
await initkill_list()
await msg.channel.send( '< 킬 목록 초기화완료 >', tts=False)
################ Show kill list ################
if message.content == command[23]:
kill_output = ''
for i in range(len(kill_Data)):
if kill_Data[i][0] != '':
kill_output += ':skull_crossbones: ' + str(kill_Data[i][0]) + ' : ' + str(kill_Data[i][1]) + '번 따히!\n'
if kill_output != '' :
embed = discord.Embed(
description= str(kill_output),
color=0xff00ff
)
else :
embed = discord.Embed(
description= '등록된 킬 목록이 없습니다. 분발하세요!',
color=0xff00ff
)
await msg.channel.send(embed=embed, tts=False)
################ Register a kill ################
if message.content.startswith(command[23]+' '):
tmp_sayMessage = message.content
sayMessage = tmp_sayMessage[len(command[23])+1:]
tmp_fk = []
listchk = 0
if sayMessage != ' ':
for i in range(len(kill_Data)):
if sayMessage == kill_Data[i][0]:
kill_Data[i][1] = int(kill_Data[i][1]) + 1
listchk = 1
if listchk == 0:
tmp_fk.append(sayMessage)
tmp_fk.append(1)
kill_Data.append(tmp_fk)
tmp_fk = []
embed = discord.Embed(
description= ':skull_crossbones:' + sayMessage + ' 따히!\n',
color=0xff00ff
)
await msg.channel.send(embed=embed, tts=False)
else:
await msg.channel.send( '```제대로 된 아이디를 입력해주세요!\n```', tts=False)
################ Remove a kill entry ################
if message.content.startswith(command[24]+' '):
tmp_sayMessage = message.content
sayMessage = tmp_sayMessage[len(command[24])+1:]
tmp_fk = []
indexchk = 0
if sayMessage != ' ':
for i in range(len(kill_Data)):
if sayMessage == kill_Data[i][0]:
indexchk = i + 1
if indexchk != 0:
del(kill_Data[indexchk-1])
await msg.channel.send( '```<' + sayMessage + '> 킬 목록 삭제완료!\n```', tts=False)
else :
await msg.channel.send( '```킬 목록에 등록되어 있지 않습니다!\n```', tts=False)
else:
await msg.channel.send( '```제대로 된 아이디를 입력해주세요!\n```', tts=False)
else :
message = await client.get_channel(channel).fetch_message(msg.id)
################ Channel info ################
if message.content == command[2]:
ch_information = []
cnt = 0
ch_information.append('')
for i in range(len(channel_name)):
if len(ch_information[cnt]) > 1000 :
ch_information.append('')
cnt += 1
ch_information[cnt] = ch_information[cnt] + '[' + channel_id[i] + '] ' + channel_name[i] + '\n'
ch_voice_information = []
cntV = 0
ch_voice_information.append('')
for i in range(len(channel_voice_name)):
if len(ch_voice_information[cntV]) > 1000 :
ch_voice_information.append('')
cntV += 1
ch_voice_information[cntV] = ch_voice_information[cntV] + '[' + channel_voice_id[i] + '] ' + channel_voice_name[i] + '\n'
'''
for i in range(len(ch_information)):
print ("--------------------------")
print (ch_information[i])
print (len(ch_information[i]))
print (len(ch_information))
for i in range(len(ch_voice_information)):
print ("+++++++++++++++++++++++++")
print (ch_voice_information[i])
print (len(ch_voice_information[i]))
print (len(ch_voice_information))
'''
if len(ch_information) == 1 and len(ch_voice_information) == 1:
embed = discord.Embed(
title = "----- 채널 정보 -----",
description= '',
color=0xff00ff
)
embed.add_field(
name="< 택스트 채널 >",
value= '```' + ch_information[0] + '```',
inline = False
)
embed.add_field(
name="< 보이스 채널 >",
value= '```' + ch_voice_information[0] + '```',
inline = False
)
await client.get_channel(channel).send( embed=embed, tts=False)
else :
embed = discord.Embed(
title = "----- 채널 정보 -----\n< 택스트 채널 >",
description= '```' + ch_information[0] + '```',
color=0xff00ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
for i in range(len(ch_information)-1):
embed = discord.Embed(
title = '',
description= '```' + ch_information[i+1] + '```',
color=0xff00ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
embed = discord.Embed(
title = "< 음성 채널 >",
description= '```' + ch_voice_information[0] + '```',
color=0xff00ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
for i in range(len(ch_voice_information)-1):
embed = discord.Embed(
title = '',
description= '```' + ch_voice_information[i+1] + '```',
color=0xff00ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
################ Move text channel ################
if message.content.startswith(command[3]):
tmp_sayMessage1 = message.content
for i in range(len(channel_name)):
if channel_name[i] == str(tmp_sayMessage1[len(command[3])+1:]):
channel = int(channel_id[i])
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i] == 'textchannel = ' + str(basicSetting[7]) + '\r':
inputData_textCH[i] = 'textchannel = ' + str(channel) + '\r'
basicSetting[7] = int(channel)
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
await client.get_channel(channel).send('< ' + client.get_channel(channel).name + ' 이동완료>', tts=False)
hello = message.content
##################################
for i in range(bossNum):
################ Boss cut (kill time) handling ################
if message.content.startswith(bossData[i][0] +'컷'):
if hello.find(' ') != -1 :
bossData[i][6] = hello[hello.find(' ')+2:]
hello = hello[:hello.find(' ')]
else:
bossData[i][6] = ''
tmp_msg = bossData[i][0] +'컷'
if len(hello) > len(tmp_msg) + 3 :
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = now2
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
if tmp_now > now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(-1))
if tmp_now < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while now2 > tmp_now :
tmp_now = tmp_now + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
now2 = tmp_now
bossMungCnt[i] = bossMungCnt[i] - 1
else :
now2 = now2 + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
tmp_bossTime[i] = bossTime[i] = nextTime = now2
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await client.get_channel(channel).send(embed=embed, tts=False)
################ Boss mung (missed spawn) handling ################
if message.content.startswith(bossData[i][0] +'멍'):
if hello.find(' ') != -1 :
bossData[i][6] = hello[hello.find(' ')+2:]
hello = hello[:hello.find(' ')]
else:
bossData[i][6] = ''
tmp_msg = bossData[i][0] +'멍'
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
if len(hello) > len(tmp_msg) + 3 :
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
temptime = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
temptime = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
nextTime = temptime + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
bossMungCnt[i] = 0
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = bossMungCnt[i] + 1
if nextTime > tmp_now :
nextTime = nextTime + datetime.timedelta(days=int(-1))
if nextTime < tmp_now :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while tmp_now > nextTime :
nextTime = nextTime + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
else :
nextTime = nextTime
tmp_bossTime[i] = bossTime[i] = nextTime
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await client.get_channel(channel).send(embed=embed, tts=False)
else:
if tmp_bossTime[i] < tmp_now :
nextTime = tmp_bossTime[i] + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = nextTime
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await client.get_channel(channel).send(embed=embed, tts=False)
else:
await client.get_channel(channel).send('```' + bossData[i][0] + '탐이 아직 안됐습니다. 다음 ' + bossData[i][0] + '탐 [' + tmp_bossTimeString[i] + '] 입니다```', tts=False)
################ Enter an estimated boss time ################
if message.content.startswith(bossData[i][0] +'예상'):
if hello.find(' ') != -1 :
bossData[i][6] = hello[hello.find(' ')+2:]
hello = hello[:hello.find(' ')]
else:
bossData[i][6] = ''
tmp_msg = bossData[i][0] +'예상'
if len(hello) > len(tmp_msg) + 3 :
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
if tmp_now < now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(1))
tmp_bossTime[i] = bossTime[i] = nextTime = tmp_now
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await client.get_channel(channel).send(embed=embed, tts=False)
else:
await client.get_channel(channel).send('```' + bossData[i][0] +' 예상 시간을 입력해주세요.```', tts=False)
################ Delete a boss time ################
if message.content == bossData[i][0] +'삭제':
bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
tmp_bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
bossTimeString[i] = '99:99:99'
bossDateString[i] = '9999-99-99'
tmp_bossTimeString[i] = '99:99:99'
tmp_bossDateString[i] = '9999-99-99'
bossFlag[i] = (False)
bossFlag0[i] = (False)
bossMungFlag[i] = (False)
bossMungCnt[i] = 0
await client.get_channel(channel).send('<' + bossData[i][0] + ' 삭제완료>', tts=False)
await dbSave()
print ('<' + bossData[i][0] + ' 삭제완료>')
################ Per-boss memo ################
if message.content.startswith(bossData[i][0] +'메모 '):
tmp_msg = bossData[i][0] +'메모 '
bossData[i][6] = hello[len(tmp_msg):]
await client.get_channel(channel).send('< ' + bossData[i][0] + ' [ ' + bossData[i][6] + ' ] 메모등록 완료>', tts=False)
if message.content.startswith(bossData[i][0] +'메모삭제'):
bossData[i][6] = ''
await client.get_channel(channel).send('< ' + bossData[i][0] + ' 메모삭제 완료>', tts=False)
################ ?????????????? ################
if message.content == '!오빠' :
await PlaySound(voice_client1, './sound/오빠.mp3')
if message.content == '!언니' :
await PlaySound(voice_client1, './sound/언니.mp3')
if message.content == '!형' :
await PlaySound(voice_client1, './sound/형.mp3')
if message.content == '!TJ' or message.content == '!tj' :
resultTJ = random.randrange(1,9)
await PlaySound(voice_client1, './sound/TJ' + str(resultTJ) +'.mp3')
################ 분배 결과 출력 ################
if message.content.startswith(command[10]):
separate_money = []
separate_money = message.content[len(command[10])+1:].split(" ")
num_sep = int(separate_money[0])
cal_tax1 = math.ceil(float(separate_money[1])*0.05)
real_money = int(int(separate_money[1]) - cal_tax1)
cal_tax2 = int(real_money/num_sep) - math.ceil(float(int(real_money/num_sep))*0.95)
if num_sep == 0 :
await client.get_channel(channel).send('```분배 인원이 0입니다. 재입력 해주세요.```', tts=False)
else :
await client.get_channel(channel).send('```1차세금 : ' + str(cal_tax1) + '\n1차 수령액 : ' + str(real_money) + '\n분배자 거래소등록금액 : ' + str(int(real_money/num_sep)) + '\n2차세금 : ' + str(cal_tax2) + '\n인당 실수령액 : ' + str(int(float(int(real_money/num_sep))*0.95)) + '```', tts=False)
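            # Worked example of the settlement formulas above (numbers are illustrative,
            # assuming command[10] is the '!분배'-style command and the input is '4 100000'):
            #   cal_tax1   = ceil(100000 * 0.05)          = 5000
            #   real_money = 100000 - 5000                = 95000
            #   per-person exchange listing amount        = int(95000 / 4) = 23750
            #   cal_tax2   = 23750 - ceil(23750 * 0.95)   = 1187
            #   per-person final amount                   = int(23750 * 0.95) = 22562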
################ 사다리 결과 출력 ################
if message.content.startswith(command[11]):
ladder = []
ladder = message.content[len(command[11])+1:].split(" ")
num_cong = int(ladder[0])
del(ladder[0])
await LadderFunc(num_cong, ladder, client.get_channel(channel))
################ 보탐봇 메뉴 출력 ################
if message.content == command[0]:
command_list = ''
command_list += command[1] + '\n' #!설정확인
command_list += command[2] + '\n' #!채널확인
command_list += command[3] + ' [채널명]\n' #!채널이동
command_list += command[4] + '\n' #!소환
command_list += command[5] + '\n' #!불러오기
command_list += command[6] + '\n' #!초기화
command_list += command[7] + '\n' #!명치,!인중
command_list += command[8] + '\n' #!재시작
command_list += command[9] + '\n' #!미예약
command_list += command[10] + ' [인원] [금액]\n' #!분배
command_list += command[11] + ' [뽑을인원수] [아이디1] [아이디2]...\n' #!사다리
command_list += command[12] + ' [아이디]\n' #!정산
command_list += command[13] + ' 또는 ' + command[14] + ' 0000, 00:00\n' #!보스일괄
command_list += command[14] + '\n' #!q
command_list += command[15] + ' [할말]\n' #!v
command_list += command[16] + '\n' #!리젠
command_list += command[17] + '\n' #!현재시간
command_list += command[22] + '\n' #!킬초기화
command_list += command[23] + '\n' #!킬횟수 확인
command_list += command[23] + ' [아이디]\n' #!킬
command_list += command[24] + ' [아이디]\n' #!킬삭제
command_list += command[18] + '\n' #!공지
command_list += command[18] + ' [공지내용]\n' #!공지
command_list += command[18] + '삭제\n' #!공지
command_list += command[19] + ' [할말]\n\n' #!상태
command_list += command[20] + '\n' #보스탐
command_list += command[21] + '\n' #!보스탐
command_list += '[보스명]컷 또는 [보스명]컷 0000, 00:00\n'
command_list += '[보스명]멍 또는 [보스명]멍 0000, 00:00\n'
command_list += '[보스명]예상 또는 [보스명]예상 0000, 00:00\n'
command_list += '[보스명]삭제\n'
command_list += '[보스명]메모 [할말]\n'
embed = discord.Embed(
title = "----- 명령어 -----",
description= '```' + command_list + '```',
color=0xff00ff
)
embed.add_field(
name="----- 추가기능 -----",
value= '```[보스명]컷/멍/예상 [할말] : 보스시간 입력 후 빈칸 두번!! 메모 가능```'
)
await client.get_channel(channel).send( embed=embed, tts=False)
################ 미예약 보스타임 출력 ################
if message.content == command[9]:
tmp_boss_information = []
tmp_cnt = 0
tmp_boss_information.append('')
for i in range(bossNum):
if bossTimeString[i] == '99:99:99' and bossMungFlag[i] != True :
if len(tmp_boss_information[tmp_cnt]) > 1800 :
tmp_boss_information.append('')
tmp_cnt += 1
tmp_boss_information[tmp_cnt] = tmp_boss_information[tmp_cnt] + bossData[i][0] + ','
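                    # The 1800-character threshold here (and in the similar list-building loops
                    # below) presumably keeps each chunk safely under Discord's message/embed
                    # description length limits (~2000/2048 characters), so long boss lists are
                    # split across several embeds.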
if len(tmp_boss_information) == 1:
if len(tmp_boss_information[0]) != 0:
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
else :
tmp_boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 미예약 보스 -----",
description= tmp_boss_information[0],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
else:
if len(tmp_boss_information[0]) != 0:
if len(tmp_boss_information) == 1 :
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
else:
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0] + "\n```"
else :
tmp_boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 미예약 보스 -----",
description= tmp_boss_information[0],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
for i in range(len(tmp_boss_information)-1):
if len(tmp_boss_information[i+1]) != 0:
if i == len(tmp_boss_information)-2:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1][:len(tmp_boss_information[i+1])-1] + "\n```"
else:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1] + "\n```"
else :
tmp_boss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= tmp_boss_information[i+1],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
################ 음성파일 생성 후 재생 ################
if message.content.startswith(command[15]) or message.content.startswith('!ㅍ') or message.content.startswith('!V'):
tmp_sayMessage = message.content
sayMessage = tmp_sayMessage[len(command[15])+1:]
await MakeSound(message.author.display_name +'님이.' + sayMessage, './sound/say')
await client.get_channel(channel).send("```< " + msg.author.display_name + " >님이 \"" + sayMessage + "\"```", tts=False)
await PlaySound(voice_client1, './sound/say.wav')
################ 보탐봇 재시작 ################
if message.content == command[8] :
if basicSetting[2] != '0':
for i in range(bossNum):
if bossMungFlag[i] == True:
bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
await dbSave()
await kill_list_Save()
#await FixedBossDateSave()
#await client.get_channel(channel).send('<보탐봇 재시작 중... 갑자기 인사해도 놀라지마세요!>', tts=False)
print("보탐봇강제재시작!")
await asyncio.sleep(2)
inidata_restart = repo_restart.get_contents("restart.txt")
file_data_restart = base64.b64decode(inidata_restart.content)
file_data_restart = file_data_restart.decode('utf-8')
inputData_restart = file_data_restart.split('\n')
if len(inputData_restart) < 3:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_0", "restart\nrestart\nrestrat\n", contents12.sha)
else:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_1", "", contents12.sha)
################ 보탐봇 음성채널 소환 ################
if message.content == command[4]:
if message.author.voice == None:
await client.get_channel(channel).send('음성채널에 먼저 들어가주세요.', tts=False)
else:
voice_channel = message.author.voice.channel
if basicSetting[6] == "":
inidata_voiceCH = repo.get_contents("test_setting.ini")
file_data_voiceCH = base64.b64decode(inidata_voiceCH.content)
file_data_voiceCH = file_data_voiceCH.decode('utf-8')
inputData_voiceCH = file_data_voiceCH.split('\n')
for i in range(len(inputData_voiceCH)):
if inputData_voiceCH[i] == 'voicechannel = \r':
inputData_voiceCH[i] = 'voicechannel = ' + str(voice_channel.id) + '\r'
basicSetting[6] = int(voice_channel.id)
result_voiceCH = '\n'.join(inputData_voiceCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_voiceCH, contents.sha)
elif basicSetting[6] != int(voice_channel.id):
inidata_voiceCH = repo.get_contents("test_setting.ini")
file_data_voiceCH = base64.b64decode(inidata_voiceCH.content)
file_data_voiceCH = file_data_voiceCH.decode('utf-8')
inputData_voiceCH = file_data_voiceCH.split('\n')
for i in range(len(inputData_voiceCH)):
if inputData_voiceCH[i] == 'voicechannel = ' + str(basicSetting[6]) + '\r':
inputData_voiceCH[i] = 'voicechannel = ' + str(voice_channel.id) + '\r'
basicSetting[6] = int(voice_channel.id)
result_voiceCH = '\n'.join(inputData_voiceCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_voiceCH, contents.sha)
await JointheVC(voice_channel, channel)
await client.get_channel(channel).send('< 음성채널 [' + client.get_channel(voice_channel.id).name + '] 접속완료>', tts=False)
################ 저장된 정보 초기화 ################
if message.content == command[6] :
basicSetting = []
bossData = []
fixed_bossData = []
bossTime = []
tmp_bossTime = []
fixed_bossTime = []
bossTimeString = []
bossDateString = []
tmp_bossTimeString = []
tmp_bossDateString = []
bossFlag = []
bossFlag0 = []
fixed_bossFlag = []
fixed_bossFlag0 = []
bossMungFlag = []
bossMungCnt = []
FixedBossDateData = []
indexFixedBossname = []
init()
await dbSave()
await client.get_channel(channel).send('<초기화 완료>', tts=False)
print ("<초기화 완료>")
################ 보스타임 일괄 설정 ################
if message.content.startswith(command[13]):
for i in range(bossNum):
tmp_msg = command[13]
if len(hello) > len(tmp_msg) + 3 :
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = now2
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 1
if tmp_now > now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(-1))
if tmp_now < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while now2 > tmp_now :
tmp_now = tmp_now + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
now2 = tmp_now
bossMungCnt[i] = bossMungCnt[i] - 1
else :
now2 = now2 + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
tmp_bossTime[i] = bossTime[i] = nextTime = now2
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
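            # Illustration of the catch-up loop above (hypothetical numbers): if it is now
            # 14:00, the entered time is 09:00 and the regen interval
            # (bossData[i][1] h, bossData[i][5] m) is 2 h, the loop walks
            # 09:00 -> 11:00 -> 13:00 -> 15:00, incrementing bossMungCnt for each skipped
            # spawn, and 15:00 becomes the next boss time.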
await dbSave()
await dbLoad()
await dbSave()
await client.get_channel(channel).send('<보스 일괄 입력 완료>', tts=False)
print ("<보스 일괄 입력 완료>")
################ 보탐봇 기본 설정확인 ################
if message.content == command[1]:
setting_val = '보탐봇버전 : Server Ver. 15.1 (2020. 2. 14.)\n'
setting_val += '음성채널 : ' + client.get_channel(basicSetting[6]).name + '\n'
setting_val += '텍스트채널 : ' + client.get_channel(basicSetting[7]).name +'\n'
if basicSetting[8] != "" :
setting_val += '사다리채널 : ' + client.get_channel(int(basicSetting[8])).name + '\n'
if basicSetting[11] != "" :
setting_val += '정산채널 : ' + client.get_channel(int(basicSetting[11])).name + '\n'
setting_val += '보스젠알림시간1 : ' + basicSetting[1] + ' 분 전\n'
setting_val += '보스젠알림시간2 : ' + basicSetting[3] + ' 분 전\n'
setting_val += '보스멍확인시간 : ' + basicSetting[2] + ' 분 후\n'
embed = discord.Embed(
title = "----- 설정내용 -----",
description= '```' + setting_val + '```',
color=0xff00ff
)
await client.get_channel(channel).send(embed=embed, tts=False)
################ my_bot.db에 저장된 보스타임 불러오기 ################
if message.content == command[5] :
await dbLoad()
if LoadChk == 0:
await client.get_channel(channel).send('<불러오기 완료>', tts=False)
else:
await client.get_channel(channel).send('<보스타임 정보가 없습니다.>', tts=False)
################ 가장 근접한 보스타임 출력 ################
if message.content == '!ㅂ' or message.content == command[14] or message.content == '!ㅃ' or message.content == '!Q':
checkTime = datetime.datetime.now() + datetime.timedelta(days=1, hours = int(basicSetting[0]))
datelist = []
datelist2 = []
temp_bossTime1 = []
ouput_bossData = []
aa = []
sorted_datelist = []
for i in range(bossNum):
if bossMungFlag[i] != True and bossTimeString[i] != '99:99:99' :
datelist2.append(bossTime[i])
for i in range(fixed_bossNum):
if fixed_bossTime[i] < datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0])+3):
datelist2.append(fixed_bossTime[i])
datelist = list(set(datelist2))
for i in range(bossNum):
if bossMungFlag[i] != True :
aa.append(bossData[i][0]) #output_bossData[0] : 보스명
aa.append(bossTime[i]) #output_bossData[1] : 시간
aa.append(bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00)
ouput_bossData.append(aa)
aa = []
for i in range(fixed_bossNum):
aa.append(fixed_bossData[i][0]) #output_bossData[0] : 보스명
aa.append(fixed_bossTime[i]) #output_bossData[1] : 시간
aa.append(fixed_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00)
ouput_bossData.append(aa)
aa = []
tmp_sorted_datelist = sorted(datelist)
for i in range(len(tmp_sorted_datelist)):
if checkTime > tmp_sorted_datelist[i]:
sorted_datelist.append(tmp_sorted_datelist[i])
if len(sorted_datelist) == 0:
await client.get_channel(channel).send( '<보스타임 정보가 없습니다.>', tts=False)
else :
result_lefttime = ''
if len(sorted_datelist) > int(basicSetting[9]):
for j in range(int(basicSetting[9])):
for i in range(len(ouput_bossData)):
if sorted_datelist[j] == ouput_bossData[i][1]:
leftTime = ouput_bossData[i][1] - (datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0])))
total_seconds = int(leftTime.total_seconds())
hours, remainder = divmod(total_seconds,60*60)
minutes, seconds = divmod(remainder,60)
result_lefttime += '다음 ' + ouput_bossData[i][0] + '탐까지 %02d:%02d:%02d 남았습니다. ' % (hours,minutes,seconds) + '[' + ouput_bossData[i][2] + ']\n'
else :
for j in range(len(sorted_datelist)):
for i in range(len(ouput_bossData)):
if sorted_datelist[j] == ouput_bossData[i][1]:
leftTime = ouput_bossData[i][1] - (datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0])))
total_seconds = int(leftTime.total_seconds())
hours, remainder = divmod(total_seconds,60*60)
minutes, seconds = divmod(remainder,60)
result_lefttime += '다음 ' + ouput_bossData[i][0] + '탐까지 %02d:%02d:%02d 남았습니다. ' % (hours,minutes,seconds) + '[' + ouput_bossData[i][2] + ']\n'
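                        # Remaining-time arithmetic above, e.g. a leftTime of 1 h 23 m 45 s:
                        #   total_seconds = 5025
                        #   divmod(5025, 3600) = (1, 1425); divmod(1425, 60) = (23, 45)
                        #   -> rendered as '01:23:45' by the '%02d:%02d:%02d' format.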
embed = discord.Embed(
description= result_lefttime,
color=0xff0000
)
await client.get_channel(channel).send( embed=embed, tts=False)
################ 보스타임 출력 ################
if message.content == command[20] or message.content == '/1' or message.content == '/보스':
datelist = []
datelist2 = []
temp_bossTime1 = []
ouput_bossData = []
aa = []
for i in range(bossNum):
if bossMungFlag[i] == True :
datelist2.append(tmp_bossTime[i])
else :
datelist2.append(bossTime[i])
for i in range(fixed_bossNum):
if fixed_bossTime[i] < datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0])+3):
datelist2.append(fixed_bossTime[i])
datelist = list(set(datelist2))
tmp_boss_information = []
tmp_cnt = 0
tmp_boss_information.append('')
for i in range(bossNum):
if bossTimeString[i] == '99:99:99' and bossMungFlag[i] != True :
if len(tmp_boss_information[tmp_cnt]) > 1000 :
tmp_boss_information.append('')
tmp_cnt += 1
tmp_boss_information[tmp_cnt] = tmp_boss_information[tmp_cnt] + bossData[i][0] + ','
else :
aa.append(bossData[i][0]) #output_bossData[0] : 보스명
if bossMungFlag[i] == True :
aa.append(tmp_bossTime[i]) #output_bossData[1] : 시간
aa.append(tmp_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00) -> 초빼기 : aa.append(tmp_bossTime[i].strftime('%H:%M'))
aa.append('-') #output_bossData[3] : -
else :
aa.append(bossTime[i]) #output_bossData[1] : 시간
aa.append(bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00) -> 초빼기 : aa.append(bossTime[i].strftime('%H:%M'))
aa.append('+') #output_bossData[3] : +
aa.append(bossData[i][2]) #output_bossData[4] : 멍/미입력 보스
aa.append(bossMungCnt[i]) #output_bossData[5] : 멍/미입력횟수
aa.append(bossData[i][6]) #output_bossData[6] : 메세지
ouput_bossData.append(aa)
aa = []
for i in range(fixed_bossNum):
aa.append(fixed_bossData[i][0]) #output_bossData[0] : 보스명
aa.append(fixed_bossTime[i]) #output_bossData[1] : 시간
aa.append(fixed_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00) -> 초빼기 : aa.append(fixed_bossTime[i].strftime('%H:%M'))
aa.append('@') #output_bossData[3] : @
aa.append(0) #output_bossData[4] : 멍/미입력 보스
aa.append(0) #output_bossData[5] : 멍/미입력횟수
aa.append("") #output_bossData[6] : 메세지
ouput_bossData.append(aa)
aa = []
boss_information = []
cnt = 0
boss_information.append('')
for timestring in sorted(datelist):
if len(boss_information[cnt]) > 1800 :
boss_information.append('')
cnt += 1
for i in range(len(ouput_bossData)):
if timestring == ouput_bossData[i][1]:
if ouput_bossData[i][4] == '0' :
if ouput_bossData[i][5] == 0 :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (미 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
else :
if ouput_bossData[i][5] == 0 :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (멍 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
if len(boss_information) == 1 and len(tmp_boss_information) == 1:
###########################
if len(boss_information[0]) != 0:
boss_information[0] = "```diff\n" + boss_information[0] + "\n```"
else :
boss_information[0] = '``` ```'
if len(tmp_boss_information[0]) != 0:
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
else :
tmp_boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 보스탐 정보 -----",
description= boss_information[0],
color=0x0000ff
)
embed.add_field(
name="----- 미예약 보스 -----",
value= tmp_boss_information[0],
inline = False
)
await client.get_channel(channel).send( embed=embed, tts=False)
else :
###########################일반보스출력
if len(boss_information[0]) != 0:
boss_information[0] = "```diff\n" + boss_information[0] + "\n```"
else :
boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 보스탐 정보 -----",
description= boss_information[0],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
for i in range(len(boss_information)-1):
if len(boss_information[i+1]) != 0:
boss_information[i+1] = "```diff\n" + boss_information[i+1] + "\n```"
else :
boss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= boss_information[i+1],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
###########################미예약보스출력
if len(tmp_boss_information[0]) != 0:
if len(tmp_boss_information) == 1 :
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
else:
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0] + "\n```"
else :
tmp_boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 미예약 보스 -----",
description= tmp_boss_information[0],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
for i in range(len(tmp_boss_information)-1):
if len(tmp_boss_information[i+1]) != 0:
if i == len(tmp_boss_information)-2:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1][:len(tmp_boss_information[i+1])-1] + "\n```"
else:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1] + "\n```"
else :
tmp_boss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= tmp_boss_information[i+1],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
await dbSave()
await kill_list_Save()
################ 보스타임 출력(고정보스포함) ################
if message.content == command[21]:
datelist = []
datelist2 = []
temp_bossTime1 = []
ouput_bossData = []
aa = []
fixed_datelist = []
for i in range(bossNum):
if bossMungFlag[i] == True :
datelist2.append(tmp_bossTime[i])
else :
datelist2.append(bossTime[i])
datelist = list(set(datelist2))
tmp_boss_information = []
tmp_cnt = 0
tmp_boss_information.append('')
for i in range(bossNum):
if bossTimeString[i] == '99:99:99' and bossMungFlag[i] != True :
if len(tmp_boss_information[tmp_cnt]) > 1800 :
tmp_boss_information.append('')
tmp_cnt += 1
tmp_boss_information[tmp_cnt] = tmp_boss_information[tmp_cnt] + bossData[i][0] + ','
else :
aa.append(bossData[i][0]) #output_bossData[0] : 보스명
if bossMungFlag[i] == True :
aa.append(tmp_bossTime[i]) #output_bossData[1] : 시간
aa.append(tmp_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00) -> 초빼기 : aa.append(tmp_bossTime[i].strftime('%H:%M'))
aa.append('-') #output_bossData[3] : -
else :
aa.append(bossTime[i]) #output_bossData[1] : 시간
aa.append(bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00) -> 초빼기 : aa.append(bossTime[i].strftime('%H:%M'))
aa.append('+') #output_bossData[3] : +
aa.append(bossData[i][2]) #output_bossData[4] : 멍/미입력 보스
aa.append(bossMungCnt[i]) #output_bossData[5] : 멍/미입력횟수
aa.append(bossData[i][6]) #output_bossData[6] : 메세지
ouput_bossData.append(aa)
aa = []
for i in range(fixed_bossNum):
fixed_datelist.append(fixed_bossTime[i])
fixed_datelist = list(set(fixed_datelist))
fixedboss_information = []
cntF = 0
fixedboss_information.append('')
for timestring1 in sorted(fixed_datelist):
if len(fixedboss_information[cntF]) > 1800 :
fixedboss_information.append('')
cntF += 1
for i in range(fixed_bossNum):
if timestring1 == fixed_bossTime[i]:
if (datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0]))).strftime('%Y-%m-%d') == fixed_bossTime[i].strftime('%Y-%m-%d'):
tmp_timeSTR = fixed_bossTime[i].strftime('%H:%M:%S') #초빼기 : tmp_timeSTR = fixed_bossTime[i].strftime('%H:%M')
else:
tmp_timeSTR = '[' + fixed_bossTime[i].strftime('%Y-%m-%d') + '] ' + fixed_bossTime[i].strftime('%H:%M:%S') #초빼기 : tmp_timeSTR = '[' + fixed_bossTime[i].strftime('%Y-%m-%d') + '] ' + fixed_bossTime[i].strftime('%H:%M')
fixedboss_information[cntF] = fixedboss_information[cntF] + tmp_timeSTR + ' : ' + fixed_bossData[i][0] + '\n'
boss_information = []
cnt = 0
boss_information.append('')
for timestring in sorted(datelist):
if len(boss_information[cnt]) > 1800 :
boss_information.append('')
cnt += 1
for i in range(len(ouput_bossData)):
if timestring == ouput_bossData[i][1]:
if ouput_bossData[i][4] == '0' :
if ouput_bossData[i][5] == 0 :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (미 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
else :
if ouput_bossData[i][5] == 0 :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (멍 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
###########################고정보스출력
if len(fixedboss_information[0]) != 0:
fixedboss_information[0] = "```diff\n" + fixedboss_information[0] + "\n```"
else :
fixedboss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 고 정 픽 -----",
description= fixedboss_information[0],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
for i in range(len(fixedboss_information)-1):
if len(fixedboss_information[i+1]) != 0:
fixedboss_information[i+1] = "```diff\n" + fixedboss_information[i+1] + "\n```"
else :
fixedboss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= fixedboss_information[i+1],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
###########################일반보스출력
if len(boss_information[0]) != 0:
boss_information[0] = "```diff\n" + boss_information[0] + "\n```"
else :
boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 보스탐 정보 -----",
description= boss_information[0],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
for i in range(len(boss_information)-1):
if len(boss_information[i+1]) != 0:
boss_information[i+1] = "```diff\n" + boss_information[i+1] + "\n```"
else :
boss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= boss_information[i+1],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
###########################미예약보스출력
if len(tmp_boss_information[0]) != 0:
if len(tmp_boss_information) == 1 :
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
else:
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0] + "\n```"
else :
tmp_boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 미예약 보스 -----",
description= tmp_boss_information[0],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
for i in range(len(tmp_boss_information)-1):
if len(tmp_boss_information[i+1]) != 0:
if i == len(tmp_boss_information)-2:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1][:len(tmp_boss_information[i+1])-1] + "\n```"
else:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1] + "\n```"
else :
tmp_boss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= tmp_boss_information[i+1],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
await dbSave()
await kill_list_Save()
################ 현재시간 확인 ################
if message.content == command[17] :
        currentTime = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
        embed = discord.Embed(
            title = '현재시간은 ' + currentTime.strftime('%H') + '시 ' + currentTime.strftime('%M') + '분 ' + currentTime.strftime('%S') + '초 입니다.',
color=0xff00ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
################ 리젠시간 출력 ################
if message.content == command[16] :
await client.get_channel(channel).send(embed=regenembed, tts=False)
################ 명존쎄 ################
if message.content == command[7]:
await client.get_channel(channel).send( '< 명치의 아픔을 알리지 말라... >', tts=False)
for i in range(bossNum):
if bossMungFlag[i] == True:
bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
await dbSave()
print("명치!")
await voice_client1.disconnect()
#client.clear()
raise SystemExit
################ 인중쎄 ################
if message.content == command[7]:
await client.get_channel(channel).send( '< 인중을 때리다니 상상도 못했다! >', tts=False)
for i in range(bossNum):
if bossMungFlag[i] == True:
bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
await dbSave()
print("인중!")
await voice_client1.disconnect()
#client.clear()
raise SystemExit
################ 상태메세지변경 ################
if message.content.startswith(command[19]):
tmp_sayMessage = message.content
sayMessage = tmp_sayMessage[len(command[19])+1:]
await client.change_presence(status=discord.Status.dnd, activity=discord.Game(name=sayMessage, type=1), afk = False)
await client.get_channel(channel).send( '< 상태메세지 변경완료 >', tts=False)
################ 공지확인, 입력 및 삭제 ################
if message.content == command[18]:
notice_initdata = repo.get_contents("notice.ini")
notice = base64.b64decode(notice_initdata.content)
notice = notice.decode('utf-8')
if notice != '' :
embed = discord.Embed(
description= str(notice),
color=0xff00ff
)
else :
embed = discord.Embed(
description= '등록된 공지가 없습니다.',
color=0xff00ff
)
await msg.channel.send(embed=embed, tts=False)
if message.content.startswith(command[18] + ' '):
tmp_sayMessage = message.content
sayMessage = tmp_sayMessage[len(command[18])+1:]
contents = repo.get_contents("notice.ini")
repo.update_file(contents.path, "notice 등록", sayMessage, contents.sha)
await client.get_channel(channel).send( '< 공지 등록완료 >', tts=False)
if message.content == command[18] + '삭제':
contents = repo.get_contents("notice.ini")
repo.update_file(contents.path, "notice 삭제", '', contents.sha)
await client.get_channel(channel).send( '< 공지 삭제완료 >', tts=False)
################ 정산확인 ################
if message.content.startswith(command[12]):
if basicSetting[10] !="" and basicSetting[12] !="" and basicSetting[14] !="" and basicSetting[15] !="" and basicSetting[16] !="" :
SearchID = hello[len(command[12])+1:]
gc = gspread.authorize(credentials)
wks = gc.open(basicSetting[12]).worksheet(basicSetting[14])
wks.update_acell(basicSetting[15], SearchID)
result = wks.acell(basicSetting[16]).value
embed = discord.Embed(
description= '```' + SearchID + ' 님이 받을 다이야는 ' + result + ' 다이야 입니다.```',
color=0xff00ff
)
await msg.channel.send(embed=embed, tts=False)
################ 킬초기화 ################
if message.content == command[22]:
await initkill_list()
await client.get_channel(channel).send( '< 킬 목록 초기화완료 >', tts=False)
################ 킬명단 확인 ################
if message.content == command[23]:
kill_output = ''
for i in range(len(kill_Data)):
if kill_Data[i][0] != '':
kill_output += ':skull_crossbones: ' + str(kill_Data[i][0]) + ' : ' + str(kill_Data[i][1]) + '번 따히!\n'
if kill_output != '' :
embed = discord.Embed(
description= str(kill_output),
color=0xff00ff
)
else :
embed = discord.Embed(
description= '등록된 킬 목록이 없습니다. 분발하세요!',
color=0xff00ff
)
await client.get_channel(channel).send(embed=embed, tts=False)
################ 킬등록 ################
if message.content.startswith(command[23]+' '):
tmp_sayMessage = message.content
sayMessage = tmp_sayMessage[len(command[23])+1:]
tmp_fk = []
listchk = 0
if sayMessage != ' ':
for i in range(len(kill_Data)):
if sayMessage == kill_Data[i][0]:
kill_Data[i][1] = int(kill_Data[i][1]) + 1
listchk = 1
if listchk == 0:
tmp_fk.append(sayMessage)
tmp_fk.append(1)
kill_Data.append(tmp_fk)
tmp_fk = []
embed = discord.Embed(
description= ':skull_crossbones:' + sayMessage + ' 따히!\n',
color=0xff00ff
)
await client.get_channel(channel).send(embed=embed, tts=False)
else:
await client.get_channel(channel).send( '```제대로 된 아이디를 입력해주세요!\n```', tts=False)
################ 킬삭제 ################
if message.content.startswith(command[24]+' '):
tmp_sayMessage = message.content
sayMessage = tmp_sayMessage[len(command[24])+1:]
tmp_fk = []
indexchk = 0
if sayMessage != ' ':
for i in range(len(kill_Data)):
if sayMessage == kill_Data[i][0]:
indexchk = i + 1
if indexchk != 0:
del(kill_Data[indexchk-1])
await client.get_channel(channel).send( '```<' + sayMessage + '> 킬 목록 삭제완료!\n```', tts=False)
else :
await client.get_channel(channel).send( '```킬 목록에 등록되어 있지 않습니다!\n```', tts=False)
else:
await client.get_channel(channel).send( '```제대로 된 아이디를 입력해주세요!\n```', tts=False)
client.loop.create_task(task())
try:
client.loop.run_until_complete(client.start(access_token))
except SystemExit:
handle_exit()
except KeyboardInterrupt:
handle_exit()
#client.loop.close()
#print("Program ended")
#break
print("Bot restarting")
client = discord.Client(loop=client.loop)
| 37.902905
| 274
| 0.590228
|
1df59a14c8896828be6998a4961f5ee84d4f5850
| 2,955
|
py
|
Python
|
som/postprocessing.py
|
fredhusser/som
|
7597d721394f2d011bc7cbccdea4ef9a9b845804
|
[
"BSD-3-Clause"
] | null | null | null |
som/postprocessing.py
|
fredhusser/som
|
7597d721394f2d011bc7cbccdea4ef9a9b845804
|
[
"BSD-3-Clause"
] | null | null | null |
som/postprocessing.py
|
fredhusser/som
|
7597d721394f2d011bc7cbccdea4ef9a9b845804
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding = utf-8
# filename = postprocessing.py
# author = Frederic Husser
# Description:
# This module contains the post-processing tools for working
# on the data from the main data analysis pipeline. The raw data
# can be prepared for visualization and for publication into
# a DataBase.
import numpy as np
from scipy import sparse
from sklearn.preprocessing import Normalizer
# Utilities for the processing of 2D maps
from som import debug
from som import _get_node_coordinates, _get_neighbors, c_get_bmu  # c_get_bmu is called below; assumed to be provided by the som package
def build_H_matrix(X, kohonen):
"""Build the reprentation of the hits matrix from the INPUT space into
the OUTPUT space, given the sample matrix X.
    The samples must come from the same feature space as the one used for
    training, i.e. have the same n_features dimensions.
Parameters
----------
X: array-like CSR matrix
Sparse representation of the samples in the INPUT space in a CSR
matrix of shape = [n_samples, n_features]. Must be the same as
that used for fitting the map.
Return
------
H_matrix : ndarray
Numpy array of shape = [n_nodes] of integers giving for each node
the number of best matching documents
"""
# Initialize the hits matrix as an ndarray of ints
debug("Build_H_matrix","Starting the counting of hits...")
debug("Build_H_matrix","Using %d documents with %d features" % X.shape)
n_nodes = kohonen.shape[0]
H_matrix = np.zeros(n_nodes, dtype = np.int)
KN = Normalizer().fit_transform(kohonen)
# Get the best matching units for all vectors
n_samples, n_features = X.shape
for i in xrange(n_samples):
        bmu_idx = c_get_bmu(KN, X.getrow(i))[0]
        H_matrix[bmu_idx] += 1
        print bmu_idx
return H_matrix
def build_P_matrix(kohonen, features):
"""Build the projection matrix given a set of features from
the INPUT space of the Kohonen map. The projection is based
on the calculation of the mean of the selected features for
each node.
Parameters
---------
features: ndarray of integers
Numpy array of the features to be selected for the projection
If None all features are selected.
Return
------
P_matrix: ndarray
Numpy array of shape n_nodes, dtype= np.double giving the value
of the mean of the projected samples applied on the normalized
Kohonen matrix
"""
# Normalization of the Kohonen matrix is necessary for validity
debug("Build_P_matrix", "Starting the projection...")
KN = Normalizer().fit_transform(kohonen).tolil()
n_nodes, n_features = KN.shape
P_matrix = np.zeros(n_nodes)
# Slice over the rows of the matrix and build the projection
    for i in np.arange(n_nodes):
        cols = np.asarray(KN.rows[i])
        data = np.asarray(KN.data[i], dtype=np.double)
        # data is positional within the row, so map the selected feature (column)
        # indices onto row positions before averaging
        mask = np.in1d(cols, features)
        if mask.any():
            P_matrix[i] = np.mean(data[mask])
return P_matrix
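

# Minimal usage sketch (not part of the original module). It only illustrates the
# expected input shapes with random data; it assumes the som package really exposes
# c_get_bmu as imported above and that debug accepts a (tag, message) pair as used
# elsewhere in this file.
if __name__ == "__main__":
    n_samples, n_features, n_nodes = 50, 20, 16
    # Random sparse samples in the INPUT space and a random Kohonen codebook
    X_demo = sparse.rand(n_samples, n_features, density=0.3, format="csr")
    kohonen_demo = np.random.rand(n_nodes, n_features)

    hits = build_H_matrix(X_demo, kohonen_demo)
    # build_P_matrix normalizes and converts to LIL, so a sparse codebook is passed
    projection = build_P_matrix(sparse.csr_matrix(kohonen_demo), np.arange(5))
    debug("demo", "hits shape %s / projection shape %s" % (hits.shape, projection.shape))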
| 34.360465
| 75
| 0.690694
|
00b2a802254f522902e445aaca97ed661f86fef5
| 102,722
|
py
|
Python
|
pysnmp-with-texts/LISP-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/LISP-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/LISP-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module LISP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/LISP-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:04:38 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
AddressFamilyNumbers, = mibBuilder.importSymbols("IANA-ADDRESS-FAMILY-NUMBERS-MIB", "AddressFamilyNumbers")
MplsL3VpnName, = mibBuilder.importSymbols("MPLS-L3VPN-STD-MIB", "MplsL3VpnName")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
Counter32, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, mib_2, Bits, ObjectIdentity, NotificationType, ModuleIdentity, MibIdentifier, iso, Counter64, Unsigned32, IpAddress, Integer32, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "mib-2", "Bits", "ObjectIdentity", "NotificationType", "ModuleIdentity", "MibIdentifier", "iso", "Counter64", "Unsigned32", "IpAddress", "Integer32", "Gauge32")
TimeStamp, TruthValue, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TimeStamp", "TruthValue", "TextualConvention", "DisplayString")
lispMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 220))
lispMIB.setRevisions(('2013-10-21 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: lispMIB.setRevisionsDescriptions(('Initial version of the IETF LISP-MIB module. Published as RFC 7052.',))
if mibBuilder.loadTexts: lispMIB.setLastUpdated('201310210000Z')
if mibBuilder.loadTexts: lispMIB.setOrganization('IETF Locator/ID Separation Protocol (LISP) Working Group')
if mibBuilder.loadTexts: lispMIB.setContactInfo('Email: lisp@ietf.org WG charter: http://datatracker.ietf.org/wg/lisp/charter/')
if mibBuilder.loadTexts: lispMIB.setDescription("This MIB module contains managed objects to support monitoring devices that support the Locator/ID Separation Protocol (LISP). Copyright (c) 2013 IETF Trust and the persons identified as authors of the code. All rights reserved. Redistribution and use in source and binary forms, with or without modification, is permitted pursuant to, and subject to the license terms contained in, the Simplified BSD License set forth in Section 4.c of the IETF Trust's Legal Provisions Relating to IETF Documents (http://trustee.ietf.org/license-info).")
class LispAddressType(TextualConvention, OctetString):
reference = 'RFC 6830, Section 14.2 and LISP Canonical Address Format (LCAF), Work in Progress, March 2013.'
description = 'LISP architecture can be applied to a wide variety of address-families. This textual-convention is a generalization for representing addresses belonging to those address-families. For convenience, this document refers to any such address as a LISP address. LispAddressType textual-convention consists of the following four-tuple: 1. IANA Address Family Number: A field of length 2 octets, whose value is of the form following the assigned AddressFamilyNumbers textual-convention described in IANA-ADDRESS-FAMILY-NUMBERS-MIB DEFINITIONS, available from http://www.iana.org/assignments/ianaaddressfamilynumbers-mib. The enumerations are also listed in [IANA]. Note that this list of address family numbers is maintained by IANA. 2. Length of LISP address: A field of length 1 octet, whose value indicates the octet-length of the next (third) field of this LispAddressType four-tuple. 3. LISP address: A field of variable length as indicated in the previous (second) field, whose value is an address of the IANA Address Family indicated in the first field of this LispAddressType four-tuple. Note that any of the IANA Address Families can be represented. Particularly when the address family is LISP Canonical Address Format (LCAF) with IANA-assigned Address Family Number 16387, then the first octet of this field indicates the LCAF type, and the rest of this field is same as the encoding format of the LISP Canonical Address after the length field, as defined in LCAF document. 4. Mask-length of address: A field of length 1 octet, whose value is the mask-length to be applied to the LISP address specified in the previous (third) field. To illustrate the use of this object, consider the LISP MIB Object below titled lispMapCacheEntry. This object begins with the following entities: lispMapCacheEntry ::= SEQUENCE { lispMapCacheEidLength INTEGER, lispMapCacheEid LispAddressType, ... [skip] ... Example 1: Suppose that the IPv4 EID-Prefix stored is 192.0.2.0/24. In this case, the values within lispMapCacheEntry would be: lispMapCacheEidLength = 8 lispMapCacheEid = 1, 4, 192.0.2.0, 24 ... [skip] ... where 8 is the total length in octets of the next object (lispMapCacheEID of type LispAddressType). Then, the value 1 indicates the IPv4 AF (per the IANA-ADDRESS-FAMILY-NUMBERS-MIB), the value 4 indicates that the AF is 4 octets in length, 192.0.2.0 is the IPv4 address, and the value 24 is the mask-length in bits. Note that the lispMapCacheEidLength value of 8 is used to compute the length of the fourth (last) field in lispMapCacheEid to be 1 octet -- as computed by 8 - (2 + 1 + 4) = 1. Example 2: Suppose that the IPv6 EID-Prefix stored is 2001:db8:a::/48. In this case, the values within lispMapCacheEntry would be: lispMapCacheEidLength = 20 lispMapCacheEid = 2, 16, 2001:db8:a::, 48 ... [skip] ... where 20 is the total length in octets of the next object (lispMapCacheEID of type LispAddressType). Then, the value 2 indicates the IPv6 AF (per the IANA-ADDRESS-FAMILY-NUMBERS-MIB), the value 16 indicates that the AF is 16 octets in length, 2001:db8:a:: is the IPv6 address, and the value 48 is the mask-length in bits. Note that the lispMapCacheEidLength value of 20 is used to compute the length of the fourth (last) field in lispMapCacheEid to be 1 octet -- as computed by 20 - (2 + 1 + 16) = 1. Example 3: As an example where LCAF is used, suppose that the IPv4 EID-Prefix stored is 192.0.2.0/24 and it is part of LISP Instance ID 101. 
In this case, the values within lispMapCacheEntry would be: lispMapCacheEidLength = 11 lispMapCacheEid = 16387, 7, 2, 101, 1, 192.0.2.0, 24 ... [skip] ... where 11 is the total length in octets of the next object (lispMapCacheEID of type LispAddressType). Then, the value 16387 indicates the LCAF AF (see the IANA-ADDRESS-FAMILY-NUMBERS-MIB), the value 7 indicates that the LCAF AF is 7 octets in length in this case, 2 indicates that LCAF Type 2 encoding is used (see the LCAF document), 101 gives the Instance ID, 1 gives the AFI (per the IANA-ADDRESS-FAMILY-NUMBERS-MIB) for an IPv4 address, 192.0.2.0 is the IPv4 address, and 24 is the mask-length in bits. Note that the lispMapCacheEidLength value of 11 octets is used to compute the length of the last field in lispMapCacheEid to be 1 octet -- as computed by 11 - (2 + 1 + 1 + 1 + 1 + 4) = 1. Note: all LISP header formats and locations of specific flags, bits, and fields are as given in the base LISP references of RFC 6830, RFC 6832, and RFC 6833.'
status = 'current'
displayHint = '39a'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(5, 39)
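# Illustration (not part of the generated MIB): octet layout of a LispAddressType
# value for the IPv4 EID-Prefix 192.0.2.0/24 from Example 1 of the description
# above -- 2-octet AFI, 1-octet address length, the address, 1-octet mask-length
# (8 octets in total, matching lispMapCacheEidLength = 8 in that example):
#     0x00 0x01             AFI = 1 (IPv4)
#     0x04                  address length = 4
#     0xC0 0x00 0x02 0x00   192.0.2.0
#     0x18                  mask-length = 24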
lispObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 220, 1))
lispConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 220, 2))
lispFeaturesTable = MibTable((1, 3, 6, 1, 2, 1, 220, 1, 1), )
if mibBuilder.loadTexts: lispFeaturesTable.setReference('RFC 6830, Section 4, Section 5.5., Section 6.3.')
if mibBuilder.loadTexts: lispFeaturesTable.setStatus('current')
if mibBuilder.loadTexts: lispFeaturesTable.setDescription('This table represents the ON/OFF status of the various LISP features that can be enabled on LISP devices.')
lispFeaturesEntry = MibTableRow((1, 3, 6, 1, 2, 1, 220, 1, 1, 1), ).setIndexNames((0, "LISP-MIB", "lispFeaturesInstanceID"), (0, "LISP-MIB", "lispFeaturesAddressFamily"))
if mibBuilder.loadTexts: lispFeaturesEntry.setStatus('current')
if mibBuilder.loadTexts: lispFeaturesEntry.setDescription('An entry (conceptual row) in the lispFeaturesTable.')
lispFeaturesInstanceID = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 16777215)))
if mibBuilder.loadTexts: lispFeaturesInstanceID.setStatus('current')
if mibBuilder.loadTexts: lispFeaturesInstanceID.setDescription('This represents the Instance ID of the LISP header. An Instance ID in the LISP address encoding helps uniquely identify the AFI-based address space to which a given EID belongs. Its default value is 0.')
lispFeaturesAddressFamily = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 1, 1, 2), AddressFamilyNumbers())
if mibBuilder.loadTexts: lispFeaturesAddressFamily.setStatus('current')
if mibBuilder.loadTexts: lispFeaturesAddressFamily.setDescription('The IANA Address Family Number of destination address of packets that this LISP device is enabled to process.')
lispFeaturesItrEnabled = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 1, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispFeaturesItrEnabled.setStatus('current')
if mibBuilder.loadTexts: lispFeaturesItrEnabled.setDescription('Indicates the status of ITR role on this device. If this object is true, then the ITR feature is enabled.')
lispFeaturesEtrEnabled = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 1, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispFeaturesEtrEnabled.setStatus('current')
if mibBuilder.loadTexts: lispFeaturesEtrEnabled.setDescription('Indicates the status of ETR role on this device. If this object is true, then the ETR feature is enabled.')
lispFeaturesProxyItrEnabled = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 1, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispFeaturesProxyItrEnabled.setStatus('current')
if mibBuilder.loadTexts: lispFeaturesProxyItrEnabled.setDescription('Indicates the status of Proxy-ITR role on this device. If this object is true, then the Proxy-ITR feature is enabled.')
lispFeaturesProxyEtrEnabled = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 1, 1, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispFeaturesProxyEtrEnabled.setStatus('current')
if mibBuilder.loadTexts: lispFeaturesProxyEtrEnabled.setDescription('Indicates the status of Proxy-ETR role on this device. If this object is true, then the Proxy-ETR feature is enabled.')
lispFeaturesMapServerEnabled = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 1, 1, 7), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispFeaturesMapServerEnabled.setStatus('current')
if mibBuilder.loadTexts: lispFeaturesMapServerEnabled.setDescription('Indicates the status of Map Server role on this device. If this object is true, then the Map-Server feature is enabled.')
lispFeaturesMapResolverEnabled = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 1, 1, 8), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispFeaturesMapResolverEnabled.setStatus('current')
if mibBuilder.loadTexts: lispFeaturesMapResolverEnabled.setDescription('Indicates the status of Map Resolver role on this device. If this object is true, then Map-Resolver feature is enabled.')
lispFeaturesMapCacheSize = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 1, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispFeaturesMapCacheSize.setStatus('current')
if mibBuilder.loadTexts: lispFeaturesMapCacheSize.setDescription('Size of EID-to-RLOC map-cache on this device.')
lispFeaturesMapCacheLimit = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 1, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispFeaturesMapCacheLimit.setStatus('current')
if mibBuilder.loadTexts: lispFeaturesMapCacheLimit.setDescription('Maximum permissible entries in EID-to-RLOC map-cache on this device.')
lispFeaturesEtrMapCacheTtl = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 1, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispFeaturesEtrMapCacheTtl.setStatus('current')
if mibBuilder.loadTexts: lispFeaturesEtrMapCacheTtl.setDescription('The stored Record TTL of the EID-to-RLOC map record in the map-cache.')
lispFeaturesRlocProbeEnabled = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 1, 1, 12), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispFeaturesRlocProbeEnabled.setStatus('current')
if mibBuilder.loadTexts: lispFeaturesRlocProbeEnabled.setDescription('Indicates the status of RLOC-Probing feature on this device. If this object is true, then this feature is enabled.')
lispFeaturesEtrAcceptMapDataEnabled = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 1, 1, 13), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispFeaturesEtrAcceptMapDataEnabled.setStatus('current')
if mibBuilder.loadTexts: lispFeaturesEtrAcceptMapDataEnabled.setDescription('Indicates the status of accepting piggybacked mapping data received in a map-request on this device. If this object is true, then this device accepts piggybacked mapping data.')
lispFeaturesEtrAcceptMapDataVerifyEnabled = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 1, 1, 14), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispFeaturesEtrAcceptMapDataVerifyEnabled.setStatus('current')
if mibBuilder.loadTexts: lispFeaturesEtrAcceptMapDataVerifyEnabled.setDescription('Indicates the status of verifying accepted piggybacked mapping data received in a map-request on this device. If this object is true, then this device verifies accepted piggybacked mapping data.')
lispFeaturesRouterTimeStamp = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 1, 1, 15), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispFeaturesRouterTimeStamp.setStatus('current')
if mibBuilder.loadTexts: lispFeaturesRouterTimeStamp.setDescription('The value of sysUpTime at which the LISP feature was enabled on this device. If this information was present at the most recent reinitialization of the local management subsystem, then this object contains a zero value.')
lispIidToVrfTable = MibTable((1, 3, 6, 1, 2, 1, 220, 1, 2), )
if mibBuilder.loadTexts: lispIidToVrfTable.setReference('RFC 6830, Section 5.5., and RFC 4382, Section 7.')
if mibBuilder.loadTexts: lispIidToVrfTable.setStatus('current')
if mibBuilder.loadTexts: lispIidToVrfTable.setDescription('This table represents the mapping of a LISP Instance ID to a VRF.')
lispIidToVrfEntry = MibTableRow((1, 3, 6, 1, 2, 1, 220, 1, 2, 1), ).setIndexNames((0, "LISP-MIB", "lispFeaturesInstanceID"))
if mibBuilder.loadTexts: lispIidToVrfEntry.setStatus('current')
if mibBuilder.loadTexts: lispIidToVrfEntry.setDescription('An entry (conceptual row) in the lispIidToVrfTable.')
lispIidToVrfName = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 2, 1, 1), MplsL3VpnName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispIidToVrfName.setStatus('current')
if mibBuilder.loadTexts: lispIidToVrfName.setDescription('The identifier for each VPN that is mapped to the given LISP Instance ID.')
lispGlobalStatsTable = MibTable((1, 3, 6, 1, 2, 1, 220, 1, 3), )
if mibBuilder.loadTexts: lispGlobalStatsTable.setReference('RFC 6830, Section 6.1.')
if mibBuilder.loadTexts: lispGlobalStatsTable.setStatus('current')
if mibBuilder.loadTexts: lispGlobalStatsTable.setDescription('This table provides global statistics for a given Instance ID per address family on a LISP device.')
lispGlobalStatsEntry = MibTableRow((1, 3, 6, 1, 2, 1, 220, 1, 3, 1), ).setIndexNames((0, "LISP-MIB", "lispFeaturesInstanceID"), (0, "LISP-MIB", "lispFeaturesAddressFamily"))
if mibBuilder.loadTexts: lispGlobalStatsEntry.setStatus('current')
if mibBuilder.loadTexts: lispGlobalStatsEntry.setDescription('An entry (conceptual row) in the lispGlobalStatsTable.')
lispGlobalStatsMapRequestsIn = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 3, 1, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispGlobalStatsMapRequestsIn.setStatus('current')
if mibBuilder.loadTexts: lispGlobalStatsMapRequestsIn.setDescription('Total number of map requests received by this device for any EID-Prefix of the given address family and Instance ID. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of LISP features being removed, which can be detected by observing the value of lispFeaturesRouterTimeStamp.')
lispGlobalStatsMapRequestsOut = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 3, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispGlobalStatsMapRequestsOut.setStatus('current')
if mibBuilder.loadTexts: lispGlobalStatsMapRequestsOut.setDescription('Total number of map requests sent by this device for any EID-Prefix of the given address family and Instance ID. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of LISP features being removed, which can be detected by observing the value of lispFeaturesRouterTimeStamp.')
lispGlobalStatsMapRepliesIn = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 3, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispGlobalStatsMapRepliesIn.setStatus('current')
if mibBuilder.loadTexts: lispGlobalStatsMapRepliesIn.setDescription('Total number of map replies received by this device for any EID-Prefix of the given address family and Instance ID. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of LISP features being removed, which can be detected by observing the value of lispFeaturesRouterTimeStamp.')
lispGlobalStatsMapRepliesOut = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 3, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispGlobalStatsMapRepliesOut.setStatus('current')
if mibBuilder.loadTexts: lispGlobalStatsMapRepliesOut.setDescription('Total number of map replies sent by this device for any EID prefix of the given address family and Instance ID. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of LISP features being removed, which can be detected by observing the value of lispFeaturesRouterTimeStamp.')
lispGlobalStatsMapRegistersIn = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 3, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispGlobalStatsMapRegistersIn.setStatus('current')
if mibBuilder.loadTexts: lispGlobalStatsMapRegistersIn.setDescription('Total number of map registers received by this device for any EID-Prefix of the given address family and Instance ID. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of LISP features being removed, which can be detected by observing the value of lispFeaturesRouterTimeStamp.')
lispGlobalStatsMapRegistersOut = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 3, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispGlobalStatsMapRegistersOut.setStatus('current')
if mibBuilder.loadTexts: lispGlobalStatsMapRegistersOut.setDescription('Total number of map registers sent by this device for any EID-Prefix of the given address family and Instance ID. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of LISP features being removed, which can be detected by observing the value of lispFeaturesRouterTimeStamp.')
lispMappingDatabaseTable = MibTable((1, 3, 6, 1, 2, 1, 220, 1, 4), )
if mibBuilder.loadTexts: lispMappingDatabaseTable.setReference('RFC 6830, Section 6.')
if mibBuilder.loadTexts: lispMappingDatabaseTable.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseTable.setDescription('This table represents the EID-to-RLOC mapping database that contains the EID-Prefix to RLOC mappings configured on an ETR. This table represents all such mappings for the given LISP site to which this device belongs.')
lispMappingDatabaseEntry = MibTableRow((1, 3, 6, 1, 2, 1, 220, 1, 4, 1), ).setIndexNames((0, "LISP-MIB", "lispMappingDatabaseEidLength"), (0, "LISP-MIB", "lispMappingDatabaseEid"))
if mibBuilder.loadTexts: lispMappingDatabaseEntry.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseEntry.setDescription('An entry (conceptual row) in lispMappingDatabaseTable.')
lispMappingDatabaseEidLength = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 39)))
if mibBuilder.loadTexts: lispMappingDatabaseEidLength.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseEidLength.setDescription('This object gives the octet-length of lispMappingDatabaseEid.')
lispMappingDatabaseEid = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 4, 1, 2), LispAddressType())
if mibBuilder.loadTexts: lispMappingDatabaseEid.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseEid.setDescription('The EID-Prefix of the mapping database.')
lispMappingDatabaseLsb = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 4, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseLsb.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseLsb.setDescription('The locator status bits for this EID-Prefix.')
lispMappingDatabaseEidPartitioned = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 4, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseEidPartitioned.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseEidPartitioned.setDescription('Indicates if this device is partitioned from the site that contains this EID-Prefix. If this object is true, then it means this device is partitioned from the site.')
lispMappingDatabaseTimeStamp = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 4, 1, 5), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseTimeStamp.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseTimeStamp.setDescription('The value of sysUpTime at which the EID Prefix information represented by this mapping database entry was configured on this device. If this information was present at the most recent reinitialization of the local management subsystem, then this object contains a zero value.')
lispMappingDatabaseDecapOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 4, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseDecapOctets.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseDecapOctets.setDescription('The number of octets, after decapsulation, of LISP packets that were decapsulated by this device addressed to a host within this EID-Prefix. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of LISP features being removed, which can be detected by observing the value of lispMappingDatabaseTimeStamp.')
lispMappingDatabaseDecapPackets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 4, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseDecapPackets.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseDecapPackets.setDescription('The number of LISP packets that were decapsulated by this device addressed to a host within this EID-Prefix. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of LISP features being removed, which can be detected by observing the value of lispMappingDatabaseTimeStamp.')
lispMappingDatabaseEncapOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 4, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseEncapOctets.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseEncapOctets.setDescription('The number of octets, before encapsulation, of LISP packets that were encapsulated by this device, whose inner header source address matched this EID-Prefix. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of LISP features being removed, which can be detected by observing the value of lispMappingDatabaseTimeStamp.')
lispMappingDatabaseEncapPackets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 4, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseEncapPackets.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseEncapPackets.setDescription('The number of LISP packets that were encapsulated by this device whose inner header source address matched this EID prefix. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of LISP features being removed, which can be detected by observing the value of lispMappingDatabaseTimeStamp.')
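# --- Illustrative sketch (not part of the generated LISP-MIB definitions) ---
# The mapping-database encap/decap objects above are Counter64 values whose
# discontinuities are signalled by lispMappingDatabaseTimeStamp.  As a minimal
# sketch of how a poller might use them, the helper below turns two successive
# samples into a per-second rate and discards any interval that spans a
# discontinuity.  The (value, discontinuity_timestamp, sys_uptime) sample
# layout is an assumption of this example, not something defined by the MIB.
def lisp_counter64_rate(prev_sample, curr_sample):
    """Return the per-second rate between two samples, or None if unusable."""
    prev_val, prev_disc, prev_up = prev_sample
    curr_val, curr_disc, curr_up = curr_sample
    # A change in lispMappingDatabaseTimeStamp means the mapping was removed
    # and re-added, so the counter may have restarted; skip this interval.
    if prev_disc != curr_disc:
        return None
    ticks = curr_up - prev_up          # sysUpTime is in hundredths of a second
    if ticks <= 0:
        return None
    delta = curr_val - prev_val
    if delta < 0:                      # handle an (unlikely) Counter64 wrap
        delta += 1 << 64
    return delta / (ticks / 100.0)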
lispMappingDatabaseLocatorTable = MibTable((1, 3, 6, 1, 2, 1, 220, 1, 5), )
if mibBuilder.loadTexts: lispMappingDatabaseLocatorTable.setReference('RFC 6830, Section 6.2.')
if mibBuilder.loadTexts: lispMappingDatabaseLocatorTable.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseLocatorTable.setDescription('This table represents the set of routing locators per EID prefix contained in the EID-to-RLOC database configured on this ETR.')
lispMappingDatabaseLocatorEntry = MibTableRow((1, 3, 6, 1, 2, 1, 220, 1, 5, 1), ).setIndexNames((0, "LISP-MIB", "lispMappingDatabaseEidLength"), (0, "LISP-MIB", "lispMappingDatabaseEid"), (0, "LISP-MIB", "lispMappingDatabaseLocatorRlocLength"), (0, "LISP-MIB", "lispMappingDatabaseLocatorRloc"))
if mibBuilder.loadTexts: lispMappingDatabaseLocatorEntry.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseLocatorEntry.setDescription('An entry (conceptual row) in the lispMappingDatabaseLocatorTable.')
lispMappingDatabaseLocatorRlocLength = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 39)))
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocLength.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocLength.setDescription('This object is used to get the octet-length of lispMappingDatabaseLocatorRloc.')
lispMappingDatabaseLocatorRloc = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 5, 1, 2), LispAddressType())
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRloc.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRloc.setDescription('This object is a locator for the given EID-Prefix in the mapping database.')
lispMappingDatabaseLocatorRlocPriority = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 5, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocPriority.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocPriority.setDescription('The unicast priority of the RLOC.')
lispMappingDatabaseLocatorRlocWeight = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 5, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocWeight.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocWeight.setDescription('The unicast weight of the RLOC.')
lispMappingDatabaseLocatorRlocMPriority = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 5, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocMPriority.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocMPriority.setDescription('The multicast priority of the RLOC.')
lispMappingDatabaseLocatorRlocMWeight = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 5, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocMWeight.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocMWeight.setDescription('The multicast weight of the RLOC.')
lispMappingDatabaseLocatorRlocState = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 5, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("unreachable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocState.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocState.setDescription('The state of this RLOC as per this device. (1 = RLOC is up; 2 = RLOC is down; 3 = RLOC is unreachable).')
lispMappingDatabaseLocatorRlocLocal = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 5, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("siteself", 1), ("sitelocal", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocLocal.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocLocal.setDescription('Indicates whether the RLOC is local to this device (or remote, meaning local to another device in the same LISP site). (1 = RLOC is an address on this device; 2 = RLOC is an address on another device).')
lispMappingDatabaseLocatorRlocTimeStamp = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 5, 1, 9), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocTimeStamp.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocTimeStamp.setDescription('The value of sysUpTime at which the RLOC of the EID Prefix represented by this mapping database entry was configured on this device. If this information was present at the most recent reinitialization of the local management subsystem, then this object contains a zero value.')
lispMappingDatabaseLocatorRlocDecapOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 5, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocDecapOctets.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocDecapOctets.setDescription('The number of octets of LISP packets that were addressed to this RLOC of the EID-Prefix and were decapsulated. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of database mappings getting reconfigured or RLOC status changes, which can be detected by observing the value of lispMappingDatabaseLocatorRlocTimeStamp.')
lispMappingDatabaseLocatorRlocDecapPackets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 5, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocDecapPackets.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocDecapPackets.setDescription('The number of LISP packets that were addressed to this RLOC of the EID-Prefix and were decapsulated. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of database mappings getting reconfigured or RLOC status changes, which can be detected by observing the value of lispMappingDatabaseLocatorRlocTimeStamp.')
lispMappingDatabaseLocatorRlocEncapOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 5, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocEncapOctets.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocEncapOctets.setDescription('The number of octets of LISP packets that were encapsulated by this device using this RLOC address as the source, and that were sourced by an address of this EID-Prefix. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of database mappings getting reconfigured or RLOC status changes, which can be detected by observing the value of lispMappingDatabaseLocatorRlocTimeStamp.')
lispMappingDatabaseLocatorRlocEncapPackets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 5, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocEncapPackets.setStatus('current')
if mibBuilder.loadTexts: lispMappingDatabaseLocatorRlocEncapPackets.setDescription('The number of LISP packets that were encapsulated by this device using this RLOC address as the source and that were sourced by an address of this EID-Prefix. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of database mappings getting reconfigured or RLOC status changes, which can be detected by observing the value of lispMappingDatabaseLocatorRlocTimeStamp.')
lispMapCacheTable = MibTable((1, 3, 6, 1, 2, 1, 220, 1, 6), )
if mibBuilder.loadTexts: lispMapCacheTable.setReference('RFC 6830, Sections 6 and 12.')
if mibBuilder.loadTexts: lispMapCacheTable.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheTable.setDescription('This table represents the short-lived, on-demand table on an ITR that stores, tracks, and is responsible for timing-out and otherwise validating EID-to-RLOC mappings.')
lispMapCacheEntry = MibTableRow((1, 3, 6, 1, 2, 1, 220, 1, 6, 1), ).setIndexNames((0, "LISP-MIB", "lispMapCacheEidLength"), (0, "LISP-MIB", "lispMapCacheEid"))
if mibBuilder.loadTexts: lispMapCacheEntry.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheEntry.setDescription('An entry (conceptual row) in the lispMapCacheTable.')
lispMapCacheEidLength = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 39)))
if mibBuilder.loadTexts: lispMapCacheEidLength.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheEidLength.setDescription('This object is used to get the octet-length of lispMapCacheEid.')
lispMapCacheEid = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 6, 1, 2), LispAddressType())
if mibBuilder.loadTexts: lispMapCacheEid.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheEid.setDescription('The EID-Prefix in the mapping cache.')
lispMapCacheEidTimeStamp = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 6, 1, 3), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheEidTimeStamp.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheEidTimeStamp.setDescription('The value of sysUpTime at which the EID Prefix information represented by this entry was learned by this device. If this information was present at the most recent reinitialization of the local management subsystem, then this object contains a zero value.')
lispMapCacheEidExpiryTime = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 6, 1, 4), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheEidExpiryTime.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheEidExpiryTime.setDescription('The time remaining before the ITR times-out this EID-Prefix.')
lispMapCacheEidState = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 6, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheEidState.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheEidState.setDescription('This object is used to indicate the activity of this EID prefix. If this object is true, then it means this EID prefix is seeing activity.')
lispMapCacheEidAuthoritative = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 6, 1, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheEidAuthoritative.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheEidAuthoritative.setDescription('This object is used to indicate whether the EID-Prefix was installed by an authoritative map-reply. If this object is true, then it means this EID-Prefix was installed by an authoritative map-reply.')
lispMapCacheEidDecapOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 6, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheEidDecapOctets.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheEidDecapOctets.setDescription('The number of octets of LISP packets that were decapsulated by this device and were sourced from a remote host within this EID-Prefix. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of cache being removed and replaced, which can be detected by observing the value of lispMapCacheEidTimeStamp.')
lispMapCacheEidDecapPackets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 6, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheEidDecapPackets.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheEidDecapPackets.setDescription('The number of LISP packets that were decapsulated by this device and were sourced from a remote host within this EID-Prefix. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of cache being removed and replaced, which can be detected by observing the value of lispMapCacheEidTimeStamp.')
lispMapCacheEidEncapOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 6, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheEidEncapOctets.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheEidEncapOctets.setDescription('The number of octets of LISP packets that were encapsulated by this device using the given EID-Prefix in the map-cache. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of cache being removed and replaced, which can be detected by observing the value of lispMapCacheEidTimeStamp.')
lispMapCacheEidEncapPackets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 6, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheEidEncapPackets.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheEidEncapPackets.setDescription('The number of LISP packets that were encapsulated by this device using the given EID-Prefix in the map-cache. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of cache being removed and replaced, which can be detected by observing the value of lispMapCacheEidTimeStamp.')
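# --- Illustrative sketch (not part of the generated LISP-MIB definitions) ---
# One way a manager could read the map-cache columns defined above is with
# pysnmp's high-level API, as sketched below.  The agent address
# ('198.51.100.1') and community string ('public') are placeholders, and
# resolving objects by 'LISP-MIB' name assumes this compiled module is on the
# manager's MIB search path.
def walk_lisp_map_cache(host='198.51.100.1', community='public'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    for error_indication, error_status, _error_index, var_binds in nextCmd(
            SnmpEngine(),
            CommunityData(community),
            UdpTransportTarget((host, 161)),
            ContextData(),
            ObjectType(ObjectIdentity('LISP-MIB', 'lispMapCacheEidExpiryTime')),
            ObjectType(ObjectIdentity('LISP-MIB', 'lispMapCacheEidState')),
            lexicographicMode=False):   # stop at the end of the table
        if error_indication or error_status:
            break                       # give up on the first error
        for name, value in var_binds:
            print(name.prettyPrint(), '=', value.prettyPrint())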
lispMapCacheLocatorTable = MibTable((1, 3, 6, 1, 2, 1, 220, 1, 7), )
if mibBuilder.loadTexts: lispMapCacheLocatorTable.setReference('RFC 6830, Section 6.3.')
if mibBuilder.loadTexts: lispMapCacheLocatorTable.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorTable.setDescription('This table represents the set of locators per EID-Prefix contained in the map-cache table of an ITR.')
lispMapCacheLocatorEntry = MibTableRow((1, 3, 6, 1, 2, 1, 220, 1, 7, 1), ).setIndexNames((0, "LISP-MIB", "lispMapCacheEidLength"), (0, "LISP-MIB", "lispMapCacheEid"), (0, "LISP-MIB", "lispMapCacheLocatorRlocLength"), (0, "LISP-MIB", "lispMapCacheLocatorRloc"))
if mibBuilder.loadTexts: lispMapCacheLocatorEntry.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorEntry.setDescription('An entry (conceptual row) in the lispMapCacheLocatorTable.')
lispMapCacheLocatorRlocLength = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 39)))
if mibBuilder.loadTexts: lispMapCacheLocatorRlocLength.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRlocLength.setDescription('This object is used to get the octet-length of lispMapCacheLocatorRloc.')
lispMapCacheLocatorRloc = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 2), LispAddressType())
if mibBuilder.loadTexts: lispMapCacheLocatorRloc.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRloc.setDescription('The locator for the EID-Prefix in the mapping cache.')
lispMapCacheLocatorRlocPriority = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheLocatorRlocPriority.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRlocPriority.setDescription('The unicast priority of the RLOC for this EID-Prefix (0-255); lower is more preferred.')
lispMapCacheLocatorRlocWeight = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheLocatorRlocWeight.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRlocWeight.setDescription('The unicast weight of the RLOC for this EID-Prefix, expressed as a percentage (0-100).')
lispMapCacheLocatorRlocMPriority = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheLocatorRlocMPriority.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRlocMPriority.setDescription('The multicast priority of the RLOC for this EID-Prefix (0-255); lower is more preferred.')
lispMapCacheLocatorRlocMWeight = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheLocatorRlocMWeight.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRlocMWeight.setDescription('The multicast weight of the RLOC for this EID-Prefix, expressed as a percentage (0-100).')
lispMapCacheLocatorRlocState = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("unreachable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheLocatorRlocState.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRlocState.setDescription('The state of this RLOC as per this device (1 = RLOC is up; 2 = RLOC is down; 3 = RLOC is unreachable).')
lispMapCacheLocatorRlocTimeStamp = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 8), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheLocatorRlocTimeStamp.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRlocTimeStamp.setDescription('The value of sysUpTime at which the RLOC of EID-Prefix information represented by this entry was learned by this device. If this information was present at the most recent reinitialization of the local management subsystem, then this object contains a zero value.')
lispMapCacheLocatorRlocLastPriorityChange = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 9), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheLocatorRlocLastPriorityChange.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRlocLastPriorityChange.setDescription('Time elapsed since the last change of the unicast priority of the RLOC for this EID-Prefix. Note that this is independent of lispMapCacheLocatorRlocTimeStamp.')
lispMapCacheLocatorRlocLastWeightChange = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 10), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheLocatorRlocLastWeightChange.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRlocLastWeightChange.setDescription('Time elapsed since the last change of the unicast weight of the RLOC for this EID-Prefix. Note that this is independent of lispMapCacheLocatorRlocTimeStamp.')
lispMapCacheLocatorRlocLastMPriorityChange = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 11), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheLocatorRlocLastMPriorityChange.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRlocLastMPriorityChange.setDescription('Time since the last change of the multicast priority of the RLOC for this EID-Prefix.')
lispMapCacheLocatorRlocLastMWeightChange = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 12), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheLocatorRlocLastMWeightChange.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRlocLastMWeightChange.setDescription('Time since the last change of the multicast weight of the RLOC for this EID-Prefix.')
lispMapCacheLocatorRlocLastStateChange = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 13), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheLocatorRlocLastStateChange.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRlocLastStateChange.setDescription('Time since the last change of the up/down state of the RLOC for this EID-Prefix.')
lispMapCacheLocatorRlocRtt = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 14), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheLocatorRlocRtt.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRlocRtt.setDescription('Round-trip time of RLOC probe and map-reply for this RLOC address for this prefix.')
lispMapCacheLocatorRlocDecapOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheLocatorRlocDecapOctets.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRlocDecapOctets.setDescription('The number of octets of LISP packets that were decapsulated by this device and were sourced from a remote host within this EID-Prefix and were encapsulated for this RLOC. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of RLOC of cache being removed and replaced, which can be detected by observing the value of lispMapCacheLocatorRlocTimeStamp.')
lispMapCacheLocatorRlocDecapPackets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 16), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheLocatorRlocDecapPackets.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRlocDecapPackets.setDescription('The number of LISP packets that were decapsulated by this device and were sourced from a remote host within this EID-Prefix and were encapsulated for this RLOC. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of RLOC of cache being removed and replaced, which can be detected by observing the value of lispMapCacheLocatorRlocTimeStamp.')
lispMapCacheLocatorRlocEncapOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 17), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheLocatorRlocEncapOctets.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRlocEncapOctets.setDescription('The number of octets of LISP packets that matched this EID-Prefix and were encapsulated using this RLOC address. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of RLOC of cache being removed and replaced, which can be detected by observing the value of lispMapCacheLocatorRlocTimeStamp.')
lispMapCacheLocatorRlocEncapPackets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 7, 1, 18), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispMapCacheLocatorRlocEncapPackets.setStatus('current')
if mibBuilder.loadTexts: lispMapCacheLocatorRlocEncapPackets.setDescription('The number of LISP packets that matched this EID-Prefix and were encapsulated using this RLOC address. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of RLOC of cache being removed and replaced, which can be detected by observing the value of lispMapCacheLocatorRlocTimeStamp.')
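# --- Illustrative sketch (not part of the generated LISP-MIB definitions) ---
# The priority/weight columns above carry RFC 6830 semantics: among usable
# RLOCs the lowest priority value is preferred (255 means "do not use"), and
# traffic is spread over the equally preferred RLOCs in proportion to their
# weights.  The function below is a minimal sketch of that selection; the
# (address, state, priority, weight) tuple layout is an assumption of this
# example, not something defined by the MIB.
def pick_rloc(rlocs):
    """rlocs: iterable of (address, state, priority, weight); return an address or None."""
    import random
    usable = [r for r in rlocs if r[1] == 1 and r[2] < 255]   # state 1 = up
    if not usable:
        return None
    best = min(r[2] for r in usable)                # lower priority preferred
    candidates = [r for r in usable if r[2] == best]
    total = sum(r[3] for r in candidates)
    if total == 0:                                  # all weights zero: any one
        return random.choice(candidates)[0]
    point = random.uniform(0, total)
    for address, _state, _priority, weight in candidates:
        point -= weight
        if point <= 0:
            return address
    return candidates[-1][0]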
lispConfiguredLocatorTable = MibTable((1, 3, 6, 1, 2, 1, 220, 1, 8), )
if mibBuilder.loadTexts: lispConfiguredLocatorTable.setReference('RFC 6830, Section 6.3.')
if mibBuilder.loadTexts: lispConfiguredLocatorTable.setStatus('current')
if mibBuilder.loadTexts: lispConfiguredLocatorTable.setDescription('This table represents the set of routing locators configured on this device. Note that the addresses configured by Proxy-ITR are treated as routing locators and therefore can be part of this table.')
lispConfiguredLocatorEntry = MibTableRow((1, 3, 6, 1, 2, 1, 220, 1, 8, 1), ).setIndexNames((0, "LISP-MIB", "lispConfiguredLocatorRlocLength"), (0, "LISP-MIB", "lispConfiguredLocatorRloc"))
if mibBuilder.loadTexts: lispConfiguredLocatorEntry.setStatus('current')
if mibBuilder.loadTexts: lispConfiguredLocatorEntry.setDescription('An entry (conceptual row) in the lispConfiguredLocatorTable.')
lispConfiguredLocatorRlocLength = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 8, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 39)))
if mibBuilder.loadTexts: lispConfiguredLocatorRlocLength.setStatus('current')
if mibBuilder.loadTexts: lispConfiguredLocatorRlocLength.setDescription('This object is used to get the octet-length of lispConfiguredLocatorRloc.')
lispConfiguredLocatorRloc = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 8, 1, 2), LispAddressType())
if mibBuilder.loadTexts: lispConfiguredLocatorRloc.setStatus('current')
if mibBuilder.loadTexts: lispConfiguredLocatorRloc.setDescription('This object is an RLOC address configured on this device. It can be an RLOC that is local to this device or can be an RLOC that belongs to another ETR within the same site. Proxy-ITR address is treated as an RLOC.')
lispConfiguredLocatorRlocState = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 8, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("unreachable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispConfiguredLocatorRlocState.setStatus('current')
if mibBuilder.loadTexts: lispConfiguredLocatorRlocState.setDescription('The state of this RLOC as per this device. (1 = RLOC is up; 2 = RLOC is down; 3 = RLOC is unreachable).')
lispConfiguredLocatorRlocLocal = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 8, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("siteself", 1), ("sitelocal", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispConfiguredLocatorRlocLocal.setStatus('current')
if mibBuilder.loadTexts: lispConfiguredLocatorRlocLocal.setDescription('Indicates whether the RLOC is local to this device (or remote, meaning local to another device in the same LISP site). (1 = RLOC is an address on this device; 2 = RLOC is an address on another device).')
lispConfiguredLocatorRlocTimeStamp = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 8, 1, 5), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispConfiguredLocatorRlocTimeStamp.setStatus('current')
if mibBuilder.loadTexts: lispConfiguredLocatorRlocTimeStamp.setDescription('The value of sysUpTime at which the RLOC was configured on this device. If this information was present at the most recent reinitialization of the local management subsystem, then this object contains a zero value.')
lispConfiguredLocatorRlocDecapOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 8, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispConfiguredLocatorRlocDecapOctets.setStatus('current')
if mibBuilder.loadTexts: lispConfiguredLocatorRlocDecapOctets.setDescription('The number of octets of LISP packets that were addressed to this RLOC and were decapsulated. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of configured RLOC being removed and replaced, which can be detected by observing the value of lispConfiguredLocatorRlocTimeStamp.')
lispConfiguredLocatorRlocDecapPackets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 8, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispConfiguredLocatorRlocDecapPackets.setStatus('current')
if mibBuilder.loadTexts: lispConfiguredLocatorRlocDecapPackets.setDescription('The number of LISP packets that were addressed to this RLOC and were decapsulated. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of configured RLOC being removed and replaced, which can be detected by observing the value of lispConfiguredLocatorRlocTimeStamp.')
lispConfiguredLocatorRlocEncapOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 8, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispConfiguredLocatorRlocEncapOctets.setStatus('current')
if mibBuilder.loadTexts: lispConfiguredLocatorRlocEncapOctets.setDescription('The number of octets of LISP packets that were encapsulated by this device using this RLOC address as the source. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of configured RLOC being removed and replaced, which can be detected by observing the value of lispConfiguredLocatorRlocTimeStamp.')
lispConfiguredLocatorRlocEncapPackets = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 8, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispConfiguredLocatorRlocEncapPackets.setStatus('current')
if mibBuilder.loadTexts: lispConfiguredLocatorRlocEncapPackets.setDescription('The number of LISP packets that were encapsulated by this device using this RLOC address as the source. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of configured RLOC being removed and replaced, which can be detected by observing the value of lispConfiguredLocatorRlocTimeStamp.')
lispEidRegistrationTable = MibTable((1, 3, 6, 1, 2, 1, 220, 1, 9), )
if mibBuilder.loadTexts: lispEidRegistrationTable.setReference('RFC 6833, Section 4.')
if mibBuilder.loadTexts: lispEidRegistrationTable.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationTable.setDescription('This table provides the properties of each LISP EID-Prefix that is registered with this device when configured to be a Map-Server.')
lispEidRegistrationEntry = MibTableRow((1, 3, 6, 1, 2, 1, 220, 1, 9, 1), ).setIndexNames((0, "LISP-MIB", "lispEidRegistrationEidLength"), (0, "LISP-MIB", "lispEidRegistrationEid"))
if mibBuilder.loadTexts: lispEidRegistrationEntry.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationEntry.setDescription('An entry (conceptual row) in the lispEidRegistrationTable.')
lispEidRegistrationEidLength = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 9, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 39)))
if mibBuilder.loadTexts: lispEidRegistrationEidLength.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationEidLength.setDescription('This object is used to get the octet-length of lispEidRegistrationEid.')
lispEidRegistrationEid = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 9, 1, 2), LispAddressType())
if mibBuilder.loadTexts: lispEidRegistrationEid.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationEid.setDescription('The EID-Prefix that is being registered.')
lispEidRegistrationSiteName = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 9, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationSiteName.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationSiteName.setDescription('Site name used by a Map-Server to distinguish different LISP sites that are registering with it.')
lispEidRegistrationSiteDescription = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 9, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationSiteDescription.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationSiteDescription.setDescription('Description for a site name used by a Map-Server. The EID prefix that is being registered belongs to this site.')
lispEidRegistrationIsRegistered = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 9, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationIsRegistered.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationIsRegistered.setDescription('Indicates the registration status of the given EID-Prefix. If this object is true, then the EID-Prefix is registered. The value false implies the EID-Prefix is not registered with the Map-Server; this can happen in several scenarios, such as authentication failures, routing problems, or misconfiguration.')
lispEidRegistrationFirstTimeStamp = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 9, 1, 6), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationFirstTimeStamp.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationFirstTimeStamp.setDescription('The value of sysUpTime at which the first valid register message for the EID Prefix information represented by this entry was received by this device. If this information was present at the most recent reinitialization of the local management subsystem, then this object contains a zero value.')
lispEidRegistrationLastTimeStamp = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 9, 1, 7), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationLastTimeStamp.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationLastTimeStamp.setDescription('The value of sysUpTime at which the last valid register message for the EID Prefix information represented by this entry was received by this device. If this information was present at the most recent reinitialization of the local management subsystem, then this object contains a zero value.')
lispEidRegistrationLastRegisterSenderLength = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 9, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 39))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationLastRegisterSenderLength.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationLastRegisterSenderLength.setDescription('This object is used to get the octet-length of lispEidRegistrationLastRegisterSender, the next object.')
lispEidRegistrationLastRegisterSender = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 9, 1, 9), LispAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationLastRegisterSender.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationLastRegisterSender.setDescription('Source address of the last valid register message for the given EID-Prefix that was received by this device.')
lispEidRegistrationAuthenticationErrors = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 9, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationAuthenticationErrors.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationAuthenticationErrors.setDescription('Count of total authentication errors of map-registers received for the given EID-Prefix. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of site config changes, which can be detected by observing the value of lispEidRegistrationFirstTimeStamp.')
lispEidRegistrationRlocsMismatch = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 9, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationRlocsMismatch.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationRlocsMismatch.setDescription('Count of total map-registers received that had at least one RLOC that was not in the allowed list of RLOCs for the given EID-Prefix. Discontinuities in this monotonically increasing value occur at reinitialization of the management system. Discontinuities can also occur as a result of site config changes, which can be detected by observing the value of lispEidRegistrationFirstTimeStamp.')
lispEidRegistrationEtrTable = MibTable((1, 3, 6, 1, 2, 1, 220, 1, 10), )
if mibBuilder.loadTexts: lispEidRegistrationEtrTable.setReference('RFC 6830, Section 6.1.')
if mibBuilder.loadTexts: lispEidRegistrationEtrTable.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationEtrTable.setDescription('This table provides the properties of ETRs that register the given EID-Prefix with this device when configured to be a Map-Server.')
lispEidRegistrationEtrEntry = MibTableRow((1, 3, 6, 1, 2, 1, 220, 1, 10, 1), ).setIndexNames((0, "LISP-MIB", "lispEidRegistrationEidLength"), (0, "LISP-MIB", "lispEidRegistrationEid"), (0, "LISP-MIB", "lispEidRegistrationEtrSenderLength"), (0, "LISP-MIB", "lispEidRegistrationEtrSender"))
if mibBuilder.loadTexts: lispEidRegistrationEtrEntry.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationEtrEntry.setDescription('An entry (conceptual row) in the lispEidRegistrationEtrTable.')
lispEidRegistrationEtrSenderLength = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 10, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 39)))
if mibBuilder.loadTexts: lispEidRegistrationEtrSenderLength.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationEtrSenderLength.setDescription('This object is used to get the octet-length of lispEidRegistrationEtrSender.')
lispEidRegistrationEtrSender = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 10, 1, 2), LispAddressType())
if mibBuilder.loadTexts: lispEidRegistrationEtrSender.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationEtrSender.setDescription('Source address of the ETR that is sending valid register messages for this EID-Prefix to this device.')
lispEidRegistrationEtrLastTimeStamp = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 10, 1, 3), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationEtrLastTimeStamp.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationEtrLastTimeStamp.setDescription('The value of sysUpTime at which the last valid register message from this ETR for the EID Prefix information represented by this entry was received by this device. If this information was present at the most recent reinitialization of the local management subsystem, then this object contains a zero value.')
lispEidRegistrationEtrTtl = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 10, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationEtrTtl.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationEtrTtl.setDescription('The Record TTL of the registering ETR device for this EID-Prefix.')
lispEidRegistrationEtrProxyReply = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 10, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationEtrProxyReply.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationEtrProxyReply.setDescription('Indicates proxy-replying status of the registering ETR for this EID-Prefix. If this object is true, then it means the Map-Server can proxy-reply.')
lispEidRegistrationEtrWantsMapNotify = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 10, 1, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationEtrWantsMapNotify.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationEtrWantsMapNotify.setDescription('Indicates whether the registering ETR has requested Map-Notify messages for this EID-Prefix. If this object is true, then the ETR wants Map-Notifications for this EID-Prefix.')
lispEidRegistrationLocatorTable = MibTable((1, 3, 6, 1, 2, 1, 220, 1, 11), )
if mibBuilder.loadTexts: lispEidRegistrationLocatorTable.setReference('RFC 6830, Section 6.1.')
if mibBuilder.loadTexts: lispEidRegistrationLocatorTable.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationLocatorTable.setDescription('This table provides the properties of all locators per LISP site that are served by this device when configured to be a Map-Server.')
lispEidRegistrationLocatorEntry = MibTableRow((1, 3, 6, 1, 2, 1, 220, 1, 11, 1), ).setIndexNames((0, "LISP-MIB", "lispEidRegistrationEidLength"), (0, "LISP-MIB", "lispEidRegistrationEid"), (0, "LISP-MIB", "lispEidRegistrationEtrSenderLength"), (0, "LISP-MIB", "lispEidRegistrationEtrSender"), (0, "LISP-MIB", "lispEidRegistrationLocatorRlocLength"), (0, "LISP-MIB", "lispEidRegistrationLocatorRloc"))
if mibBuilder.loadTexts: lispEidRegistrationLocatorEntry.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationLocatorEntry.setDescription('An entry (conceptual row) in the lispEidRegistrationLocatorTable.')
lispEidRegistrationLocatorRlocLength = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 11, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 39)))
if mibBuilder.loadTexts: lispEidRegistrationLocatorRlocLength.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationLocatorRlocLength.setDescription('This object is used to get the octet-length of lispEidRegistrationLocatorRloc.')
lispEidRegistrationLocatorRloc = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 11, 1, 2), LispAddressType())
if mibBuilder.loadTexts: lispEidRegistrationLocatorRloc.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationLocatorRloc.setDescription('The locator of the given EID-Prefix being registered by the given ETR with this device.')
lispEidRegistrationLocatorRlocState = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 11, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationLocatorRlocState.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationLocatorRlocState.setDescription('The cached state of this RLOC received in map-register from the ETR by the device, in the capacity of a Map-Server. Value 1 refers to up, value 2 refers to down.')
lispEidRegistrationLocatorIsLocal = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 11, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationLocatorIsLocal.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationLocatorIsLocal.setDescription('Indicates if the given locator is local to the registering ETR. If this object is true, it means the locator is local.')
lispEidRegistrationLocatorPriority = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 11, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationLocatorPriority.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationLocatorPriority.setDescription('The unicast priority of the RLOC for this EID-Prefix in the register message sent by the given ETR.')
lispEidRegistrationLocatorWeight = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 11, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationLocatorWeight.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationLocatorWeight.setDescription('The unicast weight of the RLOC for this EID-Prefix in the register message sent by the given ETR.')
lispEidRegistrationLocatorMPriority = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 11, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationLocatorMPriority.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationLocatorMPriority.setDescription('The multicast priority of the RLOC for this EID-Prefix in the register message sent by the given ETR.')
lispEidRegistrationLocatorMWeight = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 11, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispEidRegistrationLocatorMWeight.setStatus('current')
if mibBuilder.loadTexts: lispEidRegistrationLocatorMWeight.setDescription('The multicast weight of the RLOC for this EID-Prefix in the register message sent by the given ETR.')
lispUseMapServerTable = MibTable((1, 3, 6, 1, 2, 1, 220, 1, 12), )
if mibBuilder.loadTexts: lispUseMapServerTable.setReference('RFC 6833, Section 4.3.')
if mibBuilder.loadTexts: lispUseMapServerTable.setStatus('current')
if mibBuilder.loadTexts: lispUseMapServerTable.setDescription('This table provides the properties of the Map-Server(s) with which this device is configured to register.')
lispUseMapServerEntry = MibTableRow((1, 3, 6, 1, 2, 1, 220, 1, 12, 1), ).setIndexNames((0, "LISP-MIB", "lispUseMapServerAddressLength"), (0, "LISP-MIB", "lispUseMapServerAddress"))
if mibBuilder.loadTexts: lispUseMapServerEntry.setStatus('current')
if mibBuilder.loadTexts: lispUseMapServerEntry.setDescription('An entry (conceptual row) in the lispUseMapServerTable.')
lispUseMapServerAddressLength = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 12, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 39)))
if mibBuilder.loadTexts: lispUseMapServerAddressLength.setStatus('current')
if mibBuilder.loadTexts: lispUseMapServerAddressLength.setDescription('This object is used to get the octet-length of lispUseMapServerAddress.')
lispUseMapServerAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 12, 1, 2), LispAddressType())
if mibBuilder.loadTexts: lispUseMapServerAddress.setStatus('current')
if mibBuilder.loadTexts: lispUseMapServerAddress.setDescription('Address of a Map-Server configured on this device.')
lispUseMapServerState = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 12, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("unreachable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispUseMapServerState.setStatus('current')
if mibBuilder.loadTexts: lispUseMapServerState.setDescription('State of this Map-Server configured on this device (1 = Map-Server is up; 2 = Map-Server is down; 3 = Map-Server is unreachable).')
lispUseMapResolverTable = MibTable((1, 3, 6, 1, 2, 1, 220, 1, 13), )
if mibBuilder.loadTexts: lispUseMapResolverTable.setReference('RFC 6833, Section 4.4.')
if mibBuilder.loadTexts: lispUseMapResolverTable.setStatus('current')
if mibBuilder.loadTexts: lispUseMapResolverTable.setDescription('This table provides the properties of the Map-Resolver(s) this device is configured to use.')
lispUseMapResolverEntry = MibTableRow((1, 3, 6, 1, 2, 1, 220, 1, 13, 1), ).setIndexNames((0, "LISP-MIB", "lispUseMapResolverAddressLength"), (0, "LISP-MIB", "lispUseMapResolverAddress"))
if mibBuilder.loadTexts: lispUseMapResolverEntry.setStatus('current')
if mibBuilder.loadTexts: lispUseMapResolverEntry.setDescription('An entry (conceptual row) in the lispUseMapResolverTable.')
lispUseMapResolverAddressLength = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 13, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 39)))
if mibBuilder.loadTexts: lispUseMapResolverAddressLength.setStatus('current')
if mibBuilder.loadTexts: lispUseMapResolverAddressLength.setDescription('This object is used to get the octet-length of lispUseMapResolverAddress.')
lispUseMapResolverAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 13, 1, 2), LispAddressType())
if mibBuilder.loadTexts: lispUseMapResolverAddress.setStatus('current')
if mibBuilder.loadTexts: lispUseMapResolverAddress.setDescription('Address of Map-Resolver configured on this device.')
lispUseMapResolverState = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 13, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispUseMapResolverState.setStatus('current')
if mibBuilder.loadTexts: lispUseMapResolverState.setDescription('State of this Map-Resolver configured on this device (1 = Map-Resolver is up; 2 = Map-Resolver is down).')
lispUseProxyEtrTable = MibTable((1, 3, 6, 1, 2, 1, 220, 1, 14), )
if mibBuilder.loadTexts: lispUseProxyEtrTable.setReference('RFC 6830, Section 6.')
if mibBuilder.loadTexts: lispUseProxyEtrTable.setStatus('current')
if mibBuilder.loadTexts: lispUseProxyEtrTable.setDescription('This table provides the properties of all Proxy ETRs that this device is configured to use.')
lispUseProxyEtrEntry = MibTableRow((1, 3, 6, 1, 2, 1, 220, 1, 14, 1), ).setIndexNames((0, "LISP-MIB", "lispUseProxyEtrAddressLength"), (0, "LISP-MIB", "lispUseProxyEtrAddress"))
if mibBuilder.loadTexts: lispUseProxyEtrEntry.setStatus('current')
if mibBuilder.loadTexts: lispUseProxyEtrEntry.setDescription('An entry (conceptual row) in the lispUseProxyEtrTable.')
lispUseProxyEtrAddressLength = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 14, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 39)))
if mibBuilder.loadTexts: lispUseProxyEtrAddressLength.setStatus('current')
if mibBuilder.loadTexts: lispUseProxyEtrAddressLength.setDescription('This object is used to get the octet-length of lispUseProxyEtrAddress.')
lispUseProxyEtrAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 14, 1, 2), LispAddressType())
if mibBuilder.loadTexts: lispUseProxyEtrAddress.setStatus('current')
if mibBuilder.loadTexts: lispUseProxyEtrAddress.setDescription('Address of Proxy ETR configured on this device.')
lispUseProxyEtrPriority = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 14, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispUseProxyEtrPriority.setStatus('current')
if mibBuilder.loadTexts: lispUseProxyEtrPriority.setDescription('The unicast priority of the PETR locator.')
lispUseProxyEtrWeight = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 14, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispUseProxyEtrWeight.setStatus('current')
if mibBuilder.loadTexts: lispUseProxyEtrWeight.setDescription('The unicast weight of the PETR locator.')
lispUseProxyEtrMPriority = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 14, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispUseProxyEtrMPriority.setStatus('current')
if mibBuilder.loadTexts: lispUseProxyEtrMPriority.setDescription('The multicast priority of the PETR locator.')
lispUseProxyEtrMWeight = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 14, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispUseProxyEtrMWeight.setStatus('current')
if mibBuilder.loadTexts: lispUseProxyEtrMWeight.setDescription('The multicast weight of the PETR locator.')
lispUseProxyEtrState = MibTableColumn((1, 3, 6, 1, 2, 1, 220, 1, 14, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("down", 0), ("up", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lispUseProxyEtrState.setStatus('current')
if mibBuilder.loadTexts: lispUseProxyEtrState.setDescription('State of this Proxy ETR configured on this device (0 = Proxy ETR is down; 1 = Proxy ETR is up).')
lispCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 220, 2, 1))
lispGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 220, 2, 2))
lispMIBComplianceEtr = ModuleCompliance((1, 3, 6, 1, 2, 1, 220, 2, 1, 1)).setObjects(("LISP-MIB", "lispMIBEtrGroup"), ("LISP-MIB", "lispMIBItrGroup"), ("LISP-MIB", "lispMIBPetrGroup"), ("LISP-MIB", "lispMIBPitrGroup"), ("LISP-MIB", "lispMIBMapServerGroup"), ("LISP-MIB", "lispMIBMapResolverGroup"), ("LISP-MIB", "lispMIBEtrExtendedGroup"), ("LISP-MIB", "lispMIBItrExtendedGroup"), ("LISP-MIB", "lispMIBMapServerExtendedGroup"), ("LISP-MIB", "lispMIBTuningParametersGroup"), ("LISP-MIB", "lispMIBEncapStatisticsGroup"), ("LISP-MIB", "lispMIBDecapStatisticsGroup"), ("LISP-MIB", "lispMIBDiagnosticsGroup"), ("LISP-MIB", "lispMIBVrfGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBComplianceEtr = lispMIBComplianceEtr.setStatus('current')
if mibBuilder.loadTexts: lispMIBComplianceEtr.setDescription('The compliance statement for LISP ETRs. It conveys whether the device supports the ETR feature, and, if so, the relevant state associated with that feature.')
lispMIBComplianceItr = ModuleCompliance((1, 3, 6, 1, 2, 1, 220, 2, 1, 2)).setObjects(("LISP-MIB", "lispMIBItrGroup"), ("LISP-MIB", "lispMIBEtrGroup"), ("LISP-MIB", "lispMIBPetrGroup"), ("LISP-MIB", "lispMIBPitrGroup"), ("LISP-MIB", "lispMIBMapServerGroup"), ("LISP-MIB", "lispMIBMapResolverGroup"), ("LISP-MIB", "lispMIBEtrExtendedGroup"), ("LISP-MIB", "lispMIBItrExtendedGroup"), ("LISP-MIB", "lispMIBMapServerExtendedGroup"), ("LISP-MIB", "lispMIBTuningParametersGroup"), ("LISP-MIB", "lispMIBEncapStatisticsGroup"), ("LISP-MIB", "lispMIBDecapStatisticsGroup"), ("LISP-MIB", "lispMIBDiagnosticsGroup"), ("LISP-MIB", "lispMIBVrfGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBComplianceItr = lispMIBComplianceItr.setStatus('current')
if mibBuilder.loadTexts: lispMIBComplianceItr.setDescription('The compliance statement for LISP ITRs. It conveys whether the device supports the ITR feature, and, if so, the relevant state associated with that feature.')
lispMIBCompliancePetr = ModuleCompliance((1, 3, 6, 1, 2, 1, 220, 2, 1, 3)).setObjects(("LISP-MIB", "lispMIBPetrGroup"), ("LISP-MIB", "lispMIBEtrGroup"), ("LISP-MIB", "lispMIBItrGroup"), ("LISP-MIB", "lispMIBPitrGroup"), ("LISP-MIB", "lispMIBMapServerGroup"), ("LISP-MIB", "lispMIBMapResolverGroup"), ("LISP-MIB", "lispMIBEtrExtendedGroup"), ("LISP-MIB", "lispMIBItrExtendedGroup"), ("LISP-MIB", "lispMIBMapServerExtendedGroup"), ("LISP-MIB", "lispMIBTuningParametersGroup"), ("LISP-MIB", "lispMIBEncapStatisticsGroup"), ("LISP-MIB", "lispMIBDecapStatisticsGroup"), ("LISP-MIB", "lispMIBDiagnosticsGroup"), ("LISP-MIB", "lispMIBVrfGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBCompliancePetr = lispMIBCompliancePetr.setStatus('current')
if mibBuilder.loadTexts: lispMIBCompliancePetr.setDescription('The compliance statement for LISP Proxy-ETRs. It conveys whether the device supports the Proxy-ETR feature, and, if so, the relevant state associated with that feature.')
lispMIBCompliancePitr = ModuleCompliance((1, 3, 6, 1, 2, 1, 220, 2, 1, 4)).setObjects(("LISP-MIB", "lispMIBPitrGroup"), ("LISP-MIB", "lispMIBEtrGroup"), ("LISP-MIB", "lispMIBItrGroup"), ("LISP-MIB", "lispMIBPetrGroup"), ("LISP-MIB", "lispMIBMapServerGroup"), ("LISP-MIB", "lispMIBMapResolverGroup"), ("LISP-MIB", "lispMIBEtrExtendedGroup"), ("LISP-MIB", "lispMIBItrExtendedGroup"), ("LISP-MIB", "lispMIBMapServerExtendedGroup"), ("LISP-MIB", "lispMIBTuningParametersGroup"), ("LISP-MIB", "lispMIBEncapStatisticsGroup"), ("LISP-MIB", "lispMIBDecapStatisticsGroup"), ("LISP-MIB", "lispMIBDiagnosticsGroup"), ("LISP-MIB", "lispMIBVrfGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBCompliancePitr = lispMIBCompliancePitr.setStatus('current')
if mibBuilder.loadTexts: lispMIBCompliancePitr.setDescription('The compliance statement for LISP Proxy-ITRs. It conveys whether the device supports the Proxy-ITR feature, and, if so, the relevant state associated with that feature.')
lispMIBComplianceMapServer = ModuleCompliance((1, 3, 6, 1, 2, 1, 220, 2, 1, 5)).setObjects(("LISP-MIB", "lispMIBMapServerGroup"), ("LISP-MIB", "lispMIBEtrGroup"), ("LISP-MIB", "lispMIBItrGroup"), ("LISP-MIB", "lispMIBPetrGroup"), ("LISP-MIB", "lispMIBPitrGroup"), ("LISP-MIB", "lispMIBMapResolverGroup"), ("LISP-MIB", "lispMIBEtrExtendedGroup"), ("LISP-MIB", "lispMIBItrExtendedGroup"), ("LISP-MIB", "lispMIBMapServerExtendedGroup"), ("LISP-MIB", "lispMIBTuningParametersGroup"), ("LISP-MIB", "lispMIBEncapStatisticsGroup"), ("LISP-MIB", "lispMIBDecapStatisticsGroup"), ("LISP-MIB", "lispMIBDiagnosticsGroup"), ("LISP-MIB", "lispMIBVrfGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBComplianceMapServer = lispMIBComplianceMapServer.setStatus('current')
if mibBuilder.loadTexts: lispMIBComplianceMapServer.setDescription('The compliance statement for LISP Map Servers. It conveys whether the device supports the Map Server feature, and, if so, the relevant state associated with that feature.')
lispMIBComplianceMapResolver = ModuleCompliance((1, 3, 6, 1, 2, 1, 220, 2, 1, 6)).setObjects(("LISP-MIB", "lispMIBMapResolverGroup"), ("LISP-MIB", "lispMIBEtrGroup"), ("LISP-MIB", "lispMIBItrGroup"), ("LISP-MIB", "lispMIBPetrGroup"), ("LISP-MIB", "lispMIBPitrGroup"), ("LISP-MIB", "lispMIBMapServerGroup"), ("LISP-MIB", "lispMIBEtrExtendedGroup"), ("LISP-MIB", "lispMIBItrExtendedGroup"), ("LISP-MIB", "lispMIBMapServerExtendedGroup"), ("LISP-MIB", "lispMIBTuningParametersGroup"), ("LISP-MIB", "lispMIBEncapStatisticsGroup"), ("LISP-MIB", "lispMIBDecapStatisticsGroup"), ("LISP-MIB", "lispMIBDiagnosticsGroup"), ("LISP-MIB", "lispMIBVrfGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBComplianceMapResolver = lispMIBComplianceMapResolver.setStatus('current')
if mibBuilder.loadTexts: lispMIBComplianceMapResolver.setDescription('The compliance statement for LISP Map Resolvers. It conveys whether the device supports the Map Resolver feature, and, if so, the relevant state associated with that feature.')
lispMIBEtrGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 220, 2, 2, 1)).setObjects(("LISP-MIB", "lispFeaturesEtrEnabled"), ("LISP-MIB", "lispMappingDatabaseLsb"), ("LISP-MIB", "lispMappingDatabaseLocatorRlocPriority"), ("LISP-MIB", "lispMappingDatabaseLocatorRlocWeight"), ("LISP-MIB", "lispMappingDatabaseLocatorRlocMPriority"), ("LISP-MIB", "lispMappingDatabaseLocatorRlocMWeight"), ("LISP-MIB", "lispMappingDatabaseLocatorRlocState"), ("LISP-MIB", "lispMappingDatabaseLocatorRlocLocal"), ("LISP-MIB", "lispConfiguredLocatorRlocState"), ("LISP-MIB", "lispConfiguredLocatorRlocLocal"), ("LISP-MIB", "lispUseMapServerState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBEtrGroup = lispMIBEtrGroup.setStatus('current')
if mibBuilder.loadTexts: lispMIBEtrGroup.setDescription('A collection of objects to support reporting of basic LISP ETR parameters.')
lispMIBItrGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 220, 2, 2, 2)).setObjects(("LISP-MIB", "lispFeaturesItrEnabled"), ("LISP-MIB", "lispFeaturesMapCacheSize"), ("LISP-MIB", "lispMappingDatabaseLsb"), ("LISP-MIB", "lispMapCacheLocatorRlocPriority"), ("LISP-MIB", "lispMapCacheLocatorRlocWeight"), ("LISP-MIB", "lispMapCacheLocatorRlocMPriority"), ("LISP-MIB", "lispMapCacheLocatorRlocMWeight"), ("LISP-MIB", "lispMapCacheLocatorRlocState"), ("LISP-MIB", "lispMapCacheEidTimeStamp"), ("LISP-MIB", "lispMapCacheEidExpiryTime"), ("LISP-MIB", "lispUseMapResolverState"), ("LISP-MIB", "lispUseProxyEtrPriority"), ("LISP-MIB", "lispUseProxyEtrWeight"), ("LISP-MIB", "lispUseProxyEtrMPriority"), ("LISP-MIB", "lispUseProxyEtrMWeight"), ("LISP-MIB", "lispUseProxyEtrState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBItrGroup = lispMIBItrGroup.setStatus('current')
if mibBuilder.loadTexts: lispMIBItrGroup.setDescription('A collection of objects to support reporting of basic LISP ITR parameters.')
lispMIBPetrGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 220, 2, 2, 3)).setObjects(("LISP-MIB", "lispFeaturesProxyEtrEnabled"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBPetrGroup = lispMIBPetrGroup.setStatus('current')
if mibBuilder.loadTexts: lispMIBPetrGroup.setDescription('A collection of objects to support reporting of basic LISP Proxy-ETR parameters.')
lispMIBPitrGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 220, 2, 2, 4)).setObjects(("LISP-MIB", "lispFeaturesProxyItrEnabled"), ("LISP-MIB", "lispConfiguredLocatorRlocState"), ("LISP-MIB", "lispConfiguredLocatorRlocLocal"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBPitrGroup = lispMIBPitrGroup.setStatus('current')
if mibBuilder.loadTexts: lispMIBPitrGroup.setDescription('A collection of objects to support reporting of basic LISP Proxy-ITR parameters.')
lispMIBMapServerGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 220, 2, 2, 5)).setObjects(("LISP-MIB", "lispFeaturesMapServerEnabled"), ("LISP-MIB", "lispEidRegistrationIsRegistered"), ("LISP-MIB", "lispEidRegistrationLocatorRlocState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBMapServerGroup = lispMIBMapServerGroup.setStatus('current')
if mibBuilder.loadTexts: lispMIBMapServerGroup.setDescription('A collection of objects to support reporting of basic LISP Map Server parameters.')
lispMIBMapResolverGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 220, 2, 2, 6)).setObjects(("LISP-MIB", "lispFeaturesMapResolverEnabled"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBMapResolverGroup = lispMIBMapResolverGroup.setStatus('current')
if mibBuilder.loadTexts: lispMIBMapResolverGroup.setDescription('A collection of objects to support reporting of basic LISP Map Resolver parameters.')
lispMIBEtrExtendedGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 220, 2, 2, 7)).setObjects(("LISP-MIB", "lispFeaturesRlocProbeEnabled"), ("LISP-MIB", "lispFeaturesEtrAcceptMapDataEnabled"), ("LISP-MIB", "lispFeaturesEtrAcceptMapDataVerifyEnabled"), ("LISP-MIB", "lispMappingDatabaseEidPartitioned"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBEtrExtendedGroup = lispMIBEtrExtendedGroup.setStatus('current')
if mibBuilder.loadTexts: lispMIBEtrExtendedGroup.setDescription('A collection of objects to support reporting of LISP features and properties on ETRs.')
lispMIBItrExtendedGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 220, 2, 2, 8)).setObjects(("LISP-MIB", "lispFeaturesRlocProbeEnabled"), ("LISP-MIB", "lispMapCacheEidState"), ("LISP-MIB", "lispMapCacheEidAuthoritative"), ("LISP-MIB", "lispMapCacheLocatorRlocTimeStamp"), ("LISP-MIB", "lispMapCacheLocatorRlocLastPriorityChange"), ("LISP-MIB", "lispMapCacheLocatorRlocLastWeightChange"), ("LISP-MIB", "lispMapCacheLocatorRlocLastMPriorityChange"), ("LISP-MIB", "lispMapCacheLocatorRlocLastMWeightChange"), ("LISP-MIB", "lispMapCacheLocatorRlocLastStateChange"), ("LISP-MIB", "lispMapCacheLocatorRlocRtt"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBItrExtendedGroup = lispMIBItrExtendedGroup.setStatus('current')
if mibBuilder.loadTexts: lispMIBItrExtendedGroup.setDescription('A collection of objects to support reporting of LISP features and properties on ITRs.')
lispMIBMapServerExtendedGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 220, 2, 2, 9)).setObjects(("LISP-MIB", "lispEidRegistrationSiteName"), ("LISP-MIB", "lispEidRegistrationSiteDescription"), ("LISP-MIB", "lispEidRegistrationIsRegistered"), ("LISP-MIB", "lispEidRegistrationFirstTimeStamp"), ("LISP-MIB", "lispEidRegistrationLastTimeStamp"), ("LISP-MIB", "lispEidRegistrationLastRegisterSenderLength"), ("LISP-MIB", "lispEidRegistrationLastRegisterSender"), ("LISP-MIB", "lispEidRegistrationEtrLastTimeStamp"), ("LISP-MIB", "lispEidRegistrationEtrTtl"), ("LISP-MIB", "lispEidRegistrationEtrProxyReply"), ("LISP-MIB", "lispEidRegistrationEtrWantsMapNotify"), ("LISP-MIB", "lispEidRegistrationLocatorIsLocal"), ("LISP-MIB", "lispEidRegistrationLocatorPriority"), ("LISP-MIB", "lispEidRegistrationLocatorWeight"), ("LISP-MIB", "lispEidRegistrationLocatorMPriority"), ("LISP-MIB", "lispEidRegistrationLocatorMWeight"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBMapServerExtendedGroup = lispMIBMapServerExtendedGroup.setStatus('current')
if mibBuilder.loadTexts: lispMIBMapServerExtendedGroup.setDescription('A collection of objects to support the reporting of LISP features and properties on Map Servers related to EID registrations.')
lispMIBTuningParametersGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 220, 2, 2, 10)).setObjects(("LISP-MIB", "lispFeaturesMapCacheLimit"), ("LISP-MIB", "lispFeaturesEtrMapCacheTtl"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBTuningParametersGroup = lispMIBTuningParametersGroup.setStatus('current')
if mibBuilder.loadTexts: lispMIBTuningParametersGroup.setDescription('A collection of objects used to support the reporting of parameters used to control LISP behavior and to tune performance.')
lispMIBEncapStatisticsGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 220, 2, 2, 11)).setObjects(("LISP-MIB", "lispMappingDatabaseTimeStamp"), ("LISP-MIB", "lispMappingDatabaseEncapOctets"), ("LISP-MIB", "lispMappingDatabaseEncapPackets"), ("LISP-MIB", "lispMappingDatabaseLocatorRlocTimeStamp"), ("LISP-MIB", "lispMappingDatabaseLocatorRlocEncapOctets"), ("LISP-MIB", "lispMappingDatabaseLocatorRlocEncapPackets"), ("LISP-MIB", "lispMapCacheEidTimeStamp"), ("LISP-MIB", "lispMapCacheEidEncapOctets"), ("LISP-MIB", "lispMapCacheEidEncapPackets"), ("LISP-MIB", "lispMapCacheLocatorRlocTimeStamp"), ("LISP-MIB", "lispMapCacheLocatorRlocEncapOctets"), ("LISP-MIB", "lispMapCacheLocatorRlocEncapPackets"), ("LISP-MIB", "lispConfiguredLocatorRlocTimeStamp"), ("LISP-MIB", "lispConfiguredLocatorRlocEncapOctets"), ("LISP-MIB", "lispConfiguredLocatorRlocEncapPackets"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBEncapStatisticsGroup = lispMIBEncapStatisticsGroup.setStatus('current')
if mibBuilder.loadTexts: lispMIBEncapStatisticsGroup.setDescription('A collection of objects used to support the reporting of LISP encapsulation statistics for the device.')
lispMIBDecapStatisticsGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 220, 2, 2, 12)).setObjects(("LISP-MIB", "lispMappingDatabaseTimeStamp"), ("LISP-MIB", "lispMappingDatabaseDecapOctets"), ("LISP-MIB", "lispMappingDatabaseDecapPackets"), ("LISP-MIB", "lispMappingDatabaseLocatorRlocTimeStamp"), ("LISP-MIB", "lispMappingDatabaseLocatorRlocDecapOctets"), ("LISP-MIB", "lispMappingDatabaseLocatorRlocDecapPackets"), ("LISP-MIB", "lispMapCacheEidTimeStamp"), ("LISP-MIB", "lispMapCacheEidDecapOctets"), ("LISP-MIB", "lispMapCacheEidDecapPackets"), ("LISP-MIB", "lispMapCacheLocatorRlocTimeStamp"), ("LISP-MIB", "lispMapCacheLocatorRlocDecapOctets"), ("LISP-MIB", "lispMapCacheLocatorRlocDecapPackets"), ("LISP-MIB", "lispConfiguredLocatorRlocTimeStamp"), ("LISP-MIB", "lispConfiguredLocatorRlocDecapOctets"), ("LISP-MIB", "lispConfiguredLocatorRlocDecapPackets"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBDecapStatisticsGroup = lispMIBDecapStatisticsGroup.setStatus('current')
if mibBuilder.loadTexts: lispMIBDecapStatisticsGroup.setDescription('A collection of objects used to support the reporting of LISP decapsulation statistics for the device.')
lispMIBDiagnosticsGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 220, 2, 2, 13)).setObjects(("LISP-MIB", "lispFeaturesRouterTimeStamp"), ("LISP-MIB", "lispGlobalStatsMapRequestsIn"), ("LISP-MIB", "lispGlobalStatsMapRequestsOut"), ("LISP-MIB", "lispGlobalStatsMapRepliesIn"), ("LISP-MIB", "lispGlobalStatsMapRepliesOut"), ("LISP-MIB", "lispGlobalStatsMapRegistersIn"), ("LISP-MIB", "lispGlobalStatsMapRegistersOut"), ("LISP-MIB", "lispEidRegistrationAuthenticationErrors"), ("LISP-MIB", "lispEidRegistrationRlocsMismatch"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBDiagnosticsGroup = lispMIBDiagnosticsGroup.setStatus('current')
if mibBuilder.loadTexts: lispMIBDiagnosticsGroup.setDescription('A collection of objects used to support the reporting of additional diagnostics related to the LISP control-plane state of a LISP device.')
lispMIBVrfGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 220, 2, 2, 14)).setObjects(("LISP-MIB", "lispIidToVrfName"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
lispMIBVrfGroup = lispMIBVrfGroup.setStatus('current')
if mibBuilder.loadTexts: lispMIBVrfGroup.setDescription('A collection of objects used to support reporting of VRF-related information on a LISP device.')
mibBuilder.exportSymbols("LISP-MIB", lispFeaturesItrEnabled=lispFeaturesItrEnabled, lispConfiguredLocatorRlocLocal=lispConfiguredLocatorRlocLocal, lispConformance=lispConformance, lispUseProxyEtrWeight=lispUseProxyEtrWeight, lispConfiguredLocatorRloc=lispConfiguredLocatorRloc, lispMapCacheLocatorRlocMPriority=lispMapCacheLocatorRlocMPriority, lispFeaturesEntry=lispFeaturesEntry, lispUseProxyEtrEntry=lispUseProxyEtrEntry, lispEidRegistrationLastRegisterSender=lispEidRegistrationLastRegisterSender, lispUseMapResolverEntry=lispUseMapResolverEntry, lispGlobalStatsMapRegistersOut=lispGlobalStatsMapRegistersOut, lispEidRegistrationLocatorPriority=lispEidRegistrationLocatorPriority, lispEidRegistrationEtrEntry=lispEidRegistrationEtrEntry, lispEidRegistrationLocatorMWeight=lispEidRegistrationLocatorMWeight, lispGlobalStatsMapRegistersIn=lispGlobalStatsMapRegistersIn, lispMappingDatabaseLocatorRlocDecapOctets=lispMappingDatabaseLocatorRlocDecapOctets, lispMIBTuningParametersGroup=lispMIBTuningParametersGroup, lispMappingDatabaseLsb=lispMappingDatabaseLsb, lispMappingDatabaseLocatorRlocMPriority=lispMappingDatabaseLocatorRlocMPriority, lispEidRegistrationIsRegistered=lispEidRegistrationIsRegistered, lispMIBItrGroup=lispMIBItrGroup, lispMapCacheLocatorRlocWeight=lispMapCacheLocatorRlocWeight, lispMappingDatabaseLocatorRlocMWeight=lispMappingDatabaseLocatorRlocMWeight, lispMapCacheEidLength=lispMapCacheEidLength, lispCompliances=lispCompliances, lispMIBMapServerGroup=lispMIBMapServerGroup, lispMappingDatabaseEid=lispMappingDatabaseEid, lispUseProxyEtrTable=lispUseProxyEtrTable, lispEidRegistrationFirstTimeStamp=lispEidRegistrationFirstTimeStamp, lispUseMapServerAddressLength=lispUseMapServerAddressLength, lispMapCacheTable=lispMapCacheTable, lispMapCacheEidState=lispMapCacheEidState, lispUseProxyEtrMPriority=lispUseProxyEtrMPriority, lispEidRegistrationLocatorEntry=lispEidRegistrationLocatorEntry, lispEidRegistrationEtrTable=lispEidRegistrationEtrTable, lispEidRegistrationEntry=lispEidRegistrationEntry, lispMappingDatabaseLocatorRlocState=lispMappingDatabaseLocatorRlocState, lispFeaturesMapServerEnabled=lispFeaturesMapServerEnabled, lispMIBComplianceItr=lispMIBComplianceItr, lispMappingDatabaseLocatorEntry=lispMappingDatabaseLocatorEntry, lispIidToVrfEntry=lispIidToVrfEntry, lispUseMapServerEntry=lispUseMapServerEntry, lispEidRegistrationLocatorIsLocal=lispEidRegistrationLocatorIsLocal, lispEidRegistrationTable=lispEidRegistrationTable, lispMIBCompliancePitr=lispMIBCompliancePitr, lispUseMapServerState=lispUseMapServerState, lispMappingDatabaseLocatorRlocLength=lispMappingDatabaseLocatorRlocLength, lispConfiguredLocatorRlocTimeStamp=lispConfiguredLocatorRlocTimeStamp, lispConfiguredLocatorTable=lispConfiguredLocatorTable, lispConfiguredLocatorRlocDecapPackets=lispConfiguredLocatorRlocDecapPackets, lispMIBComplianceMapServer=lispMIBComplianceMapServer, lispMappingDatabaseEidPartitioned=lispMappingDatabaseEidPartitioned, lispFeaturesMapCacheLimit=lispFeaturesMapCacheLimit, lispMappingDatabaseLocatorRlocTimeStamp=lispMappingDatabaseLocatorRlocTimeStamp, lispMapCacheLocatorRlocDecapPackets=lispMapCacheLocatorRlocDecapPackets, lispFeaturesMapResolverEnabled=lispFeaturesMapResolverEnabled, lispMapCacheEidDecapOctets=lispMapCacheEidDecapOctets, lispEidRegistrationEidLength=lispEidRegistrationEidLength, lispMapCacheEntry=lispMapCacheEntry, PYSNMP_MODULE_ID=lispMIB, lispMapCacheEidTimeStamp=lispMapCacheEidTimeStamp, lispMapCacheLocatorRlocPriority=lispMapCacheLocatorRlocPriority, 
lispMIBItrExtendedGroup=lispMIBItrExtendedGroup, lispEidRegistrationLastTimeStamp=lispEidRegistrationLastTimeStamp, lispFeaturesEtrAcceptMapDataEnabled=lispFeaturesEtrAcceptMapDataEnabled, lispConfiguredLocatorEntry=lispConfiguredLocatorEntry, lispMIBPitrGroup=lispMIBPitrGroup, lispMappingDatabaseTimeStamp=lispMappingDatabaseTimeStamp, lispUseMapResolverState=lispUseMapResolverState, lispUseProxyEtrPriority=lispUseProxyEtrPriority, lispGlobalStatsTable=lispGlobalStatsTable, lispConfiguredLocatorRlocState=lispConfiguredLocatorRlocState, lispFeaturesTable=lispFeaturesTable, lispEidRegistrationEtrWantsMapNotify=lispEidRegistrationEtrWantsMapNotify, lispConfiguredLocatorRlocDecapOctets=lispConfiguredLocatorRlocDecapOctets, lispUseProxyEtrAddress=lispUseProxyEtrAddress, lispMapCacheLocatorEntry=lispMapCacheLocatorEntry, lispEidRegistrationRlocsMismatch=lispEidRegistrationRlocsMismatch, lispIidToVrfTable=lispIidToVrfTable, lispEidRegistrationLastRegisterSenderLength=lispEidRegistrationLastRegisterSenderLength, LispAddressType=LispAddressType, lispMapCacheLocatorRlocTimeStamp=lispMapCacheLocatorRlocTimeStamp, lispMappingDatabaseEncapOctets=lispMappingDatabaseEncapOctets, lispMappingDatabaseLocatorRlocWeight=lispMappingDatabaseLocatorRlocWeight, lispMappingDatabaseLocatorRlocEncapOctets=lispMappingDatabaseLocatorRlocEncapOctets, lispFeaturesEtrEnabled=lispFeaturesEtrEnabled, lispConfiguredLocatorRlocEncapOctets=lispConfiguredLocatorRlocEncapOctets, lispMapCacheLocatorRlocRtt=lispMapCacheLocatorRlocRtt, lispEidRegistrationEtrLastTimeStamp=lispEidRegistrationEtrLastTimeStamp, lispUseMapResolverTable=lispUseMapResolverTable, lispUseMapResolverAddressLength=lispUseMapResolverAddressLength, lispMIBCompliancePetr=lispMIBCompliancePetr, lispMapCacheLocatorRloc=lispMapCacheLocatorRloc, lispMappingDatabaseDecapPackets=lispMappingDatabaseDecapPackets, lispMapCacheEidExpiryTime=lispMapCacheEidExpiryTime, lispGroups=lispGroups, lispIidToVrfName=lispIidToVrfName, lispMapCacheEid=lispMapCacheEid, lispGlobalStatsEntry=lispGlobalStatsEntry, lispMapCacheLocatorRlocLastMPriorityChange=lispMapCacheLocatorRlocLastMPriorityChange, lispObjects=lispObjects, lispEidRegistrationSiteName=lispEidRegistrationSiteName, lispEidRegistrationAuthenticationErrors=lispEidRegistrationAuthenticationErrors, lispMapCacheEidDecapPackets=lispMapCacheEidDecapPackets, lispMIBMapServerExtendedGroup=lispMIBMapServerExtendedGroup, lispMIBMapResolverGroup=lispMIBMapResolverGroup, lispMapCacheEidAuthoritative=lispMapCacheEidAuthoritative, lispMappingDatabaseLocatorRlocPriority=lispMappingDatabaseLocatorRlocPriority, lispEidRegistrationEtrSenderLength=lispEidRegistrationEtrSenderLength, lispMIBComplianceEtr=lispMIBComplianceEtr, lispMIBPetrGroup=lispMIBPetrGroup, lispMapCacheLocatorRlocLastWeightChange=lispMapCacheLocatorRlocLastWeightChange, lispFeaturesEtrAcceptMapDataVerifyEnabled=lispFeaturesEtrAcceptMapDataVerifyEnabled, lispMappingDatabaseEncapPackets=lispMappingDatabaseEncapPackets, lispUseMapServerAddress=lispUseMapServerAddress, lispEidRegistrationEid=lispEidRegistrationEid, lispMapCacheEidEncapOctets=lispMapCacheEidEncapOctets, lispMapCacheLocatorRlocLength=lispMapCacheLocatorRlocLength, lispMIBEtrGroup=lispMIBEtrGroup, lispFeaturesProxyEtrEnabled=lispFeaturesProxyEtrEnabled, lispMapCacheLocatorRlocLastMWeightChange=lispMapCacheLocatorRlocLastMWeightChange, lispMIBDiagnosticsGroup=lispMIBDiagnosticsGroup, lispEidRegistrationLocatorRlocState=lispEidRegistrationLocatorRlocState, 
lispMappingDatabaseLocatorTable=lispMappingDatabaseLocatorTable, lispFeaturesProxyItrEnabled=lispFeaturesProxyItrEnabled, lispFeaturesAddressFamily=lispFeaturesAddressFamily, lispMapCacheLocatorRlocMWeight=lispMapCacheLocatorRlocMWeight, lispEidRegistrationLocatorWeight=lispEidRegistrationLocatorWeight, lispUseProxyEtrAddressLength=lispUseProxyEtrAddressLength, lispEidRegistrationSiteDescription=lispEidRegistrationSiteDescription, lispGlobalStatsMapRepliesOut=lispGlobalStatsMapRepliesOut, lispMapCacheLocatorRlocLastStateChange=lispMapCacheLocatorRlocLastStateChange, lispGlobalStatsMapRepliesIn=lispGlobalStatsMapRepliesIn, lispMappingDatabaseTable=lispMappingDatabaseTable, lispEidRegistrationEtrSender=lispEidRegistrationEtrSender, lispEidRegistrationLocatorRloc=lispEidRegistrationLocatorRloc, lispMapCacheLocatorRlocEncapOctets=lispMapCacheLocatorRlocEncapOctets, lispGlobalStatsMapRequestsOut=lispGlobalStatsMapRequestsOut, lispMIBComplianceMapResolver=lispMIBComplianceMapResolver, lispFeaturesEtrMapCacheTtl=lispFeaturesEtrMapCacheTtl, lispMapCacheEidEncapPackets=lispMapCacheEidEncapPackets, lispMappingDatabaseLocatorRlocLocal=lispMappingDatabaseLocatorRlocLocal, lispMappingDatabaseLocatorRlocEncapPackets=lispMappingDatabaseLocatorRlocEncapPackets, lispEidRegistrationLocatorMPriority=lispEidRegistrationLocatorMPriority, lispMIBEncapStatisticsGroup=lispMIBEncapStatisticsGroup, lispEidRegistrationEtrTtl=lispEidRegistrationEtrTtl, lispFeaturesRlocProbeEnabled=lispFeaturesRlocProbeEnabled, lispMapCacheLocatorRlocLastPriorityChange=lispMapCacheLocatorRlocLastPriorityChange, lispFeaturesMapCacheSize=lispFeaturesMapCacheSize, lispUseMapResolverAddress=lispUseMapResolverAddress, lispUseProxyEtrMWeight=lispUseProxyEtrMWeight, lispMapCacheLocatorRlocDecapOctets=lispMapCacheLocatorRlocDecapOctets, lispMapCacheLocatorRlocState=lispMapCacheLocatorRlocState, lispEidRegistrationLocatorTable=lispEidRegistrationLocatorTable, lispMapCacheLocatorTable=lispMapCacheLocatorTable, lispMappingDatabaseLocatorRloc=lispMappingDatabaseLocatorRloc, lispGlobalStatsMapRequestsIn=lispGlobalStatsMapRequestsIn, lispMIBEtrExtendedGroup=lispMIBEtrExtendedGroup, lispUseMapServerTable=lispUseMapServerTable, lispMIBDecapStatisticsGroup=lispMIBDecapStatisticsGroup, lispMappingDatabaseEidLength=lispMappingDatabaseEidLength, lispMappingDatabaseLocatorRlocDecapPackets=lispMappingDatabaseLocatorRlocDecapPackets, lispFeaturesRouterTimeStamp=lispFeaturesRouterTimeStamp, lispConfiguredLocatorRlocEncapPackets=lispConfiguredLocatorRlocEncapPackets, lispFeaturesInstanceID=lispFeaturesInstanceID, lispEidRegistrationEtrProxyReply=lispEidRegistrationEtrProxyReply, lispUseProxyEtrState=lispUseProxyEtrState, lispMapCacheLocatorRlocEncapPackets=lispMapCacheLocatorRlocEncapPackets, lispEidRegistrationLocatorRlocLength=lispEidRegistrationLocatorRlocLength, lispMIBVrfGroup=lispMIBVrfGroup, lispMappingDatabaseDecapOctets=lispMappingDatabaseDecapOctets, lispMappingDatabaseEntry=lispMappingDatabaseEntry, lispMIB=lispMIB, lispConfiguredLocatorRlocLength=lispConfiguredLocatorRlocLength)
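The compliance statements and object groups above are standard pysnmp scaffolding; nothing in the module is usable until the compiled file is loaded through a MibBuilder. The sketch below is a minimal, hedged illustration of resolving one exported symbol to its OID: it assumes this compiled LISP-MIB module is discoverable on pysnmp's default MIB search path, and only the module and symbol names are taken from the code above.

# Minimal sketch: resolve one LISP-MIB symbol to its numeric OID with pysnmp.
# Assumes the compiled module above can be found by MibBuilder.
from pysnmp.smi import builder, view, rfc1902

mib_builder = builder.MibBuilder()
mib_view = view.MibViewController(mib_builder)

identity = rfc1902.ObjectIdentity('LISP-MIB', 'lispFeaturesEtrEnabled')
identity.resolveWithMib(mib_view)
print(identity.getOid())  # numeric OID under the 1.3.6.1.2.1.220 subtree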
| 177.719723
| 10,095
| 0.807831
|
153fa2b447fec3dd5522e58f2bbc905b2845a9c9
| 529
|
py
|
Python
|
problems/problem_4.py
|
minuq/project-euler
|
00307f01d06aa6dc61e701cf1bd90d2eafcf0478
|
[
"MIT"
] | null | null | null |
problems/problem_4.py
|
minuq/project-euler
|
00307f01d06aa6dc61e701cf1bd90d2eafcf0478
|
[
"MIT"
] | null | null | null |
problems/problem_4.py
|
minuq/project-euler
|
00307f01d06aa6dc61e701cf1bd90d2eafcf0478
|
[
"MIT"
] | null | null | null |
def palindrome(n):
    # Compare characters from both ends toward the middle.
    x = 0
    word = str(n)
    while x < len(word) // 2:
        if word[x] != word[len(word) - 1 - x]:
            return False
        else:
            x += 1
    return True


def calc(n):
    # Search products x * y with both factors in (n/2, n] and keep the largest palindrome found.
    tmppalindrome = 0
    x = n
    while n / 2 < x <= n:
        y = n
        while n / 2 < y <= n:
            tmp = x * y
            if palindrome(tmp) and tmp > tmppalindrome:
                tmppalindrome = tmp
            y -= 1
        x -= 1
    return tmppalindrome


def main():
    print("Problem 4:", calc(1000))


if __name__ == "__main__":
    main()
| 18.892857
| 55
| 0.444234
|
40bf24885aae7d1c1cc2310403406c6a80367501
| 3,670
|
py
|
Python
|
ucscentralsdk/mometa/adaptor/AdaptorFruCapProvider.py
|
ragupta-git/ucscentralsdk
|
2678008b5fb6b0fafafec388d0874147e95a1086
|
[
"Apache-2.0"
] | null | null | null |
ucscentralsdk/mometa/adaptor/AdaptorFruCapProvider.py
|
ragupta-git/ucscentralsdk
|
2678008b5fb6b0fafafec388d0874147e95a1086
|
[
"Apache-2.0"
] | null | null | null |
ucscentralsdk/mometa/adaptor/AdaptorFruCapProvider.py
|
ragupta-git/ucscentralsdk
|
2678008b5fb6b0fafafec388d0874147e95a1086
|
[
"Apache-2.0"
] | null | null | null |
"""This module contains the general information for AdaptorFruCapProvider ManagedObject."""
from ...ucscentralmo import ManagedObject
from ...ucscentralcoremeta import UcsCentralVersion, MoPropertyMeta, MoMeta
from ...ucscentralmeta import VersionMeta
class AdaptorFruCapProviderConsts():
DEPRECATED_FALSE = "false"
DEPRECATED_NO = "no"
DEPRECATED_TRUE = "true"
DEPRECATED_YES = "yes"
class AdaptorFruCapProvider(ManagedObject):
"""This is AdaptorFruCapProvider class."""
consts = AdaptorFruCapProviderConsts()
naming_props = set([u'vendor', u'model', u'revision'])
mo_meta = MoMeta("AdaptorFruCapProvider", "adaptorFruCapProvider", "manufacturer-[vendor]-model-[model]-revision-[revision]", VersionMeta.Version111a, "InputOutput", 0xff, [], ["admin"], [u'capabilityCatalogue'], [u'adaptorCapSpec', u'adaptorIScsiCap', u'adaptorRnicCapSpec', u'equipmentManufacturingDef', u'equipmentPicture'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"deprecated": MoPropertyMeta("deprecated", "deprecated", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"gencount": MoPropertyMeta("gencount", "gencount", "uint", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"mgmt_plane_ver": MoPropertyMeta("mgmt_plane_ver", "mgmtPlaneVer", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"model": MoPropertyMeta("model", "model", "string", VersionMeta.Version111a, MoPropertyMeta.NAMING, 0x4, 1, 510, None, [], []),
"prom_card_type": MoPropertyMeta("prom_card_type", "promCardType", "ushort", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], []),
"revision": MoPropertyMeta("revision", "revision", "string", VersionMeta.Version111a, MoPropertyMeta.NAMING, 0x10, 1, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x40, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version111a, MoPropertyMeta.NAMING, 0x80, 1, 510, None, [], []),
}
prop_map = {
"childAction": "child_action",
"deprecated": "deprecated",
"dn": "dn",
"gencount": "gencount",
"mgmtPlaneVer": "mgmt_plane_ver",
"model": "model",
"promCardType": "prom_card_type",
"revision": "revision",
"rn": "rn",
"status": "status",
"vendor": "vendor",
}
def __init__(self, parent_mo_or_dn, vendor, model, revision, **kwargs):
self._dirty_mask = 0
self.vendor = vendor
self.model = model
self.revision = revision
self.child_action = None
self.deprecated = None
self.gencount = None
self.mgmt_plane_ver = None
self.prom_card_type = None
self.status = None
ManagedObject.__init__(self, "AdaptorFruCapProvider", parent_mo_or_dn, **kwargs)
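For orientation, the constructor signature defined above can be exercised directly; in the sketch below the parent DN and the naming property values are made-up placeholders, not values from any real capability catalogue:

# Hypothetical instantiation of the managed object defined above.
# The parent DN, vendor, model and revision strings are placeholders.
mo = AdaptorFruCapProvider(parent_mo_or_dn="capabilities",
                           vendor="Example Vendor Inc",
                           model="EXAMPLE-ADAPTOR-01",
                           revision="0")
print(mo.vendor, mo.model, mo.revision)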
| 56.461538
| 340
| 0.666213
|
6e101e5d282f7b3f0c6cd7cdf5836c18da8f3347
| 1,991
|
py
|
Python
|
test/record/parser/test_response_whois_tcinet_ru_su_status_available.py
|
huyphan/pyyawhois
|
77fb2f73a9c67989f1d41d98f37037406a69d136
|
[
"MIT"
] | null | null | null |
test/record/parser/test_response_whois_tcinet_ru_su_status_available.py
|
huyphan/pyyawhois
|
77fb2f73a9c67989f1d41d98f37037406a69d136
|
[
"MIT"
] | null | null | null |
test/record/parser/test_response_whois_tcinet_ru_su_status_available.py
|
huyphan/pyyawhois
|
77fb2f73a9c67989f1d41d98f37037406a69d136
|
[
"MIT"
] | null | null | null |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.tcinet.ru/su/status_available
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisTcinetRuSuStatusAvailable(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.tcinet.ru/su/status_available.txt"
host = "whois.tcinet.ru"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, [])
def test_available(self):
eq_(self.record.available, True)
def test_domain(self):
eq_(self.record.domain, None)
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(self.record.nameservers, [])
def test_admin_contacts(self):
eq_(self.record.admin_contacts.__class__.__name__, 'list')
eq_(self.record.admin_contacts, [])
def test_registered(self):
eq_(self.record.registered, False)
def test_created_on(self):
eq_(self.record.created_on, None)
def test_registrar(self):
eq_(self.record.registrar, None)
def test_registrant_contacts(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.registrant_contacts)
def test_technical_contacts(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.technical_contacts)
def test_updated_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.updated_on)
def test_domain_id(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.domain_id)
def test_expires_on(self):
eq_(self.record.expires_on, None)
| 31.109375
| 96
| 0.709694
|
4a35f07ec9718cf1842d195e9728b929f19d5e32
| 923
|
py
|
Python
|
python/sagiri-bot/SAGIRIBOT/functions/get_history_today.py
|
GG-yuki/bugs
|
aabd576e9e57012a3390007af890b7c6ab6cdda8
|
[
"MIT"
] | null | null | null |
python/sagiri-bot/SAGIRIBOT/functions/get_history_today.py
|
GG-yuki/bugs
|
aabd576e9e57012a3390007af890b7c6ab6cdda8
|
[
"MIT"
] | null | null | null |
python/sagiri-bot/SAGIRIBOT/functions/get_history_today.py
|
GG-yuki/bugs
|
aabd576e9e57012a3390007af890b7c6ab6cdda8
|
[
"MIT"
] | null | null | null |
import aiohttp
from graia.application.message.chain import MessageChain
from graia.application.message.elements.internal import Plain
async def get_history_today() -> list:
"""
Get history today
Args:
None
Examples:
message = await get_history_today()
Return:
[
            str: Auxiliary treatment to be done (such as adding a statement),
            MessageChain: Message to be sent (MessageChain)
]
"""
api_url = "http://api.sagiri-web.com/historyToday/"
async with aiohttp.ClientSession() as session:
async with session.get(url=api_url) as resp:
text = await resp.text()
text = text.replace("\\n","\n")
text = text[1:-1]
while len(text) > 400:
text = "\n".join(text.split("\n")[int(len(text.split("\n"))/2):])
return [
"None",
MessageChain.create([
Plain(text=text)
])
]
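The docstring shows the coroutine being awaited inside the bot; outside that context a quick manual check could drive it with asyncio, as in this hedged sketch (it assumes network access to the API URL above):

# Hypothetical standalone check for the coroutine above.
import asyncio

if __name__ == "__main__":
    aux, message_chain = asyncio.run(get_history_today())
    print(aux, message_chain)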
| 24.289474
| 73
| 0.590466
|
7da8042e7a20cdb76d048b1739ade3d501f3f016
| 337
|
py
|
Python
|
warehouse/config/docs.py
|
ilhmndn/warehouse
|
19bce16f02e6a0cb50462d3a5485182d9a663373
|
[
"MIT"
] | null | null | null |
warehouse/config/docs.py
|
ilhmndn/warehouse
|
19bce16f02e6a0cb50462d3a5485182d9a663373
|
[
"MIT"
] | null | null | null |
warehouse/config/docs.py
|
ilhmndn/warehouse
|
19bce16f02e6a0cb50462d3a5485182d9a663373
|
[
"MIT"
] | null | null | null |
"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/warehouse"
# docs_base_url = "https://[org_name].github.io/warehouse"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "Warehouse Management System"
| 28.083333
| 68
| 0.735905
|
e3687c981d667f2d6f4b0ef8376520d900e6b57b
| 558
|
py
|
Python
|
src/analytics/migrations/0010_auto_20150203_1922.py
|
sachitanandpandey/videoup
|
e9231262de484893dac88da493bc1d3d5518a569
|
[
"MIT"
] | 79
|
2015-02-17T13:28:19.000Z
|
2022-03-26T04:36:24.000Z
|
src/analytics/migrations/0010_auto_20150203_1922.py
|
sachitanandpandey/videoup
|
e9231262de484893dac88da493bc1d3d5518a569
|
[
"MIT"
] | 5
|
2021-03-18T20:47:30.000Z
|
2022-03-11T23:26:28.000Z
|
src/analytics/migrations/0010_auto_20150203_1922.py
|
sachitanandpandey/videoup
|
e9231262de484893dac88da493bc1d3d5518a569
|
[
"MIT"
] | 67
|
2015-02-04T16:04:55.000Z
|
2022-01-17T16:22:41.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('analytics', '0009_auto_20150203_1909'),
]
operations = [
migrations.AlterField(
model_name='pageview',
name='timestamp',
field=models.DateTimeField(default=datetime.datetime(2015, 2, 3, 19, 22, 21, 464395, tzinfo=utc)),
preserve_default=True,
),
]
| 24.26087
| 110
| 0.641577
|
16ea81c7abd43059c437cd66f452afb6afa1496c
| 1,615
|
py
|
Python
|
Examples/mcp4725_simpletest.py
|
pmartel/CircuitPyDisk
|
4bcd71850dab9da84829126dfbd0e09948c3324f
|
[
"MIT"
] | null | null | null |
Examples/mcp4725_simpletest.py
|
pmartel/CircuitPyDisk
|
4bcd71850dab9da84829126dfbd0e09948c3324f
|
[
"MIT"
] | null | null | null |
Examples/mcp4725_simpletest.py
|
pmartel/CircuitPyDisk
|
4bcd71850dab9da84829126dfbd0e09948c3324f
|
[
"MIT"
] | null | null | null |
# Simple demo of setting the DAC value up and down through its entire range
# of values.
# Author: Tony DiCola
import board
import busio
import adafruit_mcp4725
# Initialize I2C bus.
i2c = busio.I2C(board.SCL, board.SDA)
# Initialize MCP4725.
dac = adafruit_mcp4725.MCP4725(i2c)
# Optionally you can specify a different address if you override the A0 pin.
# dac = adafruit_mcp4725.MCP4725(i2c, address=0x63)
# There are three ways to set the DAC output; you can use any of these:
dac.value = 65535 # Use the value property with a 16-bit number just like
# the AnalogOut class. Note the MCP4725 is only a 12-bit
# DAC so quantization errors will occur. The range of
# values is 0 (minimum/ground) to 65535 (maximum/Vout).
dac.raw_value = 4095 # Use the raw_value property to directly read and write
# the 12-bit DAC value. The range of values is
# 0 (minimum/ground) to 4095 (maximum/Vout).
dac.normalized_value = 1.0 # Use the normalized_value property to set the
# output with a floating point value in the range
# 0 to 1.0 where 0 is minimum/ground and 1.0 is
# maximum/Vout.
# Main loop will go up and down through the range of DAC values forever.
while True:
# Go up the 12-bit raw range.
print('Going up 0-3.3V...')
for i in range(4095):
dac.raw_value = i
# Go back down the 12-bit raw range.
print('Going down 3.3-0V...')
for i in range(4095, -1, -1):
dac.raw_value = i
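The value, raw_value and normalized_value properties above are three scalings of the same 12-bit register; a small helper makes the voltage-to-code relationship explicit. This is an illustrative sketch that assumes the DAC is powered from 3.3 V, matching the prints in the loop above:

# Hypothetical helper mapping a target output voltage to the 12-bit raw code.
def volts_to_raw(volts, vcc=3.3):
    code = int(round(volts / vcc * 4095))
    return max(0, min(4095, code))  # clamp to the valid 12-bit range

# e.g. dac.raw_value = volts_to_raw(1.65)  # roughly mid-scale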
| 37.55814
| 77
| 0.633437
|
f1007f08c4e56d108ded2452e526aee5e100218c
| 122
|
py
|
Python
|
examples/computer_vision/mmdetection_pytorch/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py
|
RAbraham/determined
|
1161b667ed6d0242f70f9f15d58600f910c8d7f9
|
[
"Apache-2.0"
] | 1
|
2021-03-29T13:39:45.000Z
|
2021-03-29T13:39:45.000Z
|
examples/computer_vision/mmdetection_pytorch/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py
|
RAbraham/determined
|
1161b667ed6d0242f70f9f15d58600f910c8d7f9
|
[
"Apache-2.0"
] | null | null | null |
examples/computer_vision/mmdetection_pytorch/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py
|
RAbraham/determined
|
1161b667ed6d0242f70f9f15d58600f910c8d7f9
|
[
"Apache-2.0"
] | null | null | null |
_base_ = "../htc/htc_x101_64x4d_fpn_16x1_20e_coco.py"
# learning policy
lr_config = dict(step=[24, 27])
total_epochs = 28
| 24.4
| 53
| 0.754098
|
4e53882cf017f921b1d013036a261a77d51debf8
| 27,424
|
py
|
Python
|
image_classification/T2T_ViT/main_multi_gpu.py
|
libertatis/PaddleViT
|
422b7fa4dda68c158fc9bd95e00aba01d4671916
|
[
"Apache-2.0"
] | 1
|
2021-12-28T07:08:58.000Z
|
2021-12-28T07:08:58.000Z
|
image_classification/T2T_ViT/main_multi_gpu.py
|
libertatis/PaddleViT
|
422b7fa4dda68c158fc9bd95e00aba01d4671916
|
[
"Apache-2.0"
] | null | null | null |
image_classification/T2T_ViT/main_multi_gpu.py
|
libertatis/PaddleViT
|
422b7fa4dda68c158fc9bd95e00aba01d4671916
|
[
"Apache-2.0"
] | 1
|
2022-01-10T04:52:21.000Z
|
2022-01-10T04:52:21.000Z
|
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T2T-ViT training/validation using multiple GPU """
import sys
import os
import time
import logging
import argparse
import random
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import paddle.distributed as dist
from datasets import get_dataloader
from datasets import get_dataset
from utils import AverageMeter
from utils import WarmupCosineScheduler
from utils import get_exclude_from_weight_decay_fn
from config import get_config
from config import update_config
from mixup import Mixup
from losses import LabelSmoothingCrossEntropyLoss
from losses import SoftTargetCrossEntropyLoss
from losses import DistillationLoss
from model_ema import ModelEma
from t2t_vit import build_t2t_vit as build_model
def get_arguments():
"""return argumeents, this will overwrite the config after loading yaml file"""
parser = argparse.ArgumentParser('T2T-ViT')
parser.add_argument('-cfg', type=str, default=None)
parser.add_argument('-dataset', type=str, default=None)
parser.add_argument('-batch_size', type=int, default=None)
parser.add_argument('-image_size', type=int, default=None)
parser.add_argument('-data_path', type=str, default=None)
parser.add_argument('-output', type=str, default=None)
parser.add_argument('-ngpus', type=int, default=None)
parser.add_argument('-pretrained', type=str, default=None)
parser.add_argument('-resume', type=str, default=None)
parser.add_argument('-last_epoch', type=int, default=None)
parser.add_argument('-eval', action='store_true')
parser.add_argument('-amp', action='store_true')
arguments = parser.parse_args()
return arguments
def get_logger(filename, logger_name=None):
"""set logging file and format
Args:
filename: str, full path of the logger file to write
logger_name: str, the logger name, e.g., 'master_logger', 'local_logger'
Return:
logger: python logger
"""
log_format = "%(asctime)s %(message)s"
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt="%m%d %I:%M:%S %p")
# different name is needed when creating multiple logger in one process
logger = logging.getLogger(logger_name)
fh = logging.FileHandler(os.path.join(filename))
fh.setFormatter(logging.Formatter(log_format))
logger.addHandler(fh)
return logger
def train(dataloader,
model,
criterion,
optimizer,
epoch,
total_epochs,
total_batch,
debug_steps=100,
accum_iter=1,
model_ema=None,
mixup_fn=None,
amp=False,
local_logger=None,
master_logger=None):
"""Training for one epoch
Args:
dataloader: paddle.io.DataLoader, dataloader instance
model: nn.Layer, a ViT model
        criterion: nn.criterion
        optimizer: paddle.optimizer.Optimizer, optimizer instance
epoch: int, current epoch
total_epochs: int, total num of epochs
total_batch: int, total num of batches for one epoch
debug_steps: int, num of iters to log info, default: 100
accum_iter: int, num of iters for accumulating gradients, default: 1
model_ema: ModelEma, model moving average instance
mixup_fn: Mixup, mixup instance, default: None
        amp: bool, if True, use mixed precision training, default: False
local_logger: logger for local process/gpu, default: None
master_logger: logger for main process, default: None
Returns:
train_loss_meter.avg: float, average loss on current process/gpu
train_acc_meter.avg: float, average top1 accuracy on current process/gpu
master_train_loss_meter.avg: float, average loss on all processes/gpus
master_train_acc_meter.avg: float, average top1 accuracy on all processes/gpus
train_time: float, training time
"""
model.train()
train_loss_meter = AverageMeter()
train_acc_meter = AverageMeter()
master_train_loss_meter = AverageMeter()
master_train_acc_meter = AverageMeter()
if amp is True:
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
time_st = time.time()
for batch_id, data in enumerate(dataloader):
image = data[0]
label = data[1]
label_orig = label.clone()
if mixup_fn is not None:
image, label = mixup_fn(image, label_orig)
if amp is True: # mixed precision training
with paddle.amp.auto_cast():
output = model(image)
loss = criterion(output, label)
scaled = scaler.scale(loss)
scaled.backward()
if ((batch_id +1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):
scaler.minimize(optimizer, scaled)
optimizer.clear_grad()
else: # full precision training
output = model(image)
loss = criterion(output, label)
#NOTE: division may be needed depending on the loss function
# Here no division is needed:
# default 'reduction' param in nn.CrossEntropyLoss is set to 'mean'
#loss = loss / accum_iter
loss.backward()
if ((batch_id +1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):
optimizer.step()
optimizer.clear_grad()
if model_ema is not None and dist.get_rank() == 0:
model_ema.update(model)
pred = F.softmax(output)
if mixup_fn:
acc = paddle.metric.accuracy(pred, label_orig)
else:
acc = paddle.metric.accuracy(pred, label_orig.unsqueeze(1))
batch_size = paddle.to_tensor(image.shape[0])
# sync from other gpus for overall loss and acc
master_loss = loss.clone()
master_acc = acc.clone()
master_batch_size = batch_size.clone()
dist.all_reduce(master_loss)
dist.all_reduce(master_acc)
dist.all_reduce(master_batch_size)
master_loss = master_loss / dist.get_world_size()
master_acc = master_acc / dist.get_world_size()
master_train_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0])
master_train_acc_meter.update(master_acc.numpy()[0], master_batch_size.numpy()[0])
train_loss_meter.update(loss.numpy()[0], batch_size.numpy()[0])
train_acc_meter.update(acc.numpy()[0], batch_size.numpy()[0])
if batch_id % debug_steps == 0:
if local_logger:
local_logger.info(
f"Epoch[{epoch:03d}/{total_epochs:03d}], " +
f"Step[{batch_id:04d}/{total_batch:04d}], " +
f"Avg Loss: {train_loss_meter.avg:.4f}, " +
f"Avg Acc: {train_acc_meter.avg:.4f}")
if master_logger and dist.get_rank() == 0:
master_logger.info(
f"Epoch[{epoch:03d}/{total_epochs:03d}], " +
f"Step[{batch_id:04d}/{total_batch:04d}], " +
f"Avg Loss: {master_train_loss_meter.avg:.4f}, " +
f"Avg Acc: {master_train_acc_meter.avg:.4f}")
train_time = time.time() - time_st
return (train_loss_meter.avg,
train_acc_meter.avg,
master_train_loss_meter.avg,
master_train_acc_meter.avg,
train_time)
def validate(dataloader,
model,
criterion,
total_batch,
debug_steps=100,
local_logger=None,
master_logger=None):
"""Validation for whole dataset
Args:
dataloader: paddle.io.DataLoader, dataloader instance
model: nn.Layer, a ViT model
criterion: nn.criterion
        total_batch: int, total num of batches for one epoch, for logging
debug_steps: int, num of iters to log info, default: 100
local_logger: logger for local process/gpu, default: None
master_logger: logger for main process, default: None
Returns:
val_loss_meter.avg: float, average loss on current process/gpu
val_acc1_meter.avg: float, average top1 accuracy on current process/gpu
val_acc5_meter.avg: float, average top5 accuracy on current process/gpu
master_val_loss_meter.avg: float, average loss on all processes/gpus
master_val_acc1_meter.avg: float, average top1 accuracy on all processes/gpus
master_val_acc5_meter.avg: float, average top5 accuracy on all processes/gpus
val_time: float, validation time
"""
model.eval()
val_loss_meter = AverageMeter()
val_acc1_meter = AverageMeter()
val_acc5_meter = AverageMeter()
master_val_loss_meter = AverageMeter()
master_val_acc1_meter = AverageMeter()
master_val_acc5_meter = AverageMeter()
time_st = time.time()
with paddle.no_grad():
for batch_id, data in enumerate(dataloader):
image = data[0]
label = data[1]
output = model(image)
loss = criterion(output, label)
pred = F.softmax(output)
acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1))
acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5)
batch_size = paddle.to_tensor(image.shape[0])
master_loss = loss.clone()
master_acc1 = acc1.clone()
master_acc5 = acc5.clone()
master_batch_size = batch_size.clone()
dist.all_reduce(master_loss)
dist.all_reduce(master_acc1)
dist.all_reduce(master_acc5)
dist.all_reduce(master_batch_size)
master_loss = master_loss / dist.get_world_size()
master_acc1 = master_acc1 / dist.get_world_size()
master_acc5 = master_acc5 / dist.get_world_size()
master_val_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0])
master_val_acc1_meter.update(master_acc1.numpy()[0], master_batch_size.numpy()[0])
master_val_acc5_meter.update(master_acc5.numpy()[0], master_batch_size.numpy()[0])
val_loss_meter.update(loss.numpy()[0], batch_size.numpy()[0])
val_acc1_meter.update(acc1.numpy()[0], batch_size.numpy()[0])
val_acc5_meter.update(acc5.numpy()[0], batch_size.numpy()[0])
if batch_id % debug_steps == 0:
if local_logger:
local_logger.info(
f"Val Step[{batch_id:04d}/{total_batch:04d}], " +
f"Avg Loss: {val_loss_meter.avg:.4f}, " +
f"Avg Acc@1: {val_acc1_meter.avg:.4f}, " +
f"Avg Acc@5: {val_acc5_meter.avg:.4f}")
if master_logger and dist.get_rank() == 0:
master_logger.info(
f"Val Step[{batch_id:04d}/{total_batch:04d}], " +
f"Avg Loss: {master_val_loss_meter.avg:.4f}, " +
f"Avg Acc@1: {master_val_acc1_meter.avg:.4f}, " +
f"Avg Acc@5: {master_val_acc5_meter.avg:.4f}")
val_time = time.time() - time_st
return (val_loss_meter.avg,
val_acc1_meter.avg,
val_acc5_meter.avg,
master_val_loss_meter.avg,
master_val_acc1_meter.avg,
master_val_acc5_meter.avg,
val_time)
def main_worker(*args):
# STEP 0: Preparation
config = args[0]
dist.init_parallel_env()
last_epoch = config.TRAIN.LAST_EPOCH
world_size = dist.get_world_size()
local_rank = dist.get_rank()
seed = config.SEED + local_rank
paddle.seed(seed)
np.random.seed(seed)
random.seed(seed)
# logger for each process/gpu
local_logger = get_logger(
filename=os.path.join(config.SAVE, 'log_{}.txt'.format(local_rank)),
logger_name='local_logger')
# overall logger
if local_rank == 0:
master_logger = get_logger(
filename=os.path.join(config.SAVE, 'log.txt'),
logger_name='master_logger')
master_logger.info(f'\n{config}')
else:
master_logger = None
local_logger.info(f'----- world_size = {world_size}, local_rank = {local_rank}')
if local_rank == 0:
master_logger.info(f'----- world_size = {world_size}, local_rank = {local_rank}')
# STEP 1: Create model
model = build_model(config)
# define model ema
model_ema = None
if not config.EVAL and config.TRAIN.MODEL_EMA and local_rank == 0:
model_ema = ModelEma(model, decay=config.TRAIN.MODEL_EMA_DECAY)
model = paddle.DataParallel(model)
# STEP 2: Create train and val dataloader
dataset_train, dataset_val = args[1], args[2]
# Create training dataloader
if not config.EVAL:
dataloader_train = get_dataloader(config, dataset_train, 'train', True)
total_batch_train = len(dataloader_train)
local_logger.info(f'----- Total # of train batch (single gpu): {total_batch_train}')
if local_rank == 0:
master_logger.info(f'----- Total # of train batch (single gpu): {total_batch_train}')
# Create validation dataloader
dataloader_val = get_dataloader(config, dataset_val, 'test', True)
total_batch_val = len(dataloader_val)
local_logger.info(f'----- Total # of val batch (single gpu): {total_batch_val}')
if local_rank == 0:
master_logger.info(f'----- Total # of val batch (single gpu): {total_batch_val}')
# STEP 3: Define Mixup function
mixup_fn = None
if config.TRAIN.MIXUP_PROB > 0 or config.TRAIN.CUTMIX_ALPHA > 0 or config.TRAIN.CUTMIX_MINMAX is not None:
mixup_fn = Mixup(mixup_alpha=config.TRAIN.MIXUP_ALPHA,
cutmix_alpha=config.TRAIN.CUTMIX_ALPHA,
cutmix_minmax=config.TRAIN.CUTMIX_MINMAX,
prob=config.TRAIN.MIXUP_PROB,
switch_prob=config.TRAIN.MIXUP_SWITCH_PROB,
mode=config.TRAIN.MIXUP_MODE,
label_smoothing=config.TRAIN.SMOOTHING,
num_classes=config.MODEL.NUM_CLASSES)
# STEP 4: Define criterion
if config.TRAIN.MIXUP_PROB > 0.:
criterion = SoftTargetCrossEntropyLoss()
elif config.TRAIN.SMOOTHING:
criterion = LabelSmoothingCrossEntropyLoss()
else:
criterion = nn.CrossEntropyLoss()
# only use cross entropy for val
criterion_val = nn.CrossEntropyLoss()
# STEP 5: Define optimizer and lr_scheduler
# set lr according to batch size and world size (hacked from Swin official code and modified for CSwin)
if config.TRAIN.LINEAR_SCALED_LR is not None:
linear_scaled_lr = (
config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR
linear_scaled_warmup_start_lr = (
config.TRAIN.WARMUP_START_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR
linear_scaled_end_lr = (
config.TRAIN.END_LR * config.DATA.BATCH_SIZE * world_size) / config.TRAIN.LINEAR_SCALED_LR
if config.TRAIN.ACCUM_ITER > 1:
linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUM_ITER
linear_scaled_warmup_start_lr = linear_scaled_warmup_start_lr * config.TRAIN.ACCUM_ITER
linear_scaled_end_lr = linear_scaled_end_lr * config.TRAIN.ACCUM_ITER
config.TRAIN.BASE_LR = linear_scaled_lr
config.TRAIN.WARMUP_START_LR = linear_scaled_warmup_start_lr
config.TRAIN.END_LR = linear_scaled_end_lr
scheduler = None
if config.TRAIN.LR_SCHEDULER.NAME == "warmupcosine":
scheduler = WarmupCosineScheduler(learning_rate=config.TRAIN.BASE_LR,
warmup_start_lr=config.TRAIN.WARMUP_START_LR,
start_lr=config.TRAIN.BASE_LR,
end_lr=config.TRAIN.END_LR,
warmup_epochs=config.TRAIN.WARMUP_EPOCHS,
total_epochs=config.TRAIN.NUM_EPOCHS,
last_epoch=config.TRAIN.LAST_EPOCH,
)
elif config.TRAIN.LR_SCHEDULER.NAME == "cosine":
scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=config.TRAIN.BASE_LR,
T_max=config.TRAIN.NUM_EPOCHS,
last_epoch=last_epoch)
    elif config.TRAIN.LR_SCHEDULER.NAME == "multi-step":
milestones = [int(v.strip()) for v in config.TRAIN.LR_SCHEDULER.MILESTONES.split(",")]
scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=config.TRAIN.BASE_LR,
milestones=milestones,
gamma=config.TRAIN.LR_SCHEDULER.DECAY_RATE,
last_epoch=last_epoch)
else:
local_logger.fatal(f"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.")
if local_rank == 0:
master_logger.fatal(f"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.")
raise NotImplementedError(f"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.")
if config.TRAIN.OPTIMIZER.NAME == "SGD":
if config.TRAIN.GRAD_CLIP:
clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP)
else:
clip = None
optimizer = paddle.optimizer.Momentum(
parameters=model.parameters(),
learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR,
weight_decay=config.TRAIN.WEIGHT_DECAY,
momentum=config.TRAIN.OPTIMIZER.MOMENTUM,
grad_clip=clip)
elif config.TRAIN.OPTIMIZER.NAME == "AdamW":
if config.TRAIN.GRAD_CLIP:
clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP)
else:
clip = None
optimizer = paddle.optimizer.AdamW(
parameters=model.parameters(),
learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR,
beta1=config.TRAIN.OPTIMIZER.BETAS[0],
beta2=config.TRAIN.OPTIMIZER.BETAS[1],
weight_decay=config.TRAIN.WEIGHT_DECAY,
epsilon=config.TRAIN.OPTIMIZER.EPS,
grad_clip=clip,
apply_decay_param_fun=get_exclude_from_weight_decay_fn(['cls_token']),
)
else:
local_logger.fatal(f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.")
if local_rank == 0:
master_logger.fatal(f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.")
raise NotImplementedError(f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.")
    # STEP 6: Load pretrained model / load resume model and optimizer states
if config.MODEL.PRETRAINED:
if (config.MODEL.PRETRAINED).endswith('.pdparams'):
raise ValueError(f'{config.MODEL.PRETRAINED} should not contain .pdparams')
assert os.path.isfile(config.MODEL.PRETRAINED + '.pdparams') is True
model_state = paddle.load(config.MODEL.PRETRAINED+'.pdparams')
model.set_dict(model_state)
local_logger.info(f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}")
if local_rank == 0:
master_logger.info(
f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}")
if config.MODEL.RESUME:
assert os.path.isfile(config.MODEL.RESUME + '.pdparams') is True
assert os.path.isfile(config.MODEL.RESUME + '.pdopt') is True
model_state = paddle.load(config.MODEL.RESUME + '.pdparams')
model.set_dict(model_state)
opt_state = paddle.load(config.MODEL.RESUME+'.pdopt')
optimizer.set_state_dict(opt_state)
local_logger.info(
f"----- Resume Training: Load model and optmizer from {config.MODEL.RESUME}")
if local_rank == 0:
master_logger.info(
f"----- Resume Training: Load model and optmizer from {config.MODEL.RESUME}")
# load ema model
if model_ema is not None and os.path.isfile(config.MODEL.RESUME + '-EMA.pdparams'):
model_ema_state = paddle.load(config.MODEL.RESUME + '-EMA.pdparams')
model_ema.module.set_state_dict(model_ema_state)
local_logger.info(f'----- Load model ema from {config.MODEL.RESUME}-EMA.pdparams')
if local_rank == 0:
master_logger.info(f'----- Load model ema from {config.MODEL.RESUME}-EMA.pdparams')
# STEP 7: Validation (eval mode)
if config.EVAL:
local_logger.info('----- Start Validating')
if local_rank == 0:
master_logger.info('----- Start Validating')
val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate(
dataloader=dataloader_val,
model=model,
criterion=criterion_val,
total_batch=total_batch_val,
debug_steps=config.REPORT_FREQ,
local_logger=local_logger,
master_logger=master_logger)
local_logger.info(f"Validation Loss: {val_loss:.4f}, " +
f"Validation Acc@1: {val_acc1:.4f}, " +
f"Validation Acc@5: {val_acc5:.4f}, " +
f"time: {val_time:.2f}")
if local_rank == 0:
master_logger.info(f"Validation Loss: {avg_loss:.4f}, " +
f"Validation Acc@1: {avg_acc1:.4f}, " +
f"Validation Acc@5: {avg_acc5:.4f}, " +
f"time: {val_time:.2f}")
return
# STEP 8: Start training and validation (train mode)
local_logger.info(f"Start training from epoch {last_epoch+1}.")
if local_rank == 0:
master_logger.info(f"Start training from epoch {last_epoch+1}.")
for epoch in range(last_epoch+1, config.TRAIN.NUM_EPOCHS+1):
# train
local_logger.info(f"Now training epoch {epoch}. LR={optimizer.get_lr():.6f}")
if local_rank == 0:
master_logger.info(f"Now training epoch {epoch}. LR={optimizer.get_lr():.6f}")
train_loss, train_acc, avg_loss, avg_acc, train_time = train(
dataloader=dataloader_train,
model=model,
criterion=criterion,
optimizer=optimizer,
epoch=epoch,
total_epochs=config.TRAIN.NUM_EPOCHS,
total_batch=total_batch_train,
debug_steps=config.REPORT_FREQ,
accum_iter=config.TRAIN.ACCUM_ITER,
model_ema=model_ema,
mixup_fn=mixup_fn,
amp=config.AMP,
local_logger=local_logger,
master_logger=master_logger)
scheduler.step()
local_logger.info(f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " +
f"Train Loss: {train_loss:.4f}, " +
f"Train Acc: {train_acc:.4f}, " +
f"time: {train_time:.2f}")
if local_rank == 0:
master_logger.info(f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " +
f"Train Loss: {avg_loss:.4f}, " +
f"Train Acc: {avg_acc:.4f}, " +
f"time: {train_time:.2f}")
# validation
if epoch % config.VALIDATE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:
local_logger.info(f'----- Validation after Epoch: {epoch}')
if local_rank == 0:
master_logger.info(f'----- Validation after Epoch: {epoch}')
val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate(
dataloader=dataloader_val,
model=model,
criterion=criterion_val,
total_batch=total_batch_val,
debug_steps=config.REPORT_FREQ,
local_logger=local_logger,
master_logger=master_logger)
local_logger.info(f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " +
f"Validation Loss: {val_loss:.4f}, " +
f"Validation Acc@1: {val_acc1:.4f}, " +
f"Validation Acc@5: {val_acc5:.4f}, " +
f"time: {val_time:.2f}")
if local_rank == 0:
master_logger.info(f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " +
f"Validation Loss: {avg_loss:.4f}, " +
f"Validation Acc@1: {avg_acc1:.4f}, " +
f"Validation Acc@5: {avg_acc5:.4f}, " +
f"time: {val_time:.2f}")
# model save
if local_rank == 0:
if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:
model_path = os.path.join(
config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{train_loss}")
paddle.save(model.state_dict(), model_path + '.pdparams')
paddle.save(optimizer.state_dict(), model_path + '.pdopt')
master_logger.info(f"----- Save model: {model_path}.pdparams")
master_logger.info(f"----- Save optim: {model_path}.pdopt")
if model_ema is not None:
model_ema_path = os.path.join(
config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{train_loss}-EMA")
paddle.save(model_ema.state_dict(), model_ema_path + '.pdparams')
master_logger.info(f"----- Save ema model: {model_ema_path}.pdparams")
def main():
# config is updated by: (1) config.py, (2) yaml file, (3) arguments
arguments = get_arguments()
config = get_config()
config = update_config(config, arguments)
# set output folder
if not config.EVAL:
config.SAVE = '{}/train-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S'))
else:
config.SAVE = '{}/eval-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S'))
if not os.path.exists(config.SAVE):
os.makedirs(config.SAVE, exist_ok=True)
# get dataset and start DDP
if not config.EVAL:
dataset_train = get_dataset(config, mode='train')
else:
dataset_train = None
dataset_val = get_dataset(config, mode='val')
config.NGPUS = len(paddle.static.cuda_places()) if config.NGPUS == -1 else config.NGPUS
dist.spawn(main_worker, args=(config, dataset_train, dataset_val, ), nprocs=config.NGPUS)
if __name__ == "__main__":
main()
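# Illustrative launch sketch (not part of the original script). The exact option names come from
# get_arguments(), which is defined elsewhere, so the flags below are assumptions; the point is the
# precedence noted in main(): config.py defaults, then the yaml file, then command-line arguments.
#   python main_multi_gpu.py -cfg='./configs/some_model.yaml' -dataset='imagenet2012' -batch_size=64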
| 45.179572
| 111
| 0.612894
|
fe1c27d2ba98b3cc48d31f87d8b29100034d3cc3
| 580
|
py
|
Python
|
polls/admin.py
|
philmui/polling
|
f436f6219b108dcb0410cf67e8e4028568a3dd51
|
[
"MIT"
] | null | null | null |
polls/admin.py
|
philmui/polling
|
f436f6219b108dcb0410cf67e8e4028568a3dd51
|
[
"MIT"
] | null | null | null |
polls/admin.py
|
philmui/polling
|
f436f6219b108dcb0410cf67e8e4028568a3dd51
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Choice, Question
admin.AdminSite.site_header = "Polling Administration"
#class ChoiceInline(admin.StackedInline):
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
class QuestionAdmin(admin.ModelAdmin):
list_display = ('text', 'pub_date', 'was_published_recently')
list_filter = ['pub_date']
search_fields = ['text']
fieldsets = [
(None, {'fields': ['text']}),
('Date information', {'fields': ['pub_date']})
]
inlines = [ChoiceInline]
admin.site.register(Question, QuestionAdmin)
| 25.217391
| 62
| 0.715517
|
4d226527944482bc09f4dc18094032322e1b7430
| 15,213
|
py
|
Python
|
tests/emails_test.py
|
mailosaurapp/mailosaur-python
|
ee7ca112b0351dec6320108e718f643c9d874042
|
[
"MIT"
] | null | null | null |
tests/emails_test.py
|
mailosaurapp/mailosaur-python
|
ee7ca112b0351dec6320108e718f643c9d874042
|
[
"MIT"
] | 2
|
2016-05-16T21:11:21.000Z
|
2016-05-17T18:47:04.000Z
|
tests/emails_test.py
|
mailosaurapp/mailosaur-python
|
ee7ca112b0351dec6320108e718f643c9d874042
|
[
"MIT"
] | null | null | null |
import pytest
import os
import base64
from datetime import datetime, timedelta
import unittest
from unittest import TestCase
from .mailer import Mailer
from mailosaur import MailosaurClient
from mailosaur.models import Attachment, SearchCriteria, MessageCreateOptions, MessageForwardOptions, MessageReplyOptions, MailosaurException
class EmailsTest(TestCase):
@classmethod
def setUpClass(cls):
api_key = os.getenv('MAILOSAUR_API_KEY')
base_url = os.getenv('MAILOSAUR_BASE_URL')
cls.server = os.getenv('MAILOSAUR_SERVER')
cls.verified_domain = os.getenv('MAILOSAUR_VERIFIED_DOMAIN')
        if api_key is None or cls.server is None:
raise Exception(
"Missing necessary environment variables - refer to README.md")
cls.client = MailosaurClient(api_key, base_url)
cls.client.messages.delete_all(cls.server)
Mailer.send_emails(cls.client, cls.server, 5)
cls.emails = cls.client.messages.list(cls.server).items
def test_list(self):
self.assertEqual(5, len(self.emails))
for email in self.emails:
self.validate_email_summary(email)
def test_list_received_after(self):
past_date = datetime.today() - timedelta(minutes=10)
past_emails = self.client.messages.list(
self.server, received_after=past_date).items
self.assertTrue(len(past_emails) > 0)
future_emails = self.client.messages.list(
self.server, received_after=datetime.today()).items
self.assertEqual(0, len(future_emails))
def test_get(self):
host = os.getenv('MAILOSAUR_SMTP_HOST', 'mailosaur.net')
test_email_address = "wait_for_test@%s.%s" % (self.server, host)
Mailer.send_email(self.client, self.server, test_email_address)
criteria = SearchCriteria()
criteria.sent_to = test_email_address
email = self.client.messages.get(self.server, criteria)
self.validate_email(email)
    def test_get_by_id(self):
email_to_retrieve = self.emails[0]
email = self.client.messages.get_by_id(email_to_retrieve.id)
self.validate_email(email)
self.validate_headers(email)
def test_get_not_found(self):
with self.assertRaises(MailosaurException):
self.client.messages.get_by_id(
"efe907e9-74ed-4113-a3e0-a3d41d914765")
def test_search_timeout_errors_suppressed(self):
criteria = SearchCriteria()
criteria.sent_from = "neverfound@example.com"
results = self.client.messages.search(
self.server, criteria, timeout=1, error_on_timeout=False).items
self.assertEqual(0, len(results))
def test_search_no_criteria_error(self):
with self.assertRaises(MailosaurException):
self.client.messages.search(self.server, SearchCriteria())
def test_search_by_sent_from(self):
target_email = self.emails[1]
criteria = SearchCriteria()
criteria.sent_from = target_email.sender[0].email
results = self.client.messages.search(self.server, criteria).items
self.assertEqual(1, len(results))
self.assertEqual(
target_email.sender[0].email, results[0].sender[0].email)
self.assertEqual(target_email.subject, results[0].subject)
def test_search_by_sent_to(self):
target_email = self.emails[1]
criteria = SearchCriteria()
criteria.sent_to = target_email.to[0].email
results = self.client.messages.search(self.server, criteria).items
self.assertEqual(1, len(results))
self.assertEqual(target_email.to[0].email, results[0].to[0].email)
self.assertEqual(target_email.subject, results[0].subject)
def test_search_by_body(self):
target_email = self.emails[1]
unique_string = target_email.subject[0:10]
criteria = SearchCriteria()
criteria.body = "%s html" % (unique_string)
results = self.client.messages.search(self.server, criteria).items
self.assertEqual(1, len(results))
self.assertEqual(target_email.to[0].email, results[0].to[0].email)
self.assertEqual(target_email.subject, results[0].subject)
def test_search_by_subject(self):
target_email = self.emails[1]
unique_string = target_email.subject[0:10]
criteria = SearchCriteria()
criteria.subject = unique_string
results = self.client.messages.search(self.server, criteria).items
self.assertEqual(1, len(results))
self.assertEqual(target_email.to[0].email, results[0].to[0].email)
self.assertEqual(target_email.subject, results[0].subject)
def test_search_with_match_all(self):
target_email = self.emails[1]
unique_string = target_email.subject[0:10]
criteria = SearchCriteria()
criteria.subject = unique_string
criteria.body = "this is a link"
criteria.match = "ALL"
results = self.client.messages.search(self.server, criteria).items
self.assertEqual(1, len(results))
def test_search_with_match_any(self):
target_email = self.emails[1]
unique_string = target_email.subject[0:10]
criteria = SearchCriteria()
criteria.subject = unique_string
criteria.body = "this is a link"
criteria.match = "ANY"
results = self.client.messages.search(self.server, criteria).items
self.assertEqual(4, len(results))
def test_search_with_special_characters(self):
criteria = SearchCriteria()
criteria.subject = "Search with ellipsis … and emoji 👨🏿🚒"
results = self.client.messages.search(self.server, criteria).items
self.assertEqual(0, len(results))
def test_spam_analysis(self):
target_id = self.emails[0].id
result = self.client.analysis.spam(target_id)
for rule in result.spam_filter_results.spam_assassin:
self.assertIsNotNone(rule.rule)
self.assertIsNotNone(rule.description)
def test_delete(self):
target_email_id = self.emails[4].id
self.client.messages.delete(target_email_id)
# Attempting to delete again should fail
with self.assertRaises(MailosaurException):
self.client.messages.delete(target_email_id)
def test_create_and_send_with_text(self):
if self.verified_domain is None:
pytest.skip("Requires verified domain secret")
subject = "New message"
options = MessageCreateOptions(
"anything@%s" % (self.verified_domain), True, subject, "This is a new email")
message = self.client.messages.create(self.server, options)
self.assertIsNotNone(message.id)
self.assertEqual(subject, message.subject)
def test_create_and_send_with_html(self):
if self.verified_domain is None:
pytest.skip("Requires verified domain secret")
subject = "New HTML message"
options = MessageCreateOptions(
"anything@%s" % (self.verified_domain), True, subject, html="<p>This is a new email.</p>")
message = self.client.messages.create(self.server, options)
self.assertIsNotNone(message.id)
self.assertEqual(subject, message.subject)
def test_create_and_send_with_attachment(self):
if self.verified_domain is None:
pytest.skip("Requires verified domain secret")
subject = "New message with attachment"
file = open(os.path.join(os.path.dirname(
__file__), 'resources', 'cat.png'), 'rb')
attachment = Attachment()
attachment.file_name = "cat.png"
attachment.content = base64.b64encode(file.read()).decode('ascii')
attachment.content_type = "image/png"
attachments = [attachment]
options = MessageCreateOptions("anything@%s" % (self.verified_domain), True,
subject, html="<p>This is a new email.</p>", attachments=attachments)
message = self.client.messages.create(self.server, options)
self.assertEqual(1, len(message.attachments))
file1 = message.attachments[0]
self.assertIsNotNone(file1.id)
self.assertIsNotNone(file1.url)
self.assertEqual(82138, file1.length)
self.assertEqual("cat.png", file1.file_name)
self.assertEqual("image/png", file1.content_type)
def test_forward_with_text(self):
if self.verified_domain is None:
pytest.skip("Requires verified domain secret")
body = "Forwarded message"
options = MessageForwardOptions(
"anything@%s" % (self.verified_domain), body)
message = self.client.messages.forward(self.emails[0].id, options)
self.assertIsNotNone(message.id)
self.assertTrue(body in message.text.body)
def test_forward_with_html(self):
if self.verified_domain is None:
pytest.skip("Requires verified domain secret")
body = "<p>Forwarded <strong>HTML</strong> message.</p>"
options = MessageForwardOptions(
"anything@%s" % (self.verified_domain), html=body)
message = self.client.messages.forward(self.emails[0].id, options)
self.assertIsNotNone(message.id)
self.assertTrue(body in message.html.body)
def test_reply_with_text(self):
if self.verified_domain is None:
pytest.skip("Requires verified domain secret")
body = "Reply message body"
options = MessageReplyOptions(body)
message = self.client.messages.reply(self.emails[0].id, options)
self.assertIsNotNone(message.id)
self.assertTrue(body in message.text.body)
def test_reply_with_html(self):
if self.verified_domain is None:
pytest.skip("Requires verified domain secret")
body = "<p>Reply <strong>HTML</strong> message body.</p>"
options = MessageReplyOptions(html=body)
message = self.client.messages.reply(self.emails[0].id, options)
self.assertIsNotNone(message.id)
self.assertTrue(body in message.html.body)
def test_reply_with_attachment(self):
if self.verified_domain is None:
pytest.skip("Requires verified domain secret")
body = "<p>Reply with attachment.</p>"
file = open(os.path.join(os.path.dirname(
__file__), 'resources', 'cat.png'), 'rb')
attachment = Attachment()
attachment.file_name = "cat.png"
attachment.content = base64.b64encode(file.read()).decode('ascii')
attachment.content_type = "image/png"
options = MessageReplyOptions(html=body, attachments=[attachment])
message = self.client.messages.reply(self.emails[0].id, options)
self.assertEqual(1, len(message.attachments))
file1 = message.attachments[0]
self.assertIsNotNone(file1.id)
self.assertIsNotNone(file1.url)
self.assertEqual(82138, file1.length)
self.assertEqual("cat.png", file1.file_name)
self.assertEqual("image/png", file1.content_type)
def validate_email(self, email):
self.validate_metadata(email)
self.validate_attachments(email)
self.validate_html(email)
self.validate_text(email)
self.assertIsNotNone(email.metadata.ehlo)
self.assertIsNotNone(email.metadata.mail_from)
self.assertEqual(1, len(email.metadata.rcpt_to))
def validate_email_summary(self, email):
self.validate_metadata(email)
self.assertIsNotNone(email.summary)
self.assertEqual(2, email.attachments)
def validate_html(self, email):
# Html.Body
self.assertTrue(email.html.body.startswith("<div dir=\"ltr\">"))
# Html.Links
self.assertEqual(3, len(email.html.links))
self.assertEqual("https://mailosaur.com/", email.html.links[0].href)
self.assertEqual("mailosaur", email.html.links[0].text)
self.assertEqual("https://mailosaur.com/", email.html.links[1].href)
self.assertIsNone(email.html.links[1].text)
self.assertEqual("http://invalid/", email.html.links[2].href)
self.assertEqual("invalid", email.html.links[2].text)
# Html.Codes
self.assertEqual(2, len(email.html.codes))
self.assertEqual("123456", email.html.codes[0].value)
self.assertEqual("G3H1Y2", email.html.codes[1].value)
# Html.Images
self.assertTrue(email.html.images[1].src.startswith('cid:'))
self.assertEqual("Inline image 1", email.html.images[1].alt)
def validate_text(self, email):
# Text.Body
self.assertTrue(email.text.body.startswith("this is a test"))
# Text.Links
self.assertEqual(2, len(email.text.links))
self.assertEqual("https://mailosaur.com/", email.text.links[0].href)
self.assertEqual(email.text.links[0].href, email.text.links[0].text)
self.assertEqual("https://mailosaur.com/", email.text.links[1].href)
self.assertEqual(email.text.links[1].href, email.text.links[1].text)
# Text.Codes
self.assertEqual(2, len(email.text.codes))
self.assertEqual("654321", email.text.codes[0].value)
self.assertEqual("5H0Y2", email.text.codes[1].value)
def validate_headers(self, email):
expected_from_header = "%s <%s>" % (
email.sender[0].name, email.sender[0].email)
expected_to_header = "%s <%s>" % (email.to[0].name, email.to[0].email)
headers = email.metadata.headers
# Invalid python3 syntax
# print [h for h in headers if h.field.lower() == "from"]
# self.assertEqual(expected_from_header, [h for h in headers if h.field.lower() == "from"][0].value)
# self.assertEqual(expected_from_header, [h for h in headers if h.field.lower() == "to"][0].value)
# self.assertEqual(email.subject, [h for h in headers if h.field.lower() == "subject"][0].value)
def validate_metadata(self, email):
self.assertEqual(1, len(email.sender))
self.assertEqual(1, len(email.to))
self.assertIsNotNone(email.sender[0].email)
self.assertIsNotNone(email.sender[0].name)
self.assertIsNotNone(email.to[0].email)
self.assertIsNotNone(email.to[0].name)
self.assertIsNotNone(email.subject)
self.assertIsNotNone(email.server)
self.assertEqual(datetime.strftime(datetime.now(), '%Y-%m-%d'),
datetime.strftime(email.received, '%Y-%m-%d'))
def validate_attachments(self, email):
self.assertEqual(2, len(email.attachments))
file1 = email.attachments[0]
self.assertIsNotNone(file1.id)
self.assertIsNotNone(file1.url)
self.assertEqual(82138, file1.length)
self.assertEqual("cat.png", file1.file_name)
self.assertEqual("image/png", file1.content_type)
file2 = email.attachments[1]
self.assertIsNotNone(file2.id)
self.assertIsNotNone(file2.url)
self.assertEqual(212080, file2.length)
self.assertEqual("dog.png", file2.file_name)
self.assertEqual("image/png", file2.content_type)
if __name__ == '__main__':
unittest.main()
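# Running these tests locally (illustrative; the values are placeholders): setUpClass reads the
# environment variables used above, so something like the following is needed first.
#   export MAILOSAUR_API_KEY=your-api-key
#   export MAILOSAUR_SERVER=your-server-id
#   # optional: MAILOSAUR_BASE_URL, MAILOSAUR_VERIFIED_DOMAIN, MAILOSAUR_SMTP_HOST
#   python -m pytest tests/emails_test.py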
| 40.785523
| 141
| 0.662525
|
751c745fb29ec82c2b536d236626de41315b585a
| 200
|
py
|
Python
|
chillapi/app/robots.py
|
andrescevp/chillapi
|
1c5f07600748eb65f413ad19f5e67653cce8b787
|
[
"Apache-2.0"
] | 2
|
2021-05-05T10:54:34.000Z
|
2021-05-05T12:45:18.000Z
|
chillapi/app/robots.py
|
andrescevp/chillapi
|
1c5f07600748eb65f413ad19f5e67653cce8b787
|
[
"Apache-2.0"
] | null | null | null |
chillapi/app/robots.py
|
andrescevp/chillapi
|
1c5f07600748eb65f413ad19f5e67653cce8b787
|
[
"Apache-2.0"
] | null | null | null |
from flask import make_response
def register_routes(app):
"""
:param app:
"""
@app.route("/robots.txt")
def profile():
""" """
return make_response(None, 200)
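# Minimal usage sketch (assumes a standard Flask application object; not part of the original module):
#   from flask import Flask
#   app = Flask(__name__)
#   register_routes(app)
#   # GET /robots.txt now returns an empty 200 response instead of a 404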
| 13.333333
| 39
| 0.55
|
1f0637a8ddc2901a7f50323facf7631cb85230a8
| 5,354
|
py
|
Python
|
2016/puzzle22.py
|
AwesomeGitHubRepos/adventofcode
|
84ba7963a5d7905973f14bb1c2e3a59165f8b398
|
[
"MIT"
] | 96
|
2018-04-21T07:53:34.000Z
|
2022-03-15T11:00:02.000Z
|
2016/puzzle22.py
|
AwesomeGitHubRepos/adventofcode
|
84ba7963a5d7905973f14bb1c2e3a59165f8b398
|
[
"MIT"
] | 17
|
2019-02-07T05:14:47.000Z
|
2021-12-27T12:11:04.000Z
|
2016/puzzle22.py
|
AwesomeGitHubRepos/adventofcode
|
84ba7963a5d7905973f14bb1c2e3a59165f8b398
|
[
"MIT"
] | 14
|
2019-02-05T06:34:15.000Z
|
2022-01-24T17:35:00.000Z
|
import os.path
from bisect import bisect_left
from collections import namedtuple
from heapq import heapify, heappush, heappop
from itertools import count, islice
from operator import attrgetter
HERE = os.path.dirname(os.path.abspath(__file__))
class PriorityQueue:
def __init__(self, *initial):
self._queue = []
self._count = count()
for pri, item in initial:
self.put(pri, item)
heapify(self._queue)
def __len__(self):
return len(self._queue)
def put(self, pri, item):
heappush(self._queue, (pri, next(self._count), item))
def get(self):
if not self:
raise ValueError('Queue is empty')
return heappop(self._queue)[-1]
class Server(namedtuple('ServerBase', 'x y size used available perc')):
@classmethod
def from_line(cls, line):
fs, *sizes, perc = line.split()
size, used, avail = (int(v.rstrip('T')) for v in sizes)
x, y = (int(v.lstrip('xy')) for v in fs.split('-')[-2:])
return cls(x, y, size, used, avail, int(perc.rstrip('%')))
GridInfo = namedtuple('GridInfo', 'constraints avail grid')
class State:
def __init__(self, focus, gridinfo, steps=0):
self.focus = focus
self.gridinfo = gridinfo
self.steps = steps
def __eq__(self, other):
return self.focus == other.focus
def __hash__(self):
return hash(self.focus)
def __repr__(self):
return '<State(({0.focus[0]}, {0.focus[1]}), {0.steps})>'.format(
self)
def moves(self):
cols, rows = self.gridinfo.constraints
avail, grid = self.gridinfo.avail, self.gridinfo.grid
for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
x, y = self.focus[0] + dx, self.focus[1] + dy
if not (0 <= x < cols and 0 <= y < rows):
continue
if grid[x][y].used > avail:
# this server can never offload their data *anywhere*
continue
yield State((x, y), self.gridinfo, self.steps + 1)
def heuristic(self, target):
x, y = self.focus
return abs(x - target.x) + abs(y - target.y) + self.steps
def shortest_path(start, goal):
queue = PriorityQueue((start.heuristic(goal), start))
open_ = {start: 0}
closed = set()
while open_:
current = queue.get()
if open_.get(current) != current.steps:
# ignore items in the queue for which a shorter
# path exists
continue
if current.focus == (goal.x, goal.y):
return current.steps
del open_[current]
closed.add(current)
for neighbor in current.moves():
if neighbor in closed:
continue
if neighbor.steps >= open_.get(neighbor, float('inf')):
# not a shorter path than we already have
continue
open_[neighbor] = neighbor.steps
queue.put(neighbor.heuristic(goal), neighbor)
def free_data(servers):
cols = max(s.x for s in servers) + 1
rows = max(s.y for s in servers) + 1
avail = max(s.available for s in servers)
empty_server = next(s for s in servers if s.used == 0)
grid = [[None] * rows for _ in range(cols)]
for s in servers:
grid[s.x][s.y] = s
gridinfo = GridInfo((cols, rows), avail, grid)
# two stages to solve:
# 1) get the empty data spot to the target server
start = State((empty_server.x, empty_server.y), gridinfo)
part1 = shortest_path(start, grid[cols - 1][0])
# 2) get the target server data to (0, 0); the target is in actual
# fact already 1 step closer, and each step is actually 5 to move
# the hole around and move the server data.
start = State((grid[cols - 1][0].x, grid[cols - 1][0].y), gridinfo)
part2 = (shortest_path(start, grid[0][0]) - 1) * 5
return part1 + part2
def count_viable(servers):
byavail = sorted(servers, key=attrgetter('available'))
availability = [s.available for s in byavail]
total = 0
for i, server in enumerate(byavail):
if not server.used:
continue
insertion_pos = bisect_left(availability, server.used)
viable = len(availability) - insertion_pos
if insertion_pos < i:
# remove this server from the count
viable -= 1
total += viable
return total
def test():
print('Star 2 test')
servers = [Server.from_line(l) for l in '''\
/dev/grid/node-x0-y0 10T 8T 2T 80%
/dev/grid/node-x0-y1 11T 6T 5T 54%
/dev/grid/node-x0-y2 32T 28T 4T 87%
/dev/grid/node-x1-y0 9T 7T 2T 77%
/dev/grid/node-x1-y1 8T 0T 8T 0%
/dev/grid/node-x1-y2 11T 7T 4T 63%
/dev/grid/node-x2-y0 10T 6T 4T 60%
/dev/grid/node-x2-y1 9T 8T 1T 88%
/dev/grid/node-x2-y2 9T 6T 3T 66%
'''.splitlines()]
assert free_data(servers) == 7
print('Tests passed')
if __name__ == '__main__':
import sys
if '-t' in sys.argv:
test()
sys.exit(0)
with open(os.path.join(HERE, 'puzzle22_input.txt'), 'r') as nodes:
servers = [Server.from_line(l) for l in islice(nodes, 2, None)]
print('Star 1:', count_viable(servers))
print('Star 2:', free_data(servers))
| 30.770115
| 73
| 0.582742
|
e9a12b23027c59ccd8c6a66b9adbec158d575fca
| 3,207
|
py
|
Python
|
HuaweiDialGrpc/parse_config.py
|
caowei49/elk-plugin
|
6d53e7c8cdd16a60fc7a17680510ee0ddc93a6da
|
[
"Apache-2.0"
] | null | null | null |
HuaweiDialGrpc/parse_config.py
|
caowei49/elk-plugin
|
6d53e7c8cdd16a60fc7a17680510ee0ddc93a6da
|
[
"Apache-2.0"
] | null | null | null |
HuaweiDialGrpc/parse_config.py
|
caowei49/elk-plugin
|
6d53e7c8cdd16a60fc7a17680510ee0ddc93a6da
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import json
def get_config_data():
if os.path.exists(os.path.abspath(os.path.join(os.path.dirname(__file__), "conf", "config.json"))):
conf_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "conf", "config.json"))
with open(conf_path, 'r') as f:
try:
data = json.load(f)
            except Exception:
                sys.stderr.write(": Error : config.json format error !\n")
                data = None
            return data
else:
sys.stderr.write(": Error : config.json is not found !\n")
def get_address():
data = get_config_data()
add = []
container = data.get('routers')
for con in container:
add.append(con.get('address'))
return add, container
def get_socket_address():
data = get_config_data()
socket_address = data.get('socket_address')
return socket_address
def get_json_dict():
try:
config_dict = {}
address, container = get_address()
if not container:
sys.stderr.write(": Error :config.json is empty \n!")
for add in address:
if not add:
sys.stderr.write(": Error :config.json is not configuration address !\n")
break
conf = ConfigJson()
metadata = conf.get_config_metadata(add, container)
paths = conf.get_config_paths(add, container)
request_id = conf.get_config_request_id(add, container)
sample_interval = conf.get_config_sample_interval(add, container)
node_dict = {}
node_dict['metadata'] = metadata
node_dict['paths'] = paths
node_dict['request_id'] = request_id
node_dict['sample_interval'] = sample_interval
config_dict[add] = node_dict
except Exception:
sys.stderr.write(": Error :config.json parsing failed !\n")
return config_dict
class ConfigJson(object):
def __init__(self):
self.metadata = None
self.paths = None
self.request_id = None
self.sample_interval = None
def get_config_metadata(self, add, container):
for contain_dict in container:
if contain_dict.get("address") == add:
self.metadata = (
('username', contain_dict.get('aaa').get("username")),
('password', contain_dict.get('aaa').get("password")))
return self.metadata
def get_config_paths(self, add, container):
for contain_dict in container:
if contain_dict.get("address") == add:
self.paths = contain_dict.get('Paths')
return self.paths
def get_config_request_id(self, add, container):
for contain_dict in container:
if contain_dict.get("address") == add:
self.request_id = contain_dict.get('request_id')
return self.request_id
def get_config_sample_interval(self, add, container):
for contain_dict in container:
if contain_dict.get("address") == add:
self.sample_interval = contain_dict.get('sample_interval')
return self.sample_interval
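# Illustrative conf/config.json layout inferred from the .get() calls above. The key names match the
# code; the values are placeholders (assumptions), not a documented schema:
# {
#     "socket_address": "0.0.0.0:50051",
#     "routers": [
#         {
#             "address": "192.0.2.1",
#             "aaa": {"username": "user", "password": "secret"},
#             "Paths": ["huawei-ifm:ifm/interfaces/interface"],
#             "request_id": 1,
#             "sample_interval": 10000
#         }
#     ]
# }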
| 33.40625
| 103
| 0.602744
|
c6352b2a98c14a5baa9acbaa279646e34e4a6405
| 5,040
|
py
|
Python
|
tensorflow/python/ops/conv2d_benchmark.py
|
shreyanshp/tensorflow
|
77867318b3c89e38828e787f3948ccae21bc0693
|
[
"Apache-2.0"
] | 1
|
2020-07-19T13:38:13.000Z
|
2020-07-19T13:38:13.000Z
|
tensorflow/python/ops/conv2d_benchmark.py
|
shreyanshp/tensorflow
|
77867318b3c89e38828e787f3948ccae21bc0693
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/ops/conv2d_benchmark.py
|
shreyanshp/tensorflow
|
77867318b3c89e38828e787f3948ccae21bc0693
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Conv2D op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import time
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def build_graph(device, input_shape, filter_shape, strides, padding, num_iters):
"""builds a graph containing a sequence of conv2d operations.
Args:
device: String, the device to run on.
input_shape: Shape of the input tensor.
filter_shape: Shape of the filter tensor.
strides: A list of ints. 1-D of length 4. The stride of sliding
window for each dimension of input.
padding: A string from: "SAME", "VALID". The type of padding
algorithm to use.
num_iters: number of iterations to run conv2d.
Returns:
An array of tensors to run()
"""
with ops.device("/%s:0" % device):
inp = variables.Variable(random_ops.truncated_normal(input_shape))
filt = variables.Variable(random_ops.truncated_normal(filter_shape))
outputs = []
conv2d_op = nn_ops.conv2d(inp, filt, strides, padding, data_format="NHWC")
outputs.append(conv2d_op)
for _ in range(1, num_iters):
with ops.control_dependencies([conv2d_op]):
conv2d_op = nn_ops.conv2d(
inp, filt, strides, padding, data_format="NHWC")
outputs.append(conv2d_op)
return control_flow_ops.group(*outputs)
class Conv2DBenchmark(test.Benchmark):
"""Benchmark conv2d!"""
def _run_graph(self, device, input_shape, filter_shape, strides, padding,
num_iters):
"""runs the graph and print its execution time.
Args:
device: String, the device to run on.
input_shape: Shape of the input tensor.
filter_shape: Shape of the filter tensor.
strides: A list of ints. 1-D of length 4. The stride of sliding
window for each dimension of input.
      padding: A string from: "SAME", "VALID". The type of padding
        algorithm to use.
      num_iters: number of iterations to run conv2d.
Returns:
The duration of the run in seconds.
"""
graph = ops.Graph()
with graph.as_default():
outputs = build_graph(device, input_shape, filter_shape, strides, padding,
num_iters)
with session_lib.Session(graph=graph) as session:
variables.global_variables_initializer().run()
# warmup runs
session.run(outputs)
start_time = time.time()
session.run(outputs)
duration = (time.time() - start_time) / num_iters
print("%s inputshape:%s filtershape:%s strides:%s padding:%s "
"%d iters: %.8f sec" %
(device, str(input_shape).replace(" ", ""),
str(filter_shape).replace(" ", ""),
str(strides).replace(" ", ""), padding, num_iters, duration))
name_template = (
"conv2d_{device}_input_shape_{inputshape}_filter_shape_{filtershape}_"
"strides_{strides}_padding_{padding}")
self.report_benchmark(
name=name_template.format(
device=device,
inputshape=str(input_shape).replace(" ", ""),
filtershape=str(filter_shape).replace(" ", ""),
strides=str(strides).replace(" ", ""),
padding=padding).replace(" ", ""),
iters=num_iters,
        wall_time=duration)
return duration
def benchmark_conv2d(self):
print("conv2d benchmark:")
h = 1000
w = 1000
fh = 3
fw = 3
input_shapes = []
filter_shapes = []
for b, c in itertools.product([4, 16, 32], [i for i in range(3, 16)]):
input_shapes += [[b, h, w, c]]
filter_shapes += [[fh, fw, c, b]]
strides = [[1, 2, 2, 1]]
paddings = ["VALID", "SAME"]
for ishape, fshape in zip(input_shapes, filter_shapes):
for stride in strides:
for padding in paddings:
self._run_graph("gpu", ishape, fshape, stride, padding, 80)
if __name__ == "__main__":
test.main()
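# Illustrative invocation sketch (assumption: TensorFlow's benchmark runner selects benchmark methods
# via the --benchmarks regex flag; a CUDA GPU is required since _run_graph pins ops to "gpu:0"):
#   python conv2d_benchmark.py --benchmarks=Conv2DBenchmark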
| 35.492958
| 80
| 0.653175
|
f2677eb37e1539bb98d3710ba2d799d681be09c7
| 1,912
|
py
|
Python
|
mysite/myapp/models.py
|
adamgpgarcia/Galleries-Next-Door
|
dd3380db0402d1a3793981108f358d13471610ca
|
[
"MIT"
] | null | null | null |
mysite/myapp/models.py
|
adamgpgarcia/Galleries-Next-Door
|
dd3380db0402d1a3793981108f358d13471610ca
|
[
"MIT"
] | null | null | null |
mysite/myapp/models.py
|
adamgpgarcia/Galleries-Next-Door
|
dd3380db0402d1a3793981108f358d13471610ca
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils import timezone
#the post model is for art action postings
class PostModel(models.Model):
title = models.CharField(max_length=240)
author = models.ForeignKey(User, on_delete=models.CASCADE)
published_on = models.DateTimeField(auto_now=True)
image = models.ImageField(
max_length=144,
upload_to='uploads/%Y/%m/%d/',
null=False
)
image_description = models.CharField(
max_length = 240,
null=False
)
auction_at=models.IntegerField()
current_bet = models.IntegerField(
default = 0
)
current_bet_leader = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name ="bet_leader",
null=True,
blank=True
)
likes = models.ManyToManyField(
User,
related_name="art_post",
blank=True
)
total_likes = models.IntegerField(
default = 0
)
auction_start = models.DateTimeField(
null=True,
blank=True
)
#0 = listed #1 = auction #2 = sold
current_state = models.IntegerField(
default = 0
)
def __str__(self): # uses self to label suggestions
return str(self.id) + " - " + self.author.username #string method better method on models page
#the comment model allows users to leave comments on art posts
class CommentModel(models.Model):
comment = models.CharField(max_length=240)
author = models.ForeignKey(User, on_delete=models.CASCADE)
published_on = models.DateTimeField(auto_now=True)
title = models.ForeignKey(PostModel, on_delete=models.CASCADE)
def __str__(self):
return self.comment
def last_10_messages(self):
        return CommentModel.objects.order_by('-published_on').all()[:10]
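# Illustrative ORM usage sketch (e.g. from `python manage.py shell`). The field names match the models
# above; the values and the `myapp` import path are placeholders:
#   from django.contrib.auth.models import User
#   from myapp.models import PostModel, CommentModel
#   user = User.objects.create_user('alice')
#   post = PostModel.objects.create(title='Sunset', author=user, image='uploads/sunset.jpg',
#                                   image_description='Oil on canvas', auction_at=100)
#   CommentModel.objects.create(comment='Lovely piece!', author=user, title=post)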
| 27.314286
| 123
| 0.654289
|
6e79b42fd0ac354030211b09e0262a0c00e7d9d1
| 6,613
|
py
|
Python
|
python/wpe.py
|
helianvine/dereverberation
|
8c93c16af2006c2065c9dc00f0b9717793fdf2b8
|
[
"MIT"
] | 115
|
2018-08-14T11:17:04.000Z
|
2022-03-01T09:50:19.000Z
|
python/wpe.py
|
liuergouxuqk/helloworld
|
8c93c16af2006c2065c9dc00f0b9717793fdf2b8
|
[
"MIT"
] | 3
|
2019-07-18T02:09:08.000Z
|
2020-05-04T17:28:27.000Z
|
python/wpe.py
|
liuergouxuqk/helloworld
|
8c93c16af2006c2065c9dc00f0b9717793fdf2b8
|
[
"MIT"
] | 43
|
2018-08-18T18:03:11.000Z
|
2022-03-28T06:32:26.000Z
|
# Created by Teng Xiang at 2018-08-10
# Current version: 2018-08-10
# https://github.com/helianvine/fdndlp
# =============================================================================
""" Weighted prediction error(WPE) method for speech dereverberation."""
import stft
import argparse
import time
import os
import numpy as np
import soundfile as sf
from numpy.lib import stride_tricks
# import matplotlib.pyplot as plt
class Configrations():
"""Argument parser for WPE method configurations."""
def __init__(self):
self.parser = argparse.ArgumentParser()
def parse(self):
self.parser.add_argument('filename')
self.parser.add_argument(
'-o', '--output', default='drv.wav',
help='output filename')
self.parser.add_argument(
'-m', '--mic_num', type=int, default=3,
help='number of input channels')
self.parser.add_argument(
'-n','--out_num', type=int, default=2,
help='number of output channels')
self.parser.add_argument(
'-p', '--order', type=int, default=30,
            help='prediction order')
self.cfgs = self.parser.parse_args()
return self.cfgs
class WpeMethod(object):
"""WPE metheod for speech dereverberaiton
Weighted prediction errors (WPE) method is an outstanding speech
derverberation algorithm, which is based on the multi-channel linear
prediction algorithm and produces multi-channel output.
Attributes:
channels: Number of input channels.
out_num: Number of output channels.
p: An integer number of the prediction order.
d: An integer number of the prediction delay.
frame_size: An integer number of the length of the frame
overlap: A float nonnegative number less than 1 indicating the overlap
factor between adjacent frames
"""
def __init__(self, mic_num, out_num, order=30):
self.channels = mic_num
self.out_num = out_num
self.p = order
self.d = 2
self.frame_size = 512
self.overlap = 0.5
self._iterations = 2
@property
def iterations(self):
return self._iterations
@iterations.setter
def iterations(self, value):
assert(int(value) > 0)
self._iterations = int(value)
def _display_cfgs(self):
print('\nSettings:')
print("Input channel: %d" % self.channels)
print("Output channel: %d" % self.out_num)
print("Prediction order: %d\n" % self.p)
def run_offline(self, data):
self._display_cfgs()
time_start = time.time()
print("Processing...")
drv_data = self.__fdndlp(data)
print("Done!\nTotal time: %f\n" % (time.time() - time_start))
return drv_data
def __fdndlp(self, data):
"""Frequency-domain variance-normalized delayed liner prediction
This is the core part of the WPE method. The variance-normalized
linear prediciton algorithm is implemented in each frequency bin
separately. Both the input and output signals are in time-domain.
Args:
            data: A 2-dimension numpy array with shape=(channels, samples)
Returns:
A 2-dimension numpy array with shape=(output_channels, samples)
"""
freq_data = stft.stft(
data / np.abs(data).max(),
frame_size=self.frame_size, overlap=self.overlap)
self.freq_num = freq_data.shape[-1]
drv_freq_data = freq_data[0:self.out_num].copy()
for i in range(self.freq_num):
xk = freq_data[:,:,i].T
dk = self.__ndlp(xk)
drv_freq_data[:,:,i] = dk.T
drv_data = stft.istft(
drv_freq_data,
frame_size=self.frame_size, overlap=self.overlap)
return drv_data / np.abs(drv_data).max()
def __ndlp(self, xk):
"""Variance-normalized delayed liner prediction
Here is the specific WPE algorithm implementation. The input should be
the reverberant time-frequency signal in a single frequency bin and
the output will be the dereverberated signal in the corresponding
frequency bin.
Args:
            xk: A 2-dimension numpy array with shape=(frames, input_channels)
Returns:
A 2-dimension numpy array with shape=(frames, output_channels)
"""
cols = xk.shape[0] - self.d
xk_buf = xk[:,0:self.out_num]
xk = np.concatenate(
(np.zeros((self.p - 1, self.channels)), xk),
axis=0)
xk_tmp = xk[:,::-1].copy()
frames = stride_tricks.as_strided(
xk_tmp,
shape=(self.channels * self.p, cols),
strides=(xk_tmp.strides[-1], xk_tmp.strides[-1]*self.channels))
frames = frames[::-1]
sigma2 = np.mean(1 / (np.abs(xk_buf[self.d:]) ** 2), axis=1)
for _ in range(self.iterations):
x_cor_m = np.dot(
np.dot(frames, np.diag(sigma2)),
np.conj(frames.T))
x_cor_v = np.dot(
frames,
np.conj(xk_buf[self.d:] * sigma2.reshape(-1, 1)))
coeffs = np.dot(np.linalg.inv(x_cor_m), x_cor_v)
dk = xk_buf[self.d:] - np.dot(frames.T, np.conj(coeffs))
sigma2 = np.mean(1 / (np.abs(dk) ** 2), axis=1)
return np.concatenate((xk_buf[0:self.d], dk))
def load_audio(self, filename):
data, fs = sf.read(filename, always_2d=True)
data = data.T
assert(data.shape[0] >= self.channels)
if data.shape[0] > self.channels:
print(
"The number of the input channels is %d," % data.shape[0],
"and only the first %d channels are loaded." % self.channels)
data = data[0: self.channels]
return data.copy(), fs
def write_wav(self, data, fs, filename, path='wav_out'):
if not os.path.exists(path):
os.makedirs(path)
filepath = os.path.join(path, filename)
print('Write to file: %s.' % filepath)
sf.write(filepath, data.T, fs, subtype='PCM_16')
if __name__ == '__main__':
cfgs = Configrations().parse()
# cfgs.filename = '../wav_sample/sample_4ch.wav'
wpe = WpeMethod(cfgs.mic_num, cfgs.out_num, cfgs.order)
data, fs = wpe.load_audio(cfgs.filename)
drv_data = wpe.run_offline(data)
wpe.write_wav(drv_data, fs, cfgs.output)
# plt.figure()
# spec, _ = stft.log_spectrum(drv_data[0])
# plt.pcolor(spec[0].T)
# plt.show()
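# Illustrative command-line use (arguments as defined in Configrations.parse above; the file name is a
# placeholder): dereverberate a 4-channel recording into 2 output channels with prediction order 30.
#   python wpe.py ../wav_sample/sample_4ch.wav -o drv.wav -m 4 -n 2 -p 30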
| 35.553763
| 79
| 0.594284
|
ae78039e6366576af3707d07f9518cb7df7c7898
| 1,505
|
py
|
Python
|
store_backend/migratedb.py
|
EUGINELETHAL/ChRIS_store
|
b842dbfa80f29f86468fe0ebd3514aaac4898717
|
[
"MIT"
] | 11
|
2018-03-23T19:27:10.000Z
|
2021-04-30T16:40:04.000Z
|
store_backend/migratedb.py
|
EUGINELETHAL/ChRIS_store
|
b842dbfa80f29f86468fe0ebd3514aaac4898717
|
[
"MIT"
] | 46
|
2018-05-21T14:54:43.000Z
|
2022-01-28T01:37:57.000Z
|
store_backend/migratedb.py
|
EUGINELETHAL/ChRIS_store
|
b842dbfa80f29f86468fe0ebd3514aaac4898717
|
[
"MIT"
] | 11
|
2018-03-28T04:37:25.000Z
|
2021-05-28T06:40:30.000Z
|
#!/usr/bin/env python
import time
import sys
import psycopg2
from argparse import ArgumentParser
# django needs to be loaded
import django
django.setup()
from django.core.management import call_command
parser = ArgumentParser(description="Check database service connection")
parser.add_argument('-u', '--user', help="Database user name")
parser.add_argument('-p', '--password', help="Database user password")
parser.add_argument('-d', '--database', help="Database name")
parser.add_argument('--host', help="Database host")
parser.add_argument('--max-attempts', type=int, dest='attempts',
help="Maximum number of connection attempts")
parser.add_argument('--noinput', action='store_true',
help="Perform migrations in non-interactive mode")
# Parse the arguments and perform the appropriate action
args = parser.parse_args()
host = args.host if args.host else 'localhost'
max_tries = args.attempts if args.attempts else 30
db = None
while max_tries > 0 and db is None:
try:
db = psycopg2.connect(host=host, user=args.user, password=args.password,
dbname=args.database)
except Exception:
time.sleep(5)
max_tries -= 1
if db is None:
print('Could not connect to database service!')
sys.exit(1)
else:
print('Database service ready to accept connections!')
if args.noinput:
call_command("migrate", interactive=False)
else:
call_command("migrate", interactive=True)
| 30.714286
| 80
| 0.695017
|
b9a44813e1c52e5b9b4262590ca7a8aece58e806
| 2,144
|
py
|
Python
|
examples/demo_rnn_torch.py
|
vishalbelsare/iisignature
|
fd26aeaddac0577d44163586f8394d4255cca586
|
[
"MIT"
] | 52
|
2016-06-02T14:51:49.000Z
|
2022-03-02T12:03:35.000Z
|
examples/demo_rnn_torch.py
|
ViewFuture/iisignature
|
14084c0fb5661943ef944ff23492c6aeb50d7896
|
[
"MIT"
] | 16
|
2017-08-15T13:52:20.000Z
|
2022-01-15T18:17:37.000Z
|
examples/demo_rnn_torch.py
|
ViewFuture/iisignature
|
14084c0fb5661943ef944ff23492c6aeb50d7896
|
[
"MIT"
] | 16
|
2016-12-03T20:41:07.000Z
|
2022-01-06T11:40:23.000Z
|
#A trivial demonstration of the RecurrentSig layer from iisignature_recurrent_torch.py
#No assertion is made that this model is a good idea, or that this code is idiomatic pytorch.
import numpy as np, sys, os, itertools
import torch
from torch.autograd import Variable
import torch.nn as nn
#add the parent directory, so we find our iisignature build if it was built --inplace
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from iisignature_recurrent_torch import RecurrentSig
criterion = nn.MSELoss()
#The task here for the network to learn is very easy - the average of two of the inputs
x = np.random.uniform(size=(2311,5,3))
y = (x[:,1,1] + x[:,3,2])/2 # The output is a number between 0 and 1, so matches sigmoid activation of the top layer
testx = x[2000:,:,:]
testy = y[2000:]
x=x[:2000,:,:]
y=y[:2000]
rnn=RecurrentSig(3,5,sig_level=2,use_signatures = False, output_signatures = False, train_time_lapse=False)
finalLayer=nn.Linear(5,1)
optimizer=torch.optim.Adam(itertools.chain(rnn.parameters(),finalLayer.parameters()),lr=0.0001)
minibatch_size = 32
def train(x_batch, y_batch):
minibatch_size=y_batch.shape[0]
hidden = rnn.initHidden(minibatch_size)
optimizer.zero_grad()
x_batch=Variable(torch.FloatTensor(x_batch))
for i in range(5):
output,hidden = rnn(x_batch[:,i,:], hidden)
output=finalLayer(output)
loss=criterion(output,Variable(torch.FloatTensor(y_batch)))
loss.backward()
optimizer.step()
return output, loss.data[0]
def predict(x):
hidden = rnn.initHidden(x.shape[0])
x=Variable(torch.FloatTensor(x))
for i in range(5):
output,hidden = rnn(x[:,i,:], hidden)
output=finalLayer(output)
return output
def evaluate(x,y):
loss=criterion(predict(x),Variable(torch.FloatTensor(y)))
return loss
nb_epoch=2
for i in range(nb_epoch*x.shape[0]//minibatch_size):
    indices = np.random.randint(x.shape[0], size=minibatch_size)  # sample a training minibatch from x
output,loss=train(x[indices],y[indices])
#print (loss)
#a=np.random.uniform(size=(3,5,3))
#print (a, predict(a).data)
print (evaluate(testx,testy).data)
| 31.529412
| 116
| 0.71875
|
706d545797321cfc6763bc6f6e5cadbeb77ad510
| 115,139
|
py
|
Python
|
geemap/eefolium.py
|
Julietarellanoo/geemap
|
63ef1300f65cd3da539d96218bf0ac610d4e826c
|
[
"MIT"
] | 1
|
2020-11-26T09:47:21.000Z
|
2020-11-26T09:47:21.000Z
|
geemap/eefolium.py
|
liangliang12/geemap
|
63ef1300f65cd3da539d96218bf0ac610d4e826c
|
[
"MIT"
] | null | null | null |
geemap/eefolium.py
|
liangliang12/geemap
|
63ef1300f65cd3da539d96218bf0ac610d4e826c
|
[
"MIT"
] | null | null | null |
""" This module extends the folium Map class. It is designed to be used in Google Colab, as Google Colab currently does not support ipyleaflet.
"""
import ee
import folium
from folium import plugins
from .common import *
from .conversion import *
# More WMS basemaps can be found at https://viewer.nationalmap.gov/services/
ee_basemaps = {
"ROADMAP": folium.TileLayer(
tiles="https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}",
attr="Google",
name="Google Maps",
overlay=True,
control=True,
),
"SATELLITE": folium.TileLayer(
tiles="https://mt1.google.com/vt/lyrs=s&x={x}&y={y}&z={z}",
attr="Google",
name="Google Satellite",
overlay=True,
control=True,
),
"TERRAIN": folium.TileLayer(
tiles="https://mt1.google.com/vt/lyrs=p&x={x}&y={y}&z={z}",
attr="Google",
name="Google Terrain",
overlay=True,
control=True,
),
"HYBRID": folium.TileLayer(
tiles="https://mt1.google.com/vt/lyrs=y&x={x}&y={y}&z={z}",
attr="Google",
name="Google Satellite",
overlay=True,
control=True,
),
"ESRI": folium.TileLayer(
tiles="https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Satellite",
overlay=True,
control=True,
),
"Esri Ocean": folium.TileLayer(
tiles="https://services.arcgisonline.com/ArcGIS/rest/services/Ocean/World_Ocean_Base/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Ocean",
overlay=True,
control=True,
),
"Esri Satellite": folium.TileLayer(
tiles="https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Satellite",
overlay=True,
control=True,
),
"Esri Standard": folium.TileLayer(
tiles="https://server.arcgisonline.com/ArcGIS/rest/services/World_Street_Map/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Standard",
overlay=True,
control=True,
),
"Esri Terrain": folium.TileLayer(
tiles="https://server.arcgisonline.com/ArcGIS/rest/services/World_Terrain_Base/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Terrain",
overlay=True,
control=True,
),
"Esri Transportation": folium.TileLayer(
tiles="https://server.arcgisonline.com/ArcGIS/rest/services/Reference/World_Transportation/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Transportation",
overlay=True,
control=True,
),
"Esri Topo World": folium.TileLayer(
tiles="https://services.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Topo World",
overlay=True,
control=True,
),
"Esri National Geographic": folium.TileLayer(
tiles="http://services.arcgisonline.com/ArcGIS/rest/services/NatGeo_World_Map/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri National Geographic",
overlay=True,
control=True,
),
"Esri Shaded Relief": folium.TileLayer(
tiles="https://services.arcgisonline.com/arcgis/rest/services/World_Shaded_Relief/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Shaded Relief",
overlay=True,
control=True,
),
"Esri Physical Map": folium.TileLayer(
tiles="https://services.arcgisonline.com/arcgis/rest/services/World_Physical_Map/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Physical Map",
overlay=True,
control=True,
),
"Bing VirtualEarth": folium.TileLayer(
tiles="http://ecn.t3.tiles.virtualearth.net/tiles/a{q}.jpeg?g=1",
attr="Microsoft",
name="Bing VirtualEarth",
overlay=True,
control=True,
),
"3DEP Elevation": folium.WmsTileLayer(
url="https://elevation.nationalmap.gov/arcgis/services/3DEPElevation/ImageServer/WMSServer?",
layers="3DEPElevation:None",
attr="USGS",
name="3DEP Elevation",
overlay=True,
control=True,
),
"NAIP Imagery": folium.WmsTileLayer(
url="https://services.nationalmap.gov/arcgis/services/USGSNAIPImagery/ImageServer/WMSServer?",
layers="0",
attr="USGS",
name="NAIP Imagery",
overlay=True,
control=True,
),
}
class Map(folium.Map):
"""The Map class inherits from folium.Map. By default, the Map will add Google Maps as the basemap. Set add_google_map = False to use OpenStreetMap as the basemap.
Returns:
object: folium map object.
"""
def __init__(self, **kwargs):
import logging
logging.getLogger("googleapiclient.discovery_cache").setLevel(logging.ERROR)
if "use_ee" not in kwargs.keys():
kwargs["use_ee"] = True
if kwargs["use_ee"]:
ee_initialize()
if "ee_initialize" not in kwargs.keys():
kwargs["ee_initialize"] = True
if kwargs["ee_initialize"]:
ee_initialize()
# Default map center location and zoom level
latlon = [40, -100]
zoom = 4
# Interchangeable parameters between ipyleaflet and folium
if "center" in kwargs.keys():
kwargs["location"] = kwargs["center"]
kwargs.pop("center")
if "location" in kwargs.keys():
latlon = kwargs["location"]
else:
kwargs["location"] = latlon
if "zoom" in kwargs.keys():
kwargs["zoom_start"] = kwargs["zoom"]
kwargs.pop("zoom")
if "zoom_start" in kwargs.keys():
zoom = kwargs["zoom_start"]
else:
kwargs["zoom_start"] = zoom
if "add_google_map" not in kwargs.keys():
kwargs["add_google_map"] = True
if "plugin_LatLngPopup" not in kwargs.keys():
kwargs["plugin_LatLngPopup"] = True
if "plugin_Fullscreen" not in kwargs.keys():
kwargs["plugin_Fullscreen"] = True
if "plugin_Draw" not in kwargs.keys():
kwargs["plugin_Draw"] = False
if "Draw_export" not in kwargs.keys():
kwargs["Draw_export"] = True
if "plugin_MiniMap" not in kwargs.keys():
kwargs["plugin_MiniMap"] = False
if "plugin_LayerControl" not in kwargs.keys():
kwargs["plugin_LayerControl"] = False
super().__init__(**kwargs)
if kwargs.get("add_google_map"):
ee_basemaps["ROADMAP"].add_to(self)
if kwargs.get("plugin_LatLngPopup"):
folium.LatLngPopup().add_to(self)
if kwargs.get("plugin_Fullscreen"):
plugins.Fullscreen().add_to(self)
if kwargs.get("plugin_Draw"):
plugins.Draw(export=kwargs.get("Draw_export")).add_to(self)
if kwargs.get("plugin_MiniMap"):
plugins.MiniMap().add_to(self)
if kwargs.get("plugin_LayerControl"):
folium.LayerControl().add_to(self)
self.fit_bounds([latlon, latlon], max_zoom=zoom)
def setOptions(self, mapTypeId="HYBRID", styles={}, types=[]):
"""Adds Google basemap to the map.
Args:
mapTypeId (str, optional): A mapTypeId to set the basemap to. Can be one of "ROADMAP", "SATELLITE", "HYBRID" or "TERRAIN" to select one of the standard Google Maps API map types. Defaults to 'HYBRID'.
            styles (dict, optional): A dictionary of custom MapTypeStyle objects keyed with a name that will appear in the map's Map Type Controls. Defaults to {}.
            types (list, optional): A list of mapTypeIds to make available. If omitted, but styles is specified, appends all of the style keys to the standard Google Maps API map types. Defaults to [].
"""
try:
ee_basemaps[mapTypeId].add_to(self)
except:
print(
"Basemap can only be one of the following: {}".format(
", ".join(ee_basemaps.keys())
)
)
set_options = setOptions
def add_basemap(self, basemap="HYBRID"):
"""Adds a basemap to the map.
Args:
            basemap (str, optional): Can be one of the keys of ee_basemaps. Defaults to 'HYBRID'.
"""
try:
ee_basemaps[basemap].add_to(self)
except:
print(
"Basemap can only be one of the following: {}".format(
", ".join(ee_basemaps.keys())
)
)
def add_layer(
self, ee_object, vis_params={}, name="Layer untitled", shown=True, opacity=1.0
):
"""Adds a given EE object to the map as a layer.
Args:
ee_object (Collection|Feature|Image|MapId): The object to add to the map.
vis_params (dict, optional): The visualization parameters. Defaults to {}.
name (str, optional): The name of the layer. Defaults to 'Layer untitled'.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
opacity (float, optional): The layer's opacity represented as a number between 0 and 1. Defaults to 1.
"""
image = None
if (
not isinstance(ee_object, ee.Image)
and not isinstance(ee_object, ee.ImageCollection)
and not isinstance(ee_object, ee.FeatureCollection)
and not isinstance(ee_object, ee.Feature)
and not isinstance(ee_object, ee.Geometry)
):
err_str = "\n\nThe image argument in 'addLayer' function must be an instace of one of ee.Image, ee.Geometry, ee.Feature or ee.FeatureCollection."
raise AttributeError(err_str)
if (
isinstance(ee_object, ee.geometry.Geometry)
or isinstance(ee_object, ee.feature.Feature)
or isinstance(ee_object, ee.featurecollection.FeatureCollection)
):
features = ee.FeatureCollection(ee_object)
width = 2
if "width" in vis_params:
width = vis_params["width"]
color = "000000"
if "color" in vis_params:
color = vis_params["color"]
image_fill = features.style(**{"fillColor": color}).updateMask(
ee.Image.constant(0.5)
)
image_outline = features.style(
**{"color": color, "fillColor": "00000000", "width": width}
)
image = image_fill.blend(image_outline)
elif isinstance(ee_object, ee.image.Image):
image = ee_object
elif isinstance(ee_object, ee.imagecollection.ImageCollection):
image = ee_object.mosaic()
map_id_dict = ee.Image(image).getMapId(vis_params)
folium.raster_layers.TileLayer(
tiles=map_id_dict["tile_fetcher"].url_format,
attr="Google Earth Engine",
name=name,
overlay=True,
control=True,
show=shown,
opacity=opacity,
).add_to(self)
addLayer = add_layer
def set_center(self, lon, lat, zoom=10):
"""Centers the map view at a given coordinates with the given zoom level.
Args:
lon (float): The longitude of the center, in degrees.
lat (float): The latitude of the center, in degrees.
zoom (int, optional): The zoom level, from 1 to 24. Defaults to 10.
"""
self.fit_bounds([[lat, lon], [lat, lon]], max_zoom=zoom)
setCenter = set_center
def center_object(self, ee_object, zoom=10):
"""Centers the map view on a given object.
Args:
ee_object (Element|Geometry): An Earth Engine object to center on - a geometry, image or feature.
zoom (int, optional): The zoom level, from 1 to 24. Defaults to 10.
"""
lat = 0
lon = 0
bounds = [[lat, lon], [lat, lon]]
if isinstance(ee_object, ee.geometry.Geometry):
centroid = ee_object.centroid()
lon, lat = centroid.getInfo()["coordinates"]
bounds = [[lat, lon], [lat, lon]]
elif isinstance(ee_object, ee.featurecollection.FeatureCollection):
centroid = ee_object.geometry().centroid()
lon, lat = centroid.getInfo()["coordinates"]
bounds = [[lat, lon], [lat, lon]]
elif isinstance(ee_object, ee.image.Image):
geometry = ee_object.geometry()
coordinates = geometry.getInfo()["coordinates"][0]
bounds = [coordinates[0][::-1], coordinates[2][::-1]]
elif isinstance(ee_object, ee.imagecollection.ImageCollection):
geometry = ee_object.geometry()
coordinates = geometry.getInfo()["coordinates"][0]
bounds = [coordinates[0][::-1], coordinates[2][::-1]]
else:
bounds = [[0, 0], [0, 0]]
self.fit_bounds(bounds, max_zoom=zoom)
centerObject = center_object
def set_control_visibility(
self, layerControl=True, fullscreenControl=True, latLngPopup=True
):
"""Sets the visibility of the controls on the map.
Args:
layerControl (bool, optional): Whether to show the control that allows the user to toggle layers on/off. Defaults to True.
fullscreenControl (bool, optional): Whether to show the control that allows the user to make the map full-screen. Defaults to True.
latLngPopup (bool, optional): Whether to show the control that pops up the Lat/lon when the user clicks on the map. Defaults to True.
"""
if layerControl:
folium.LayerControl().add_to(self)
if fullscreenControl:
plugins.Fullscreen().add_to(self)
if latLngPopup:
folium.LatLngPopup().add_to(self)
setControlVisibility = set_control_visibility
def add_layer_control(self):
"""Adds layer basemap to the map."""
folium.LayerControl().add_to(self)
addLayerControl = add_layer_control
def add_wms_layer(
self,
url,
layers,
name=None,
attribution="",
overlay=True,
control=True,
shown=True,
format="image/png",
):
"""Add a WMS layer to the map.
Args:
url (str): The URL of the WMS web service.
layers (str): Comma-separated list of WMS layers to show.
name (str, optional): The layer name to use on the layer control. Defaults to None.
attribution (str, optional): The attribution of the data layer. Defaults to ''.
overlay (str, optional): Allows overlay. Defaults to True.
control (str, optional): Adds the layer to the layer control. Defaults to True.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
            format (str, optional): WMS image format (use 'image/png' for layers with transparency). Defaults to 'image/png'.
"""
try:
folium.raster_layers.WmsTileLayer(
url=url,
layers=layers,
attr=attribution,
name=name,
overlay=overlay,
control=control,
show=shown,
).add_to(self)
except:
print("Failed to add the specified WMS TileLayer.")
def add_tile_layer(
self,
tiles="OpenStreetMap",
name=None,
attribution="",
overlay=True,
control=True,
shown=True,
opacity=1.0,
API_key=None,
):
"""Add a XYZ tile layer to the map.
Args:
tiles (str): The URL of the XYZ tile service.
name (str, optional): The layer name to use on the layer control. Defaults to None.
attribution (str, optional): The attribution of the data layer. Defaults to ''.
overlay (str, optional): Allows overlay. Defaults to True.
control (str, optional): Adds the layer to the layer control. Defaults to True.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
            opacity (float, optional): Sets the opacity for the layer. Defaults to 1.0.
            API_key (str, optional): API key for Cloudmade or Mapbox tiles. Defaults to None.
"""
try:
folium.raster_layers.TileLayer(
tiles=tiles,
name=name,
attr=attribution,
overlay=overlay,
control=control,
show=shown,
opacity=opacity,
API_key=API_key,
).add_to(self)
except:
print("Failed to add the specified TileLayer.")
def publish(
self,
name=None,
headline="Untitled",
visibility="PUBLIC",
overwrite=True,
open=True,
):
"""Publish the map to datapane.com
Args:
name (str, optional): The name of the published report. If None, a random name is generated. Defaults to None.
headline (str, optional): Title of the map. Defaults to 'Untitled'.
visibility (str, optional): Visibility of the map. It can be one of the following: PUBLIC, PRIVATE, ORG. Defaults to 'PUBLIC'.
overwrite (bool, optional): Whether to overwrite the existing map with the same name. Defaults to True.
open (bool, optional): Whether to open the map. Defaults to True.
"""
import webbrowser
try:
import datapane as dp
except Exception as e:
print(
"The datapane Python package is not installed. You need to install and authenticate datapane first."
)
webbrowser.open_new_tab(
"https://docs.datapane.com/tutorials/tut-getting-started"
)
return
import datapane as dp
# import logging
# logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
if name is None:
name = "folium_" + random_string(6)
visibility = visibility.upper()
if visibility not in ["PUBLIC", "PRIVATE", "ORG"]:
visibility = "PRIVATE"
if overwrite:
delete_dp_report(name)
report = dp.Report(dp.Plot(self))
report.publish(name=name, headline=headline, visibility=visibility, open=open)
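# Example usage (illustrative sketch; publishing requires a datapane account and prior
# authentication with the datapane CLI, and `m` is an instance of this Map class):
# m = Map()
# m.publish(name="my_map", headline="My first published map", visibility="PUBLIC")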
def delete_dp_report(name):
"""Deletes a datapane report.
Args:
name (str): Name of the report to delete.
"""
try:
import datapane as dp
reports = dp.Report.list()
items = list(reports)
names = list(map(lambda item: item["name"], items))
if name in names:
report = dp.Report.get(name)
url = report.blocks[0]["url"]
# print('Deleting {}...'.format(url))
dp.Report.delete(dp.Report.by_id(url))
except Exception as e:
print(e)
return
def delete_dp_reports():
"""Deletes all datapane reports."""
try:
import datapane as dp
reports = dp.Report.list()
for item in reports:
print(item["name"])
report = dp.Report.get(item["name"])
url = report.blocks[0]["url"]
print("Deleting {}...".format(url))
dp.Report.delete(dp.Report.by_id(url))
except Exception as e:
print(e)
return
# def install_from_github(url):
# """Install a package from a GitHub repository.
# Args:
# url (str): The URL of the GitHub repository.
# """
# try:
# download_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
# if not os.path.exists(download_dir):
# os.makedirs(download_dir)
# repo_name = os.path.basename(url)
# zip_url = os.path.join(url, 'archive/master.zip')
# filename = repo_name + '-master.zip'
# download_from_url(url=zip_url, out_file_name=filename,
# out_dir=download_dir, unzip=True)
# pkg_dir = os.path.join(download_dir, repo_name + '-master')
# work_dir = os.getcwd()
# os.chdir(pkg_dir)
# cmd = 'pip install .'
# os.system(cmd)
# os.chdir(work_dir)
# print("\nPlease comment out 'install_from_github()' and restart the kernel to take effect:\nJupyter menu -> Kernel -> Restart & Clear Output")
# except Exception as e:
# print(e)
# def rgb_to_hex(rgb=(255, 255, 255)):
# """Converts RGB to hex color. In RGB color R stands for Red, G stands for Green, and B stands for Blue, and it ranges from the decimal value of 0 – 255.
# Args:
# rgb (tuple, optional): RGB color code as a tuple of (red, green, blue). Defaults to (255, 255, 255).
# Returns:
# str: hex color code
# """
# return '%02x%02x%02x' % rgb
# def hex_to_rgb(value='FFFFFF'):
# """Converts hex color to RGB color.
# Args:
# value (str, optional): Hex color code as a string. Defaults to 'FFFFFF'.
# Returns:
# tuple: RGB color as a tuple.
# """
# value = value.lstrip('#')
# lv = len(value)
# return tuple(int(value[i:i+lv//3], 16) for i in range(0, lv, lv//3))
# def check_color(in_color):
# """Checks the input color and returns the corresponding hex color code.
# Args:
# in_color (str or tuple): It can be a string (e.g., 'red', '#ffff00') or tuple (e.g., (255, 127, 0)).
# Returns:
# str: A hex color code.
# """
# out_color = '#000000' # default black color
# if isinstance(in_color, tuple) and len(in_color) == 3:
# if all(isinstance(item, int) for item in in_color):
# rescaled_color = [x / 255.0 for x in in_color]
# out_color = colour.Color(rgb=tuple(rescaled_color))
# return out_color.hex_l
# else:
# print(
# 'RGB color must be a tuple with three integer values ranging from 0 to 255.')
# return
# else:
# try:
# out_color = colour.Color(in_color)
# return out_color.hex_l
# except Exception as e:
# print('The provided color is invalid. Using the default black color.')
# print(e)
# return out_color
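# Illustrative calls for the commented-out check_color() helper above (return values
# assume the `colour` package's long hex form):
# check_color('red')          # -> '#ff0000'
# check_color((255, 127, 0))  # -> '#ff7f00'
# check_color('#00ff00')      # -> '#00ff00'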
# def system_fonts(show_full_path=False):
# """Gets a list of system fonts.
# # Common font locations:
# # Linux: /usr/share/fonts/TTF/
# # Windows: C:\Windows\Fonts
# # macOS: System > Library > Fonts
# Args:
# show_full_path (bool, optional): Whether to show the full path of each system font. Defaults to False.
# Returns:
# list: A list of system fonts.
# """
# try:
# import matplotlib.font_manager
# font_list = matplotlib.font_manager.findSystemFonts(
# fontpaths=None, fontext='ttf')
# font_list.sort()
# font_names = [os.path.basename(f) for f in font_list]
# font_names.sort()
# if show_full_path:
# return font_list
# else:
# return font_names
# except Exception as e:
# print(e)
# def add_text_to_gif(in_gif, out_gif, xy=None, text_sequence=None, font_type="arial.ttf", font_size=20, font_color='#000000', add_progress_bar=True, progress_bar_color='white', progress_bar_height=5, duration=100, loop=0):
# """Adds animated text to a GIF image.
# Args:
# in_gif (str): The file path to the input GIF image.
# out_gif (str): The file path to the output GIF image.
# xy (tuple, optional): Top left corner of the text. It can be formatted like this: (10, 10) or ('15%', '25%'). Defaults to None.
# text_sequence (int, str, list, optional): Text to be drawn. It can be an integer number, a string, or a list of strings. Defaults to None.
# font_type (str, optional): Font type. Defaults to "arial.ttf".
# font_size (int, optional): Font size. Defaults to 20.
# font_color (str, optional): Font color. It can be a string (e.g., 'red'), rgb tuple (e.g., (255, 127, 0)), or hex code (e.g., '#ff00ff'). Defaults to '#000000'.
# add_progress_bar (bool, optional): Whether to add a progress bar at the bottom of the GIF. Defaults to True.
# progress_bar_color (str, optional): Color for the progress bar. Defaults to 'white'.
# progress_bar_height (int, optional): Height of the progress bar. Defaults to 5.
# duration (int, optional): Controls how long each frame is displayed for, in milliseconds. It is the inverse of the frame rate. Setting it to 100 milliseconds gives 10 frames per second. You can decrease the duration to give a smoother animation. Defaults to 100.
# loop (int, optional): Controls how many times the animation repeats. A value of 1 means the animation plays once and then stops (displaying the last frame). A value of 0 means the animation repeats forever. Defaults to 0.
# """
# import io
# import pkg_resources
# import warnings
# from PIL import Image, ImageDraw, ImageSequence, ImageFont
# warnings.simplefilter('ignore')
# pkg_dir = os.path.dirname(
# pkg_resources.resource_filename("geemap", "geemap.py"))
# default_font = os.path.join(pkg_dir, 'data/fonts/arial.ttf')
# in_gif = os.path.abspath(in_gif)
# out_gif = os.path.abspath(out_gif)
# if not os.path.exists(in_gif):
# print('The input gif file does not exist.')
# return
# if not os.path.exists(os.path.dirname(out_gif)):
# os.makedirs(os.path.dirname(out_gif))
# if font_type == 'arial.ttf':
# font = ImageFont.truetype(default_font, font_size)
# else:
# try:
# font_list = system_fonts(show_full_path=True)
# font_names = [os.path.basename(f) for f in font_list]
# if (font_type in font_list) or (font_type in font_names):
# font = ImageFont.truetype(font_type, font_size)
# else:
# print(
# 'The specified font type could not be found on your system. Using the default font instead.')
# font = ImageFont.truetype(default_font, font_size)
# except Exception as e:
# print(e)
# font = ImageFont.truetype(default_font, font_size)
# color = check_color(font_color)
# progress_bar_color = check_color(progress_bar_color)
# try:
# image = Image.open(in_gif)
# except Exception as e:
# print('An error occurred while opening the gif.')
# print(e)
# return
# count = image.n_frames
# W, H = image.size
# progress_bar_widths = [i * 1.0 / count * W for i in range(1, count + 1)]
# progress_bar_shapes = [[(0, H - progress_bar_height), (x, H)]
# for x in progress_bar_widths]
# if xy is None:
# # default text location is 5% width and 5% height of the image.
# xy = (int(0.05 * W), int(0.05 * H))
# elif (xy is not None) and (not isinstance(xy, tuple)) and (len(xy) == 2):
# print("xy must be a tuple, e.g., (10, 10), ('10%', '10%')")
# return
# elif all(isinstance(item, int) for item in xy) and (len(xy) == 2):
# x, y = xy
# if (x > 0) and (x < W) and (y > 0) and (y < H):
# pass
# else:
# print(
# 'xy is out of bounds. x must be within [0, {}], and y must be within [0, {}]'.format(W, H))
# return
# elif all(isinstance(item, str) for item in xy) and (len(xy) == 2):
# x, y = xy
# if ('%' in x) and ('%' in y):
# try:
# x = int(float(x.replace('%', '')) / 100.0 * W)
# y = int(float(y.replace('%', '')) / 100.0 * H)
# xy = (x, y)
# except Exception as e:
# print(
# "The specified xy is invalid. It must be formatted like this ('10%', '10%')")
# return
# else:
# print("The specified xy is invalid. It must be formatted like this: (10, 10) or ('10%', '10%')")
# return
# if text_sequence is None:
# text = [str(x) for x in range(1, count + 1)]
# elif isinstance(text_sequence, int):
# text = [str(x) for x in range(
# text_sequence, text_sequence + count + 1)]
# elif isinstance(text_sequence, str):
# try:
# text_sequence = int(text_sequence)
# text = [str(x) for x in range(
# text_sequence, text_sequence + count + 1)]
# except Exception as e:
# text = [text_sequence] * count
# elif isinstance(text_sequence, list) and len(text_sequence) != count:
# print('The length of the text sequence must be equal to the number ({}) of frames in the gif.'.format(count))
# return
# else:
# text = [str(x) for x in text_sequence]
# try:
# frames = []
# # Loop over each frame in the animated image
# for index, frame in enumerate(ImageSequence.Iterator(image)):
# # Draw the text on the frame
# frame = frame.convert('RGB')
# draw = ImageDraw.Draw(frame)
# # w, h = draw.textsize(text[index])
# draw.text(xy, text[index], font=font, fill=color)
# if add_progress_bar:
# draw.rectangle(
# progress_bar_shapes[index], fill=progress_bar_color)
# del draw
# b = io.BytesIO()
# frame.save(b, format="GIF")
# frame = Image.open(b)
# frames.append(frame)
# # https://www.pythoninformer.com/python-libraries/pillow/creating-animated-gif/
# # Save the frames as a new image
# frames[0].save(out_gif, save_all=True,
# append_images=frames[1:], duration=duration, loop=loop, optimize=True)
# except Exception as e:
# print(e)
# return
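# Example usage for the commented-out add_text_to_gif() function above (illustrative
# sketch; the GIF file paths are placeholders):
# add_text_to_gif(
#     in_gif='animation.gif',
#     out_gif='animation_labeled.gif',
#     xy=('5%', '5%'),
#     text_sequence=1984,   # label frames 1984, 1985, ...
#     font_size=30,
#     font_color='white',
#     duration=100,
# )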
# def open_image_from_url(url):
# """Loads an image from the specified URL.
# Args:
# url (str): URL of the image.
# Returns:
# object: Image object.
# """
# from PIL import Image
# import requests
# from io import BytesIO
# from urllib.parse import urlparse
# try:
# # if url.endswith('.gif'):
# # out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
# # if not os.path.exists(out_dir):
# # os.makedirs(out_dir)
# # a = urlparse(url)
# # out_name = os.path.basename(a.path)
# # out_path = os.path.join(out_dir, out_name)
# # download_from_url(url, out_name, out_dir, unzip=False)
# # img = Image.open(out_path)
# # else:
# response = requests.get(url)
# img = Image.open(BytesIO(response.content))
# return img
# except Exception as e:
# print(e)
# def has_transparency(img):
# """Checks whether an image has transparency.
# Args:
# img (object): a PIL Image object.
# Returns:
# bool: True if it has transparency, False otherwise.
# """
# if img.mode == "P":
# transparent = img.info.get("transparency", -1)
# for _, index in img.getcolors():
# if index == transparent:
# return True
# elif img.mode == "RGBA":
# extrema = img.getextrema()
# if extrema[3][0] < 255:
# return True
# return False
# def add_image_to_gif(in_gif, out_gif, in_image, xy=None, image_size=(80, 80), circle_mask=False):
# """Adds an image logo to a GIF image.
# Args:
# in_gif (str): Input file path to the GIF image.
# out_gif (str): Output file path to the GIF image.
# in_image (str): Input file path to the image.
# xy (tuple, optional): Top left corner of the image (logo). It can be formatted like this: (10, 10) or ('15%', '25%'). Defaults to None.
# image_size (tuple, optional): Resize image. Defaults to (80, 80).
# circle_mask (bool, optional): Whether to apply a circle mask to the image. This only works with non-png images. Defaults to False.
# """
# import io
# import warnings
# from PIL import Image, ImageDraw, ImageSequence, ImageFilter
# warnings.simplefilter('ignore')
# in_gif = os.path.abspath(in_gif)
# is_url = False
# if in_image.startswith('http'):
# is_url = True
# if not os.path.exists(in_gif):
# print('The input gif file does not exist.')
# return
# if (not is_url) and (not os.path.exists(in_image)):
# print('The provided logo file does not exist.')
# return
# if not os.path.exists(os.path.dirname(out_gif)):
# os.makedirs(os.path.dirname(out_gif))
# try:
# image = Image.open(in_gif)
# except Exception as e:
# print('An error occurred while opening the image.')
# print(e)
# return
# try:
# if in_image.startswith('http'):
# logo_raw_image = open_image_from_url(in_image)
# else:
# in_image = os.path.abspath(in_image)
# logo_raw_image = Image.open(in_image)
# except Exception as e:
# print(e)
# logo_raw_size = logo_raw_image.size
# image_size = min(logo_raw_size[0], image_size[0]), min(
# logo_raw_size[1], image_size[1])
# logo_image = logo_raw_image.convert('RGBA')
# logo_image.thumbnail(image_size, Image.ANTIALIAS)
# W, H = image.size
# mask_im = None
# if circle_mask:
# mask_im = Image.new("L", image_size, 0)
# draw = ImageDraw.Draw(mask_im)
# draw.ellipse((0, 0, image_size[0], image_size[1]), fill=255)
# if has_transparency(logo_raw_image):
# mask_im = logo_image.copy()
# if xy is None:
# # default logo location is 5% width and 5% height of the image.
# xy = (int(0.05 * W), int(0.05 * H))
# elif (xy is not None) and (not isinstance(xy, tuple)) and (len(xy) == 2):
# print("xy must be a tuple, e.g., (10, 10), ('10%', '10%')")
# return
# elif all(isinstance(item, int) for item in xy) and (len(xy) == 2):
# x, y = xy
# if (x > 0) and (x < W) and (y > 0) and (y < H):
# pass
# else:
# print(
# 'xy is out of bounds. x must be within [0, {}], and y must be within [0, {}]'.format(W, H))
# return
# elif all(isinstance(item, str) for item in xy) and (len(xy) == 2):
# x, y = xy
# if ('%' in x) and ('%' in y):
# try:
# x = int(float(x.replace('%', '')) / 100.0 * W)
# y = int(float(y.replace('%', '')) / 100.0 * H)
# xy = (x, y)
# except Exception as e:
# print(
# "The specified xy is invalid. It must be formatted like this ('10%', '10%')")
# return
# else:
# print("The specified xy is invalid. It must be formatted like this: (10, 10) or ('10%', '10%')")
# return
# try:
# frames = []
# for index, frame in enumerate(ImageSequence.Iterator(image)):
# frame = frame.convert('RGBA')
# frame.paste(logo_image, xy, mask_im)
# b = io.BytesIO()
# frame.save(b, format="GIF")
# frame = Image.open(b)
# frames.append(frame)
# frames[0].save(out_gif, save_all=True, append_images=frames[1:])
# except Exception as e:
# print(e)
# return
# def show_image(img_path, width=None, height=None):
# """Shows an image within Jupyter notebook.
# Args:
# img_path (str): The image file path.
# width (int, optional): Width of the image in pixels. Defaults to None.
# height (int, optional): Height of the image in pixels. Defaults to None.
# """
# from IPython.display import display
# try:
# out = widgets.Output()
# # layout={'border': '1px solid black'})
# # layout={'border': '1px solid black', 'width': str(width + 20) + 'px', 'height': str(height + 10) + 'px'},)
# out.clear_output(wait=True)
# display(out)
# with out:
# file = open(img_path, "rb")
# image = file.read()
# if (width is None) and (height is None):
# display(widgets.Image(value=image))
# elif (width is not None) and (height is not None):
# display(widgets.Image(value=image, width=width, height=height))
# else:
# print('You need set both width and height.')
# return
# except Exception as e:
# print(e)
# def legend_from_ee(ee_class_table):
# """Extract legend from an Earth Engine class table on the Earth Engine Data Catalog page
# such as https://developers.google.com/earth-engine/datasets/catalog/MODIS_051_MCD12Q1
# Value Color Description
# 0 1c0dff Water
# 1 05450a Evergreen needleleaf forest
# 2 086a10 Evergreen broadleaf forest
# 3 54a708 Deciduous needleleaf forest
# 4 78d203 Deciduous broadleaf forest
# 5 009900 Mixed forest
# 6 c6b044 Closed shrublands
# 7 dcd159 Open shrublands
# 8 dade48 Woody savannas
# 9 fbff13 Savannas
# 10 b6ff05 Grasslands
# 11 27ff87 Permanent wetlands
# 12 c24f44 Croplands
# 13 a5a5a5 Urban and built-up
# 14 ff6d4c Cropland/natural vegetation mosaic
# 15 69fff8 Snow and ice
# 16 f9ffa4 Barren or sparsely vegetated
# 254 ffffff Unclassified
# Args:
# ee_class_table (str): An Earth Engine class table with triple quotes.
# Returns:
# dict: Returns a legend dictionary that can be used to create a legend.
# """
# try:
# ee_class_table = ee_class_table.strip()
# lines = ee_class_table.split('\n')[1:]
# if lines[0] == 'Value\tColor\tDescription':
# lines = lines[1:]
# legend_dict = {}
# for index, line in enumerate(lines):
# items = line.split("\t")
# items = [item.strip() for item in items]
# color = items[1]
# key = items[0] + " " + items[2]
# legend_dict[key] = color
# return legend_dict
# except Exception as e:
# print(e)
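# Example usage for the commented-out legend_from_ee() function above (illustrative;
# the class table must be tab-separated text copied from an Earth Engine Data Catalog page):
# ee_class_table = """
# Value	Color	Description
# 0	1c0dff	Water
# 1	05450a	Evergreen needleleaf forest
# """
# legend_dict = legend_from_ee(ee_class_table)
# # -> {'0 Water': '1c0dff', '1 Evergreen needleleaf forest': '05450a'}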
# def ee_tile_layer(ee_object, vis_params={}, name='Layer untitled', shown=True, opacity=1.0):
# """Converts and Earth Engine layer to ipyleaflet TileLayer.
# Args:
# ee_object (Collection|Feature|Image|MapId): The object to add to the map.
# vis_params (dict, optional): The visualization parameters. Defaults to {}.
# name (str, optional): The name of the layer. Defaults to 'Layer untitled'.
# shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
# opacity (float, optional): The layer's opacity represented as a number between 0 and 1. Defaults to 1.
# """
# ee_initialize()
# image = None
# if not isinstance(ee_object, ee.Image) and not isinstance(ee_object, ee.ImageCollection) and not isinstance(ee_object, ee.FeatureCollection) and not isinstance(ee_object, ee.Feature) and not isinstance(ee_object, ee.Geometry):
# err_str = "\n\nThe image argument in 'addLayer' function must be an instace of one of ee.Image, ee.Geometry, ee.Feature or ee.FeatureCollection."
# raise AttributeError(err_str)
# if isinstance(ee_object, ee.geometry.Geometry) or isinstance(ee_object, ee.feature.Feature) or isinstance(ee_object, ee.featurecollection.FeatureCollection):
# features = ee.FeatureCollection(ee_object)
# width = 2
# if 'width' in vis_params:
# width = vis_params['width']
# color = '000000'
# if 'color' in vis_params:
# color = vis_params['color']
# image_fill = features.style(
# **{'fillColor': color}).updateMask(ee.Image.constant(0.5))
# image_outline = features.style(
# **{'color': color, 'fillColor': '00000000', 'width': width})
# image = image_fill.blend(image_outline)
# elif isinstance(ee_object, ee.image.Image):
# image = ee_object
# elif isinstance(ee_object, ee.imagecollection.ImageCollection):
# image = ee_object.mosaic()
# map_id_dict = ee.Image(image).getMapId(vis_params)
# tile_layer = ipyleaflet.TileLayer(
# url=map_id_dict['tile_fetcher'].url_format,
# attribution='Google Earth Engine',
# name=name,
# opacity=opacity,
# visible=True
# # visible=shown
# )
# return tile_layer
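# Example usage for the commented-out ee_tile_layer() function above (illustrative;
# 'USGS/SRTMGL1_003' is a public Earth Engine asset):
# dem = ee.Image('USGS/SRTMGL1_003')
# vis = {'min': 0, 'max': 4000, 'palette': ['006633', 'E5FFCC', '662A00', 'D8D8D8', 'F5F5F5']}
# layer = ee_tile_layer(dem, vis, name='SRTM DEM', opacity=0.8)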
# def geojson_to_ee(geo_json, geodesic=True):
# """Converts a geojson to ee.Geometry()
# Args:
# geo_json (dict): A geojson geometry dictionary or file path.
# Returns:
# ee_object: An ee.Geometry object
# """
# ee_initialize()
# try:
# import json
# if not isinstance(geo_json, dict) and os.path.isfile(geo_json):
# with open(os.path.abspath(geo_json)) as f:
# geo_json = json.load(f)
# if geo_json['type'] == 'FeatureCollection':
# features = ee.FeatureCollection(geo_json['features'])
# return features
# elif geo_json['type'] == 'Feature':
# geom = None
# keys = geo_json['properties']['style'].keys()
# if 'radius' in keys: # Checks whether it is a circle
# geom = ee.Geometry(geo_json['geometry'])
# radius = geo_json['properties']['style']['radius']
# geom = geom.buffer(radius)
# elif geo_json['geometry']['type'] == 'Point': # Checks whether it is a point
# coordinates = geo_json['geometry']['coordinates']
# longitude = coordinates[0]
# latitude = coordinates[1]
# geom = ee.Geometry.Point(longitude, latitude)
# else:
# geom = ee.Geometry(geo_json['geometry'], "", geodesic)
# return geom
# else:
# print("Could not convert the geojson to ee.Geometry()")
# except Exception as e:
# print("Could not convert the geojson to ee.Geometry()")
# print(e)
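# Example usage for the commented-out geojson_to_ee() function above (illustrative;
# the coordinates are placeholders and the empty 'style' dict mimics what drawing tools produce):
# point_feature = {
#     'type': 'Feature',
#     'geometry': {'type': 'Point', 'coordinates': [-99.2, 46.8]},
#     'properties': {'style': {}},
# }
# ee_point = geojson_to_ee(point_feature)  # -> ee.Geometry.Point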
# def ee_to_geojson(ee_object, out_json=None):
# """Converts Earth Engine object to geojson.
# Args:
# ee_object (object): An Earth Engine object.
# Returns:
# object: GeoJSON object.
# """
# from json import dumps
# ee_initialize()
# try:
# if isinstance(ee_object, ee.geometry.Geometry) or isinstance(ee_object, ee.feature.Feature) or isinstance(ee_object, ee.featurecollection.FeatureCollection):
# json_object = ee_object.getInfo()
# if out_json is not None:
# out_json = os.path.abspath(out_json)
# if not os.path.exists(os.path.dirname(out_json)):
# os.makedirs(os.path.dirname(out_json))
# geojson = open(out_json, "w")
# geojson.write(
# dumps({"type": "FeatureCollection", "features": json_object}, indent=2) + "\n")
# geojson.close()
# return json_object
# else:
# print("Could not convert the Earth Engine object to geojson")
# except Exception as e:
# print(e)
# def open_github(subdir=None):
# """Opens the GitHub repository for this package.
# Args:
# subdir (str, optional): Sub-directory of the repository. Defaults to None.
# """
# import webbrowser
# url = 'https://github.com/giswqs/geemap'
# if subdir == 'source':
# url += '/tree/master/geemap/'
# elif subdir == 'examples':
# url += '/tree/master/examples'
# elif subdir == 'tutorials':
# url += '/tree/master/tutorials'
# webbrowser.open_new_tab(url)
# def clone_repo(out_dir='.', unzip=True):
# """Clones the geemap GitHub repository.
# Args:
# out_dir (str, optional): Output folder for the repo. Defaults to '.'.
# unzip (bool, optional): Whether to unzip the repository. Defaults to True.
# """
# url = 'https://github.com/giswqs/geemap/archive/master.zip'
# filename = 'geemap-master.zip'
# download_from_url(url, out_file_name=filename,
# out_dir=out_dir, unzip=unzip)
# def open_youtube():
# """Opens the YouTube tutorials for geemap.
# """
# import webbrowser
# url = 'https://www.youtube.com/playlist?list=PLAxJ4-o7ZoPccOFv1dCwvGI6TYnirRTg3'
# webbrowser.open_new_tab(url)
# def show_youtube(id='h0pz3S6Tvx0'):
# """Displays a YouTube video within Jupyter notebooks.
# Args:
# id (str, optional): Unique ID of the video. Defaults to 'h0pz3S6Tvx0'.
# """
# from IPython.display import YouTubeVideo, display
# try:
# out = widgets.Output(
# layout={'width': '815px'})
# # layout={'border': '1px solid black', 'width': '815px'})
# out.clear_output(wait=True)
# display(out)
# with out:
# display(YouTubeVideo(id, width=800, height=450))
# except Exception as e:
# print(e)
# def check_install(package):
# """Checks whether a package is installed. If not, it will install the package.
# Args:
# package (str): The name of the package to check.
# """
# import subprocess
# try:
# __import__(package)
# # print('{} is already installed.'.format(package))
# except ImportError:
# print('{} is not installed. Installing ...'.format(package))
# try:
# subprocess.check_call(["python", '-m', 'pip', 'install', package])
# except Exception as e:
# print('Failed to install {}'.format(package))
# print(e)
# print("{} has been installed successfully.".format(package))
# def update_package():
# """Updates the geemap package from the geemap GitHub repository without the need to use pip or conda.
# In this way, I don't have to keep updating pypi and conda-forge with every minor update of the package.
# """
# try:
# download_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
# if not os.path.exists(download_dir):
# os.makedirs(download_dir)
# clone_repo(out_dir=download_dir)
# pkg_dir = os.path.join(download_dir, 'geemap-master')
# work_dir = os.getcwd()
# os.chdir(pkg_dir)
# cmd = 'pip install .'
# os.system(cmd)
# os.chdir(work_dir)
# print("\nPlease comment out 'geemap.update_package()' and restart the kernel to take effect:\nJupyter menu -> Kernel -> Restart & Clear Output")
# except Exception as e:
# print(e)
# def shp_to_geojson(in_shp, out_json=None):
# """Converts a shapefile to GeoJSON.
# Args:
# in_shp (str): File path of the input shapefile.
# out_json (str, optional): File path of the output GeoJSON. Defaults to None.
# Returns:
# object: The json object representing the shapefile.
# """
# # check_install('pyshp')
# ee_initialize()
# try:
# import json
# import shapefile
# in_shp = os.path.abspath(in_shp)
# if out_json is None:
# out_json = os.path.splitext(in_shp)[0] + ".json"
# if os.path.exists(out_json):
# out_json = out_json.replace('.json', '_bk.json')
# elif not os.path.exists(os.path.dirname(out_json)):
# os.makedirs(os.path.dirname(out_json))
# reader = shapefile.Reader(in_shp)
# fields = reader.fields[1:]
# field_names = [field[0] for field in fields]
# buffer = []
# for sr in reader.shapeRecords():
# atr = dict(zip(field_names, sr.record))
# geom = sr.shape.__geo_interface__
# buffer.append(dict(type="Feature", geometry=geom, properties=atr))
# from json import dumps
# geojson = open(out_json, "w")
# geojson.write(dumps({"type": "FeatureCollection",
# "features": buffer}, indent=2) + "\n")
# geojson.close()
# with open(out_json) as f:
# json_data = json.load(f)
# return json_data
# except Exception as e:
# print(e)
# def shp_to_ee(in_shp):
# """Converts a shapefile to Earth Engine objects.
# Args:
# in_shp (str): File path to a shapefile.
# Returns:
# object: Earth Engine objects representing the shapefile.
# """
# ee_initialize()
# try:
# json_data = shp_to_geojson(in_shp)
# ee_object = geojson_to_ee(json_data)
# return ee_object
# except Exception as e:
# print(e)
# def filter_polygons(ftr):
# """Converts GeometryCollection to Polygon/MultiPolygon
# Args:
# ftr (object): ee.Feature
# Returns:
# object: ee.Feature
# """
# ee_initialize()
# geometries = ftr.geometry().geometries()
# geometries = geometries.map(lambda geo: ee.Feature(
# ee.Geometry(geo)).set('geoType', ee.Geometry(geo).type()))
# polygons = ee.FeatureCollection(geometries).filter(
# ee.Filter.eq('geoType', 'Polygon')).geometry()
# return ee.Feature(polygons).copyProperties(ftr)
# def ee_export_vector(ee_object, filename, selectors=None):
# """Exports Earth Engine FeatureCollection to other formats, including shp, csv, json, kml, and kmz.
# Args:
# ee_object (object): ee.FeatureCollection to export.
# filename (str): Output file name.
# selectors (list, optional): A list of attributes to export. Defaults to None.
# """
# import requests
# import zipfile
# ee_initialize()
# if not isinstance(ee_object, ee.FeatureCollection):
# print('The ee_object must be an ee.FeatureCollection.')
# return
# allowed_formats = ['csv', 'json', 'kml', 'kmz', 'shp']
# filename = os.path.abspath(filename)
# basename = os.path.basename(filename)
# name = os.path.splitext(basename)[0]
# filetype = os.path.splitext(basename)[1][1:].lower()
# filename_shp = filename
# if filetype == 'shp':
# filename = filename.replace('.shp', '.zip')
# if not (filetype.lower() in allowed_formats):
# print('The file type must be one of the following: {}'.format(
# ', '.join(allowed_formats)))
# return
# if selectors is None:
# selectors = ee_object.first().propertyNames().getInfo()
# elif not isinstance(selectors, list):
# print("selectors must be a list, such as ['attribute1', 'attribute2']")
# return
# else:
# allowed_attributes = ee_object.first().propertyNames().getInfo()
# for attribute in selectors:
# if not (attribute in allowed_attributes):
# print('Attributes must be one chosen from: {} '.format(
# ', '.join(allowed_attributes)))
# return
# try:
# print('Generating URL ...')
# url = ee_object.getDownloadURL(
# filetype=filetype, selectors=selectors, filename=name)
# print('Downloading data from {}\nPlease wait ...'.format(url))
# r = requests.get(url, stream=True)
# if r.status_code != 200:
# print('An error occurred while downloading. \n Retrying ...')
# try:
# new_ee_object = ee_object.map(filter_polygons)
# print('Generating URL ...')
# url = new_ee_object.getDownloadURL(
# filetype=filetype, selectors=selectors, filename=name)
# print('Downloading data from {}\nPlease wait ...'.format(url))
# r = requests.get(url, stream=True)
# except Exception as e:
# print(e)
# with open(filename, 'wb') as fd:
# for chunk in r.iter_content(chunk_size=1024):
# fd.write(chunk)
# except Exception as e:
# print('An error occurred while downloading.')
# print(e)
# return
# try:
# if filetype == 'shp':
# z = zipfile.ZipFile(filename)
# z.extractall(os.path.dirname(filename))
# os.remove(filename)
# filename = filename.replace('.zip', '.shp')
# print('Data downloaded to {}'.format(filename))
# except Exception as e:
# print(e)
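# Example usage for the commented-out ee_export_vector() function above (illustrative;
# 'TIGER/2018/States' is a public Earth Engine asset and the output path is a placeholder):
# states = ee.FeatureCollection('TIGER/2018/States')
# fc = states.filter(ee.Filter.eq('NAME', 'Tennessee'))
# ee_export_vector(fc, filename='tennessee.shp', selectors=['NAME', 'STUSPS'])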
# def ee_to_shp(ee_object, filename, selectors=None):
# """Downloads an ee.FeatureCollection as a shapefile.
# Args:
# ee_object (object): ee.FeatureCollection
# filename (str): The output filepath of the shapefile.
# selectors (list, optional): A list of attributes to export. Defaults to None.
# """
# ee_initialize()
# try:
# if filename.lower().endswith('.shp'):
# ee_export_vector(ee_object=ee_object,
# filename=filename, selectors=selectors)
# else:
# print('The filename must end with .shp')
# except Exception as e:
# print(e)
# def ee_to_csv(ee_object, filename, selectors=None):
# """Downloads an ee.FeatureCollection as a CSV file.
# Args:
# ee_object (object): ee.FeatureCollection
# filename (str): The output filepath of the CSV file.
# selectors (list, optional): A list of attributes to export. Defaults to None.
# """
# ee_initialize()
# try:
# if filename.lower().endswith('.csv'):
# ee_export_vector(ee_object=ee_object,
# filename=filename, selectors=selectors)
# else:
# print('The filename must end with .csv')
# except Exception as e:
# print(e)
# def ee_export_image(ee_object, filename, scale=None, crs=None, region=None, file_per_band=False):
# """Exports an ee.Image as a GeoTIFF.
# Args:
# ee_object (object): The ee.Image to download.
# filename (str): Output filename for the exported image.
# scale (float, optional): A default scale to use for any bands that do not specify one; ignored if crs and crs_transform is specified. Defaults to None.
# crs (str, optional): A default CRS string to use for any bands that do not explicitly specify one. Defaults to None.
# region (object, optional): A polygon specifying a region to download; ignored if crs and crs_transform is specified. Defaults to None.
# file_per_band (bool, optional): Whether to produce a different GeoTIFF per band. Defaults to False.
# """
# import requests
# import zipfile
# ee_initialize()
# if not isinstance(ee_object, ee.Image):
# print('The ee_object must be an ee.Image.')
# return
# filename = os.path.abspath(filename)
# basename = os.path.basename(filename)
# name = os.path.splitext(basename)[0]
# filetype = os.path.splitext(basename)[1][1:].lower()
# filename_zip = filename.replace('.tif', '.zip')
# if filetype != 'tif':
# print('The filename must end with .tif')
# return
# try:
# print('Generating URL ...')
# params = {'name': name, 'filePerBand': file_per_band}
# if scale is None:
# scale = ee_object.projection().nominalScale().multiply(10)
# params['scale'] = scale
# if region is None:
# region = ee_object.geometry()
# params['region'] = region
# if crs is not None:
# params['crs'] = crs
# url = ee_object.getDownloadURL(params)
# print('Downloading data from {}\nPlease wait ...'.format(url))
# r = requests.get(url, stream=True)
# if r.status_code != 200:
# print('An error occurred while downloading.')
# return
# with open(filename_zip, 'wb') as fd:
# for chunk in r.iter_content(chunk_size=1024):
# fd.write(chunk)
# except Exception as e:
# print('An error occurred while downloading.')
# print(e)
# return
# try:
# z = zipfile.ZipFile(filename_zip)
# z.extractall(os.path.dirname(filename))
# os.remove(filename_zip)
# if file_per_band:
# print('Data downloaded to {}'.format(os.path.dirname(filename)))
# else:
# print('Data downloaded to {}'.format(filename))
# except Exception as e:
# print(e)
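# Example usage for the commented-out ee_export_image() function above (illustrative;
# the region and output path are placeholders):
# image = ee.Image('USGS/SRTMGL1_003')
# region = ee.Geometry.Rectangle([-115.6, 35.9, -114.2, 36.5])
# ee_export_image(image, filename='dem.tif', scale=90, region=region, file_per_band=False)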
# def ee_export_image_collection(ee_object, out_dir, scale=None, crs=None, region=None, file_per_band=False):
# """Exports an ImageCollection as GeoTIFFs.
# Args:
# ee_object (object): The ee.Image to download.
# out_dir (str): The output directory for the exported images.
# scale (float, optional): A default scale to use for any bands that do not specify one; ignored if crs and crs_transform is specified. Defaults to None.
# crs (str, optional): A default CRS string to use for any bands that do not explicitly specify one. Defaults to None.
# region (object, optional): A polygon specifying a region to download; ignored if crs and crs_transform is specified. Defaults to None.
# file_per_band (bool, optional): Whether to produce a different GeoTIFF per band. Defaults to False.
# """
# import requests
# import zipfile
# ee_initialize()
# if not isinstance(ee_object, ee.ImageCollection):
# print('The ee_object must be an ee.ImageCollection.')
# return
# if not os.path.exists(out_dir):
# os.makedirs(out_dir)
# try:
# count = int(ee_object.size().getInfo())
# print("Total number of images: {}\n".format(count))
# for i in range(0, count):
# image = ee.Image(ee_object.toList(count).get(i))
# name = image.get('system:index').getInfo() + '.tif'
# filename = os.path.join(os.path.abspath(out_dir), name)
# print('Exporting {}/{}: {}'.format(i+1, count, name))
# ee_export_image(image, filename=filename, scale=scale,
# crs=crs, region=region, file_per_band=file_per_band)
# print('\n')
# except Exception as e:
# print(e)
# def ee_to_numpy(ee_object, bands=None, region=None, properties=None, default_value=None):
# """Extracts a rectangular region of pixels from an image into a 2D numpy array per band.
# Args:
# ee_object (object): The image to sample.
# bands (list, optional): The list of band names to extract. Please make sure that all bands have the same spatial resolution. Defaults to None.
# region (object, optional): The region whose projected bounding box is used to sample the image. The maximum number of pixels you can export is 262,144. Resampling and reprojecting all bands to a fixed scale can be useful. Defaults to the footprint in each band.
# properties (list, optional): The properties to copy over from the sampled image. Defaults to all non-system properties.
# default_value (float, optional): A default value used when a sampled pixel is masked or outside a band's footprint. Defaults to None.
# Returns:
# array: A 3D numpy array.
# """
# import numpy as np
# if not isinstance(ee_object, ee.Image):
# print('The input must be an ee.Image.')
# return
# if region is None:
# region = ee_object.geometry()
# try:
# if bands is not None:
# ee_object = ee_object.select(bands)
# else:
# bands = ee_object.bandNames().getInfo()
# band_count = len(bands)
# band_arrs = ee_object.sampleRectangle(
# region=region, properties=properties, defaultValue=default_value)
# band_values = []
# for band in bands:
# band_arr = band_arrs.get(band).getInfo()
# band_value = np.array(band_arr)
# band_values.append(band_value)
# image = np.dstack(band_values)
# return image
# except Exception as e:
# print(e)
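# Example usage for the commented-out ee_to_numpy() function above (illustrative;
# keep the sampled region small enough to stay under the 262,144-pixel limit):
# image = ee.Image('USGS/SRTMGL1_003')
# region = ee.Geometry.Rectangle([-115.00, 36.00, -114.98, 36.02])
# arr = ee_to_numpy(image, bands=['elevation'], region=region, default_value=0)
# print(arr.shape)  # (rows, cols, 1)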
# def download_ee_video(collection, video_args, out_gif):
# """Downloads a video thumbnail as a GIF image from Earth Engine.
# Args:
# collection (object): An ee.ImageCollection.
# video_args (dict): Parameters for exporting the video thumbnail.
# out_gif (str): File path to the output GIF.
# """
# import requests
# out_gif = os.path.abspath(out_gif)
# if not out_gif.endswith(".gif"):
# print('The output file must have an extension of .gif.')
# return
# if not os.path.exists(os.path.dirname(out_gif)):
# os.makedirs(os.path.dirname(out_gif))
# if 'region' in video_args.keys():
# roi = video_args['region']
# if not isinstance(roi, ee.Geometry):
# try:
# roi = roi.geometry()
# except Exception as e:
# print('Could not convert the provided roi to ee.Geometry')
# print(e)
# return
# video_args['region'] = roi
# try:
# print('Generating URL...')
# url = collection.getVideoThumbURL(video_args)
# print('Downloading GIF image from {}\nPlease wait ...'.format(url))
# r = requests.get(url, stream=True)
# if r.status_code != 200:
# print('An error occurred while downloading.')
# return
# else:
# with open(out_gif, 'wb') as fd:
# for chunk in r.iter_content(chunk_size=1024):
# fd.write(chunk)
# print('The GIF image has been saved to: {}'.format(out_gif))
# except Exception as e:
# print(e)
# def zonal_statistics(in_value_raster, in_zone_vector, out_file_path, statistics_type='MEAN', scale=None, crs=None, tile_scale=1.0, **kwargs):
# """Summarizes the values of a raster within the zones of another dataset and exports the results as a csv, shp, json, kml, or kmz.
# Args:
# in_value_raster (object): An ee.Image that contains the values on which to calculate a statistic.
# in_zone_vector (object): An ee.FeatureCollection that defines the zones.
# out_file_path (str): Output file path that will contain the summary of the values in each zone. The file type can be: csv, shp, json, kml, kmz
# statistics_type (str, optional): Statistic type to be calculated. Defaults to 'MEAN'. For 'HIST', you can provide three parameters: max_buckets, min_bucket_width, and max_raw. For 'FIXED_HIST', you must provide three parameters: hist_min, hist_max, and hist_steps.
# scale (float, optional): A nominal scale in meters of the projection to work in. Defaults to None.
# crs (str, optional): The projection to work in. If unspecified, the projection of the image's first band is used. If specified in addition to scale, rescaled to the specified scale. Defaults to None.
# tile_scale (float, optional): A scaling factor used to reduce aggregation tile size; using a larger tileScale (e.g. 2 or 4) may enable computations that run out of memory with the default. Defaults to 1.0.
# """
# if not isinstance(in_value_raster, ee.Image):
# print('The input raster must be an ee.Image.')
# return
# if not isinstance(in_zone_vector, ee.FeatureCollection):
# print('The input zone data must be an ee.FeatureCollection.')
# return
# allowed_formats = ['csv', 'json', 'kml', 'kmz', 'shp']
# filename = os.path.abspath(out_file_path)
# basename = os.path.basename(filename)
# name = os.path.splitext(basename)[0]
# filetype = os.path.splitext(basename)[1][1:].lower()
# if not (filetype in allowed_formats):
# print('The file type must be one of the following: {}'.format(
# ', '.join(allowed_formats)))
# return
# # Parameters for histogram
# # The maximum number of buckets to use when building a histogram; will be rounded up to a power of 2.
# max_buckets = None
# # The minimum histogram bucket width, or null to allow any power of 2.
# min_bucket_width = None
# # The number of values to accumulate before building the initial histogram.
# max_raw = None
# hist_min = 1.0 # The lower (inclusive) bound of the first bucket.
# hist_max = 100.0 # The upper (exclusive) bound of the last bucket.
# hist_steps = 10 # The number of buckets to use.
# if 'max_buckets' in kwargs.keys():
# max_buckets = kwargs['max_buckets']
# if 'min_bucket_width' in kwargs.keys():
# min_bucket_width = kwargs['min_bucket_width']
# if 'max_raw' in kwargs.keys():
# max_raw = kwargs['max_raw']
# if statistics_type.upper() == 'FIXED_HIST' and ('hist_min' in kwargs.keys()) and ('hist_max' in kwargs.keys()) and ('hist_steps' in kwargs.keys()):
# hist_min = kwargs['hist_min']
# hist_max = kwargs['hist_max']
# hist_steps = kwargs['hist_steps']
# elif statistics_type.upper() == 'FIXED_HIST':
# print('To use fixedHistogram, please provide these three parameters: hist_min, hist_max, and hist_steps.')
# return
# allowed_statistics = {
# 'MEAN': ee.Reducer.mean(),
# 'MAXIMUM': ee.Reducer.max(),
# 'MEDIAN': ee.Reducer.median(),
# 'MINIMUM': ee.Reducer.min(),
# 'STD': ee.Reducer.stdDev(),
# 'MIN_MAX': ee.Reducer.minMax(),
# 'SUM': ee.Reducer.sum(),
# 'VARIANCE': ee.Reducer.variance(),
# 'HIST': ee.Reducer.histogram(maxBuckets=max_buckets, minBucketWidth=min_bucket_width, maxRaw=max_raw),
# 'FIXED_HIST': ee.Reducer.fixedHistogram(hist_min, hist_max, hist_steps)
# }
# if not (statistics_type.upper() in allowed_statistics.keys()):
# print('The statistics type must be one of the following: {}'.format(
# ', '.join(list(allowed_statistics.keys()))))
# return
# if scale is None:
# scale = in_value_raster.projection().nominalScale().multiply(10)
# try:
# print('Computing statistics ...')
# result = in_value_raster.reduceRegions(
# collection=in_zone_vector, reducer=allowed_statistics[statistics_type], scale=scale, crs=crs, tileScale=tile_scale)
# ee_export_vector(result, filename)
# except Exception as e:
# print(e)
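# Example usage for the commented-out zonal_statistics() function above (illustrative;
# both asset IDs are public Earth Engine datasets and the output path is a placeholder):
# dem = ee.Image('USGS/SRTMGL1_003')
# states = ee.FeatureCollection('TIGER/2018/States')
# zonal_statistics(dem, states, 'mean_elevation.csv', statistics_type='MEAN', scale=1000)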
# def zonal_statistics_by_group(in_value_raster, in_zone_vector, out_file_path, statistics_type='SUM', decimal_places=0, denominator=1.0, scale=None, crs=None, tile_scale=1.0):
# """Summarizes the area or percentage of a raster by group within the zones of another dataset and exports the results as a csv, shp, json, kml, or kmz.
# Args:
# in_value_raster (object): An integer Image that contains the values on which to calculate area/percentage.
# in_zone_vector (object): An ee.FeatureCollection that defines the zones.
# out_file_path (str): Output file path that will contain the summary of the values in each zone. The file type can be: csv, shp, json, kml, kmz
# statistics_type (str, optional): Can be either 'SUM' or 'PERCENTAGE' . Defaults to 'SUM'.
# decimal_places (int, optional): The number of decimal places to use. Defaults to 0.
# denominator (float, optional): To convert area units (e.g., from square meters to square kilometers). Defaults to 1.0.
# scale (float, optional): A nominal scale in meters of the projection to work in. Defaults to None.
# crs (str, optional): The projection to work in. If unspecified, the projection of the image's first band is used. If specified in addition to scale, rescaled to the specified scale. Defaults to None.
# tile_scale (float, optional): A scaling factor used to reduce aggregation tile size; using a larger tileScale (e.g. 2 or 4) may enable computations that run out of memory with the default. Defaults to 1.0.
# """
# if not isinstance(in_value_raster, ee.Image):
# print('The input raster must be an ee.Image.')
# return
# band_count = in_value_raster.bandNames().size().getInfo()
# band_name = ''
# if band_count == 1:
# band_name = in_value_raster.bandNames().get(0)
# else:
# print('The input image can only have one band.')
# return
# band_types = in_value_raster.bandTypes().get(band_name).getInfo()
# band_type = band_types.get('precision')
# if band_type != 'int':
# print('The input image band must be integer type.')
# return
# if not isinstance(in_zone_vector, ee.FeatureCollection):
# print('The input zone data must be an ee.FeatureCollection.')
# return
# allowed_formats = ['csv', 'json', 'kml', 'kmz', 'shp']
# filename = os.path.abspath(out_file_path)
# basename = os.path.basename(filename)
# name = os.path.splitext(basename)[0]
# filetype = os.path.splitext(basename)[1][1:]
# if not (filetype.lower() in allowed_formats):
# print('The file type must be one of the following: {}'.format(
# ', '.join(allowed_formats)))
# return
# out_dir = os.path.dirname(filename)
# if not os.path.exists(out_dir):
# os.makedirs(out_dir)
# allowed_statistics = ['SUM', 'PERCENTAGE']
# if not (statistics_type.upper() in allowed_statistics):
# print('The statistics type can only be one of {}'.format(
# ', '.join(allowed_statistics)))
# return
# if scale is None:
# scale = in_value_raster.projection().nominalScale().multiply(10)
# try:
# print('Computing ... ')
# geometry = in_zone_vector.geometry()
# hist = in_value_raster.reduceRegion(ee.Reducer.frequencyHistogram(
# ), geometry=geometry, bestEffort=True, scale=scale)
# class_values = ee.Dictionary(hist.get(band_name)).keys().map(
# lambda v: ee.Number.parse(v)).sort()
# class_names = class_values.map(
# lambda c: ee.String('Class_').cat(ee.Number(c).format()))
# class_count = class_values.size().getInfo()
# dataset = ee.Image.pixelArea().divide(denominator).addBands(in_value_raster)
# init_result = dataset.reduceRegions(**{
# 'collection': in_zone_vector,
# 'reducer': ee.Reducer.sum().group(**{
# 'groupField': 1,
# 'groupName': 'group',
# }),
# 'scale': scale
# })
# def build_dict(input_list):
# decimal_format = '%.{}f'.format(decimal_places)
# in_dict = input_list.map(lambda x: ee.Dictionary().set(ee.String('Class_').cat(
# ee.Number(ee.Dictionary(x).get('group')).format()), ee.Number.parse(ee.Number(ee.Dictionary(x).get('sum')).format(decimal_format))))
# return in_dict
# def get_keys(input_list):
# return input_list.map(lambda x: ee.String('Class_').cat(ee.Number(ee.Dictionary(x).get('group')).format()))
# def get_values(input_list):
# decimal_format = '%.{}f'.format(decimal_places)
# return input_list.map(lambda x: ee.Number.parse(ee.Number(ee.Dictionary(x).get('sum')).format(decimal_format)))
# def set_attribute(f):
# groups = ee.List(f.get('groups'))
# keys = get_keys(groups)
# values = get_values(groups)
# total_area = ee.List(values).reduce(ee.Reducer.sum())
# def get_class_values(x):
# cls_value = ee.Algorithms.If(
# keys.contains(x), values.get(keys.indexOf(x)), 0)
# cls_value = ee.Algorithms.If(ee.String(statistics_type).compareTo(ee.String(
# 'SUM')), ee.Number(cls_value).divide(ee.Number(total_area)), cls_value)
# return cls_value
# full_values = class_names.map(lambda x: get_class_values(x))
# attr_dict = ee.Dictionary.fromLists(class_names, full_values)
# attr_dict = attr_dict.set('Class_sum', total_area)
# return f.set(attr_dict).set('groups', None)
# final_result = init_result.map(set_attribute)
# ee_export_vector(final_result, filename)
# except Exception as e:
# print(e)
# def create_colorbar(width=150, height=30, palette=['blue', 'green', 'red'], add_ticks=True, add_labels=True, labels=None, vertical=False, out_file=None, font_type='arial.ttf', font_size=12, font_color='black', add_outline=True, outline_color='black'):
# """Creates a colorbar based on the provided palette.
# Args:
# width (int, optional): Width of the colorbar in pixels. Defaults to 150.
# height (int, optional): Height of the colorbar in pixels. Defaults to 30.
# palette (list, optional): Palette for the colorbar. Each color can be provided as a string (e.g., 'red'), a hex string (e.g., '#ff0000'), or an RGB tuple (255, 0, 255). Defaults to ['blue', 'green', 'red'].
# add_ticks (bool, optional): Whether to add tick markers to the colorbar. Defaults to True.
# add_labels (bool, optional): Whether to add labels to the colorbar. Defaults to True.
# labels (list, optional): A list of labels to add to the colorbar. Defaults to None.
# vertical (bool, optional): Whether to rotate the colorbar vertically. Defaults to False.
# out_file (str, optional): File path to the output colorbar in png format. Defaults to None.
# font_type (str, optional): Font type to use for labels. Defaults to 'arial.ttf'.
# font_size (int, optional): Font size to use for labels. Defaults to 12.
# font_color (str, optional): Font color to use for labels. Defaults to 'black'.
# add_outline (bool, optional): Whether to add an outline to the colorbar. Defaults to True.
# outline_color (str, optional): Color for the outline of the colorbar. Defaults to 'black'.
# Returns:
# str: File path of the output colorbar in png format.
# """
# import decimal
# import io
# import math
# import pkg_resources
# import warnings
# from colour import Color
# from PIL import Image, ImageDraw, ImageFont
# warnings.simplefilter('ignore')
# pkg_dir = os.path.dirname(
# pkg_resources.resource_filename("geemap", "geemap.py"))
# if out_file is None:
# filename = 'colorbar_' + random_string() + '.png'
# out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
# out_file = os.path.join(out_dir, filename)
# elif not out_file.endswith('.png'):
# print('The output file must end with .png')
# return
# else:
# out_file = os.path.abspath(out_file)
# if not os.path.exists(os.path.dirname(out_file)):
# os.makedirs(os.path.dirname(out_file))
# im = Image.new('RGBA', (width, height))
# ld = im.load()
# def float_range(start, stop, step):
# while start < stop:
# yield float(start)
# start += decimal.Decimal(step)
# n_colors = len(palette)
# decimal_places = 2
# rgb_colors = [Color(check_color(c)).rgb for c in palette]
# keys = [round(c, decimal_places)
# for c in list(float_range(0, 1.0001, 1.0/(n_colors - 1)))]
# heatmap = []
# for index, item in enumerate(keys):
# pair = [item, rgb_colors[index]]
# heatmap.append(pair)
# def gaussian(x, a, b, c, d=0):
# return a * math.exp(-(x - b)**2 / (2 * c**2)) + d
# def pixel(x, width=100, map=[], spread=1):
# width = float(width)
# r = sum([gaussian(x, p[1][0], p[0] * width, width/(spread*len(map)))
# for p in map])
# g = sum([gaussian(x, p[1][1], p[0] * width, width/(spread*len(map)))
# for p in map])
# b = sum([gaussian(x, p[1][2], p[0] * width, width/(spread*len(map)))
# for p in map])
# return min(1.0, r), min(1.0, g), min(1.0, b)
# for x in range(im.size[0]):
# r, g, b = pixel(x, width=width, map=heatmap)
# r, g, b = [int(256*v) for v in (r, g, b)]
# for y in range(im.size[1]):
# ld[x, y] = r, g, b
# if add_outline:
# draw = ImageDraw.Draw(im)
# draw.rectangle([(0, 0), (width-1, height-1)],
# outline=check_color(outline_color))
# del draw
# if add_ticks:
# tick_length = height * 0.1
# x = [key * width for key in keys]
# y_top = height - tick_length
# y_bottom = height
# draw = ImageDraw.Draw(im)
# for i in x:
# shape = [(i, y_top), (i, y_bottom)]
# draw.line(shape, fill='black', width=0)
# del draw
# if vertical:
# im = im.transpose(Image.ROTATE_90)
# width, height = im.size
# if labels is None:
# labels = [str(c) for c in keys]
# elif len(labels) == 2:
# try:
# lowerbound = float(labels[0])
# upperbound = float(labels[1])
# step = (upperbound - lowerbound) / (len(palette) - 1)
# labels = [str(lowerbound + c * step)
# for c in range(0, len(palette))]
# except Exception as e:
# print(e)
# print('The labels are invalid.')
# return
# elif len(labels) == len(palette):
# labels = [str(c) for c in labels]
# else:
# print('The labels must have the same length as the palette.')
# return
# if add_labels:
# default_font = os.path.join(pkg_dir, 'data/fonts/arial.ttf')
# if font_type == 'arial.ttf':
# font = ImageFont.truetype(default_font, font_size)
# else:
# try:
# font_list = system_fonts(show_full_path=True)
# font_names = [os.path.basename(f) for f in font_list]
# if (font_type in font_list) or (font_type in font_names):
# font = ImageFont.truetype(font_type, font_size)
# else:
# print(
# 'The specified font type could not be found on your system. Using the default font instead.')
# font = ImageFont.truetype(default_font, font_size)
# except Exception as e:
# print(e)
# font = ImageFont.truetype(default_font, font_size)
# font_color = check_color(font_color)
# draw = ImageDraw.Draw(im)
# w, h = draw.textsize(labels[0], font=font)
# for label in labels:
# w_tmp, h_tmp = draw.textsize(label, font)
# if w_tmp > w:
# w = w_tmp
# if h_tmp > h:
# h = h_tmp
# W, H = width + w * 2, height + h * 2
# background = Image.new('RGBA', (W, H))
# draw = ImageDraw.Draw(background)
# if vertical:
# xy = (0, h)
# else:
# xy = (w, 0)
# background.paste(im, xy, im)
# for index, label in enumerate(labels):
# w_tmp, h_tmp = draw.textsize(label, font)
# if vertical:
# spacing = 5
# x = width + spacing
# y = int(height + h - keys[index] * height - h_tmp / 2 - 1)
# draw.text((x, y), label, font=font, fill=font_color)
# else:
# x = int(keys[index] * width + w - w_tmp / 2)
# spacing = int(h * 0.05)
# y = height + spacing
# draw.text((x, y), label, font=font, fill=font_color)
# im = background.copy()
# im.save(out_file)
# return out_file
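# Example usage for the commented-out create_colorbar() function above (illustrative;
# when out_file is None the colorbar PNG is written to the Downloads folder):
# colorbar_png = create_colorbar(
#     width=250,
#     height=30,
#     palette=['blue', 'cyan', 'green', 'yellow', 'red'],
#     labels=[0, 4000],
#     vertical=False,
# )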
# def naip_timeseries(roi=None, start_year=2009, end_year=2018):
# """Creates NAIP annual timeseries
# Args:
# roi (object, optional): An ee.Geometry representing the region of interest. Defaults to None.
# start_year (int, optional): Starting year for the timeseries. Defaults to 2009.
# end_year (int, optional): Ending year for the timeseries. Defaults to 2018.
# Returns:
# object: An ee.List of annual NAIP mosaics (one ee.Image per year).
# """
# ee_initialize()
# try:
# def get_annual_NAIP(year):
# try:
# collection = ee.ImageCollection('USDA/NAIP/DOQQ')
# if roi is not None:
# collection = collection.filterBounds(roi)
# start_date = ee.Date.fromYMD(year, 1, 1)
# end_date = ee.Date.fromYMD(year, 12, 31)
# naip = collection.filterDate(start_date, end_date) \
# .filter(ee.Filter.listContains("system:band_names", "N"))
# naip = ee.Image(ee.ImageCollection(naip).mosaic())
# return naip
# except Exception as e:
# print(e)
# years = ee.List.sequence(start_year, end_year)
# collection = years.map(get_annual_NAIP)
# return collection
# except Exception as e:
# print(e)
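# Example usage for the commented-out naip_timeseries() function above (illustrative;
# the ROI coordinates are placeholders). Note that, as written, the function returns an
# ee.List of yearly NAIP mosaics rather than an ee.ImageCollection:
# roi = ee.Geometry.Rectangle([-99.6, 46.7, -99.3, 46.9])
# naip_images = naip_timeseries(roi=roi, start_year=2010, end_year=2015)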
# def sentinel2_timeseries(roi=None, start_year=2015, end_year=2019, start_date='01-01', end_date='12-31'):
# """Generates an annual Sentinel 2 ImageCollection. This algorithm is adapted from https://gist.github.com/jdbcode/76b9ac49faf51627ebd3ff988e10adbc. A huge thank you to Justin Braaten for sharing his fantastic work.
# Images include both level 1C and level 2A imagery.
# Args:
# roi (object, optional): Region of interest to create the timelapse. Defaults to None.
# start_year (int, optional): Starting year for the timelapse. Defaults to 2015.
# end_year (int, optional): Ending year for the timelapse. Defaults to 2019.
# start_date (str, optional): Starting date (month-day) each year for filtering ImageCollection. Defaults to '01-01'.
# end_date (str, optional): Ending date (month-day) each year for filtering ImageCollection. Defaults to '12-31'.
# Returns:
# object: Returns an ImageCollection containing annual Sentinel 2 images.
# """
# ################################################################################
# ################################################################################
# # Input and output parameters.
# import re
# import datetime
# ee_initialize()
# if roi is None:
# # roi = ee.Geometry.Polygon(
# # [[[-180, -80],
# # [-180, 80],
# # [180, 80],
# # [180, -80],
# # [-180, -80]]], None, False)
# roi = ee.Geometry.Polygon(
# [[[-115.471773, 35.892718],
# [-115.471773, 36.409454],
# [-114.271283, 36.409454],
# [-114.271283, 35.892718],
# [-115.471773, 35.892718]]], None, False)
# if not isinstance(roi, ee.Geometry):
# try:
# roi = roi.geometry()
# except Exception as e:
# print('Could not convert the provided roi to ee.Geometry')
# print(e)
# return
# ################################################################################
# # Setup vars to get dates.
# if isinstance(start_year, int) and (start_year >= 2015) and (start_year <= 2020):
# pass
# else:
# print('The start year must be an integer >= 2015.')
# return
# if isinstance(end_year, int) and (end_year >= 2015) and (end_year <= 2020):
# pass
# else:
# print('The end year must be an integer <= 2020.')
# return
# if re.match("[0-9]{2}\-[0-9]{2}", start_date) and re.match("[0-9]{2}\-[0-9]{2}", end_date):
# pass
# else:
# print('The start date and end date must be month-day, such as 06-10, 09-20')
# return
# try:
# datetime.datetime(int(start_year), int(
# start_date[:2]), int(start_date[3:5]))
# datetime.datetime(int(end_year), int(end_date[:2]), int(end_date[3:5]))
# except Exception as e:
# print('The input dates are invalid.')
# print(e)
# return
# try:
# start_test = datetime.datetime(int(start_year), int(
# start_date[:2]), int(start_date[3:5]))
# end_test = datetime.datetime(
# int(end_year), int(end_date[:2]), int(end_date[3:5]))
# if start_test > end_test:
# raise ValueError('Start date must be prior to end date')
# except Exception as e:
# print(e)
# return
# def days_between(d1, d2):
# d1 = datetime.datetime.strptime(d1, "%Y-%m-%d")
# d2 = datetime.datetime.strptime(d2, "%Y-%m-%d")
# return abs((d2 - d1).days)
# n_days = days_between(str(start_year) + '-' + start_date,
# str(start_year) + '-' + end_date)
# start_month = int(start_date[:2])
# start_day = int(start_date[3:5])
# start_date = str(start_year) + '-' + start_date
# end_date = str(end_year) + '-' + end_date
# # Define a collection filter by date, bounds, and quality.
# def colFilter(col, aoi): # , startDate, endDate):
# return(col.filterBounds(aoi))
#     # Get Sentinel 2 collections, both Level-1C (top of atmosphere) and Level-2A (surface reflectance)
# MSILCcol = ee.ImageCollection('COPERNICUS/S2')
# MSI2Acol = ee.ImageCollection('COPERNICUS/S2_SR')
# # Define a collection filter by date, bounds, and quality.
# def colFilter(col, roi, start_date, end_date):
# return(col
# .filterBounds(roi)
# .filterDate(start_date, end_date))
# # .filter('CLOUD_COVER < 5')
# # .filter('GEOMETRIC_RMSE_MODEL < 15')
# # .filter('IMAGE_QUALITY == 9 || IMAGE_QUALITY_OLI == 9'))
# # Function to get and rename bands of interest from MSI
# def renameMSI(img):
# return(img.select(
# ['B2', 'B3', 'B4', 'B5', 'B6', 'B7',
# 'B8', 'B8A', 'B11', 'B12', 'QA60'],
# ['Blue', 'Green', 'Red', 'Red Edge 1', 'Red Edge 2', 'Red Edge 3', 'NIR', 'Red Edge 4', 'SWIR1', 'SWIR2', 'QA60']))
# # Add NBR for LandTrendr segmentation.
# def calcNbr(img):
# return(img.addBands(img.normalizedDifference(['NIR', 'SWIR2'])
# .multiply(-10000).rename('NBR')).int16())
# # Define function to mask out clouds and cloud shadows in images.
# # Use CFmask band included in USGS Landsat SR image product.
# def fmask(img):
# cloudOpaqueBitMask = 1 << 10
# cloudCirrusBitMask = 1 << 11
# qa = img.select('QA60')
# mask = qa.bitwiseAnd(cloudOpaqueBitMask).eq(0) \
# .And(qa.bitwiseAnd(cloudCirrusBitMask).eq(0))
# return(img.updateMask(mask))
# # Define function to prepare MSI images.
# def prepMSI(img):
# orig = img
# img = renameMSI(img)
# img = fmask(img)
# return(ee.Image(img.copyProperties(orig, orig.propertyNames()))
# .resample('bicubic'))
# # Get annual median collection.
# def getAnnualComp(y):
# startDate = ee.Date.fromYMD(
# ee.Number(y), ee.Number(start_month), ee.Number(start_day))
# endDate = startDate.advance(ee.Number(n_days), 'day')
# # Filter collections and prepare them for merging.
# MSILCcoly = colFilter(MSILCcol, roi, startDate, endDate).map(prepMSI)
# MSI2Acoly = colFilter(MSI2Acol, roi, startDate, endDate).map(prepMSI)
# # Merge the collections.
# col = MSILCcoly.merge(MSI2Acoly)
# yearImg = col.median()
# nBands = yearImg.bandNames().size()
# yearImg = ee.Image(ee.Algorithms.If(
# nBands,
# yearImg,
# dummyImg))
# return(calcNbr(yearImg)
# .set({'year': y, 'system:time_start': startDate.millis(), 'nBands': nBands}))
# ################################################################################
# # Make a dummy image for missing years.
# bandNames = ee.List(['Blue', 'Green', 'Red', 'Red Edge 1',
# 'Red Edge 2', 'Red Edge 3', 'NIR',
# 'Red Edge 4', 'SWIR1', 'SWIR2', 'QA60'])
# fillerValues = ee.List.repeat(0, bandNames.size())
# dummyImg = ee.Image.constant(fillerValues).rename(bandNames) \
# .selfMask().int16()
# ################################################################################
# # Get a list of years
# years = ee.List.sequence(start_year, end_year)
# ################################################################################
# # Make list of annual image composites.
# imgList = years.map(getAnnualComp)
# # Convert image composite list to collection
# imgCol = ee.ImageCollection.fromImages(imgList)
# imgCol = imgCol.map(lambda img: img.clip(roi))
# return imgCol
# def landsat_timeseries(roi=None, start_year=1984, end_year=2019, start_date='06-10', end_date='09-20'):
# """Generates an annual Landsat ImageCollection. This algorithm is adapted from https://gist.github.com/jdbcode/76b9ac49faf51627ebd3ff988e10adbc. A huge thank you to Justin Braaten for sharing his fantastic work.
# Args:
# roi (object, optional): Region of interest to create the timelapse. Defaults to None.
# start_year (int, optional): Starting year for the timelapse. Defaults to 1984.
# end_year (int, optional): Ending year for the timelapse. Defaults to 2019.
# start_date (str, optional): Starting date (month-day) each year for filtering ImageCollection. Defaults to '06-10'.
# end_date (str, optional): Ending date (month-day) each year for filtering ImageCollection. Defaults to '09-20'.
# Returns:
# object: Returns an ImageCollection containing annual Landsat images.
# """
# ################################################################################
# # Input and output parameters.
# import re
# import datetime
# ee_initialize()
# if roi is None:
# # roi = ee.Geometry.Polygon(
# # [[[-180, -80],
# # [-180, 80],
# # [180, 80],
# # [180, -80],
# # [-180, -80]]], None, False)
# roi = ee.Geometry.Polygon(
# [[[-115.471773, 35.892718],
# [-115.471773, 36.409454],
# [-114.271283, 36.409454],
# [-114.271283, 35.892718],
# [-115.471773, 35.892718]]], None, False)
# if not isinstance(roi, ee.Geometry):
# try:
# roi = roi.geometry()
# except Exception as e:
# print('Could not convert the provided roi to ee.Geometry')
# print(e)
# return
# ################################################################################
# # Setup vars to get dates.
# if isinstance(start_year, int) and (start_year >= 1984) and (start_year < 2020):
# pass
# else:
# print('The start year must be an integer >= 1984.')
# return
# if isinstance(end_year, int) and (end_year > 1984) and (end_year <= 2020):
# pass
# else:
# print('The end year must be an integer <= 2020.')
# return
# if re.match("[0-9]{2}\-[0-9]{2}", start_date) and re.match("[0-9]{2}\-[0-9]{2}", end_date):
# pass
# else:
# print('The start date and end date must be month-day, such as 06-10, 09-20')
# return
# try:
# datetime.datetime(int(start_year), int(
# start_date[:2]), int(start_date[3:5]))
# datetime.datetime(int(end_year), int(end_date[:2]), int(end_date[3:5]))
# except Exception as e:
# print('The input dates are invalid.')
# return
# def days_between(d1, d2):
# d1 = datetime.datetime.strptime(d1, "%Y-%m-%d")
# d2 = datetime.datetime.strptime(d2, "%Y-%m-%d")
# return abs((d2 - d1).days)
# n_days = days_between(str(start_year) + '-' + start_date,
# str(start_year) + '-' + end_date)
# start_month = int(start_date[:2])
# start_day = int(start_date[3:5])
# start_date = str(start_year) + '-' + start_date
# end_date = str(end_year) + '-' + end_date
# # Define a collection filter by date, bounds, and quality.
# def colFilter(col, aoi): # , startDate, endDate):
# return(col.filterBounds(aoi))
#     # Landsat collection preprocessing
# # Get Landsat surface reflectance collections for OLI, ETM+ and TM sensors.
# LC08col = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')
# LE07col = ee.ImageCollection('LANDSAT/LE07/C01/T1_SR')
# LT05col = ee.ImageCollection('LANDSAT/LT05/C01/T1_SR')
# LT04col = ee.ImageCollection('LANDSAT/LT04/C01/T1_SR')
# # Define a collection filter by date, bounds, and quality.
# def colFilter(col, roi, start_date, end_date):
# return(col
# .filterBounds(roi)
# .filterDate(start_date, end_date))
# # .filter('CLOUD_COVER < 5')
# # .filter('GEOMETRIC_RMSE_MODEL < 15')
# # .filter('IMAGE_QUALITY == 9 || IMAGE_QUALITY_OLI == 9'))
# # Function to get and rename bands of interest from OLI.
# def renameOli(img):
# return(img.select(
# ['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'pixel_qa'],
# ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']))
# # Function to get and rename bands of interest from ETM+.
# def renameEtm(img):
# return(img.select(
# ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'pixel_qa'],
# ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']))
# # Add NBR for LandTrendr segmentation.
# def calcNbr(img):
# return(img.addBands(img.normalizedDifference(['NIR', 'SWIR2'])
# .multiply(-10000).rename('NBR')).int16())
# # Define function to mask out clouds and cloud shadows in images.
# # Use CFmask band included in USGS Landsat SR image product.
# def fmask(img):
# cloudShadowBitMask = 1 << 3
# cloudsBitMask = 1 << 5
# qa = img.select('pixel_qa')
# mask = qa.bitwiseAnd(cloudShadowBitMask).eq(0) \
# .And(qa.bitwiseAnd(cloudsBitMask).eq(0))
# return(img.updateMask(mask))
# # Define function to prepare OLI images.
# def prepOli(img):
# orig = img
# img = renameOli(img)
# img = fmask(img)
# return (ee.Image(img.copyProperties(orig, orig.propertyNames()))
# .resample('bicubic'))
# # Define function to prepare ETM+ images.
# def prepEtm(img):
# orig = img
# img = renameEtm(img)
# img = fmask(img)
# return(ee.Image(img.copyProperties(orig, orig.propertyNames()))
# .resample('bicubic'))
# # Get annual median collection.
# def getAnnualComp(y):
# startDate = ee.Date.fromYMD(
# ee.Number(y), ee.Number(start_month), ee.Number(start_day))
# endDate = startDate.advance(ee.Number(n_days), 'day')
# # Filter collections and prepare them for merging.
# LC08coly = colFilter(LC08col, roi, startDate, endDate).map(prepOli)
# LE07coly = colFilter(LE07col, roi, startDate, endDate).map(prepEtm)
# LT05coly = colFilter(LT05col, roi, startDate, endDate).map(prepEtm)
# LT04coly = colFilter(LT04col, roi, startDate, endDate).map(prepEtm)
# # Merge the collections.
# col = LC08coly.merge(LE07coly).merge(LT05coly).merge(LT04coly)
# yearImg = col.median()
# nBands = yearImg.bandNames().size()
# yearImg = ee.Image(ee.Algorithms.If(
# nBands,
# yearImg,
# dummyImg))
# return(calcNbr(yearImg)
# .set({'year': y, 'system:time_start': startDate.millis(), 'nBands': nBands}))
# ################################################################################
# # Make a dummy image for missing years.
# bandNames = ee.List(['Blue', 'Green', 'Red', 'NIR',
# 'SWIR1', 'SWIR2', 'pixel_qa'])
# fillerValues = ee.List.repeat(0, bandNames.size())
# dummyImg = ee.Image.constant(fillerValues).rename(bandNames) \
# .selfMask().int16()
# ################################################################################
# # Get a list of years
# years = ee.List.sequence(start_year, end_year)
# ################################################################################
# # Make list of annual image composites.
# imgList = years.map(getAnnualComp)
# # Convert image composite list to collection
# imgCol = ee.ImageCollection.fromImages(imgList)
# imgCol = imgCol.map(lambda img: img.clip(roi))
# return imgCol
# ################################################################################
# # Run LandTrendr.
# lt = ee.Algorithms.TemporalSegmentation.LandTrendr(
# timeSeries=imgCol.select(['NBR', 'SWIR1', 'NIR', 'Green']),
# maxSegments=10,
# spikeThreshold=0.7,
# vertexCountOvershoot=3,
# preventOneYearRecovery=True,
# recoveryThreshold=0.5,
# pvalThreshold=0.05,
# bestModelProportion=0.75,
# minObservationsNeeded=6)
# ################################################################################
# # Get fitted imagery. This starts export tasks.
# def getYearStr(year):
# return(ee.String('yr_').cat(ee.Algorithms.String(year).slice(0,4)))
# yearsStr = years.map(getYearStr)
# r = lt.select(['SWIR1_fit']).arrayFlatten([yearsStr]).toShort()
# g = lt.select(['NIR_fit']).arrayFlatten([yearsStr]).toShort()
# b = lt.select(['Green_fit']).arrayFlatten([yearsStr]).toShort()
# for i, c in zip([r, g, b], ['r', 'g', 'b']):
# descr = 'mamore-river-'+c
# name = 'users/user/'+descr
# print(name)
# task = ee.batch.Export.image.toAsset(
# image=i,
# region=roi.getInfo()['coordinates'],
# assetId=name,
# description=descr,
# scale=30,
# crs='EPSG:3857',
# maxPixels=1e13)
# task.start()
# def landsat_ts_gif(roi=None, out_gif=None, start_year=1984, end_year=2019, start_date='06-10', end_date='09-20', bands=['NIR', 'Red', 'Green'], vis_params=None, dimensions=768, frames_per_second=10):
# """Generates a Landsat timelapse GIF image. This function is adapted from https://emaprlab.users.earthengine.app/view/lt-gee-time-series-animator. A huge thank you to Justin Braaten for sharing his fantastic work.
# Args:
# roi (object, optional): Region of interest to create the timelapse. Defaults to None.
# out_gif ([type], optional): File path to the output animated GIF. Defaults to None.
# start_year (int, optional): Starting year for the timelapse. Defaults to 1984.
# end_year (int, optional): Ending year for the timelapse. Defaults to 2019.
# start_date (str, optional): Starting date (month-day) each year for filtering ImageCollection. Defaults to '06-10'.
# end_date (str, optional): Ending date (month-day) each year for filtering ImageCollection. Defaults to '09-20'.
# bands (list, optional): Three bands selected from ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']. Defaults to ['NIR', 'Red', 'Green'].
# vis_params (dict, optional): Visualization parameters. Defaults to None.
#         dimensions (int, optional): A number or pair of numbers (in format WIDTHxHEIGHT) giving the maximum dimensions of the thumbnail to render, in pixels. If only one number is passed, it is used as the maximum, and the other dimension is computed by proportional scaling. Defaults to 768.
# frames_per_second (int, optional): Animation speed. Defaults to 10.
# Returns:
# str: File path to the output GIF image.
# """
# ee_initialize()
# if roi is None:
# roi = ee.Geometry.Polygon(
# [[[-115.471773, 35.892718],
# [-115.471773, 36.409454],
# [-114.271283, 36.409454],
# [-114.271283, 35.892718],
# [-115.471773, 35.892718]]], None, False)
# elif isinstance(roi, ee.Feature) or isinstance(roi, ee.FeatureCollection):
# roi = roi.geometry()
# elif isinstance(roi, ee.Geometry):
# pass
# else:
# print('The provided roi is invalid. It must be an ee.Geometry')
# return
# if out_gif is None:
# out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
# filename = 'landsat_ts_' + random_string() + '.gif'
# out_gif = os.path.join(out_dir, filename)
# elif not out_gif.endswith('.gif'):
# print('The output file must end with .gif')
# return
# elif not os.path.isfile(out_gif):
# print('The output file must be a file')
# return
# else:
# out_gif = os.path.abspath(out_gif)
# out_dir = os.path.dirname(out_gif)
# if not os.path.exists(out_dir):
# os.makedirs(out_dir)
# allowed_bands = ['Blue', 'Green', 'Red',
# 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']
# if len(bands) == 3 and all(x in allowed_bands for x in bands):
# pass
# else:
# print('You can only select 3 bands from the following: {}'.format(
# ', '.join(allowed_bands)))
# return
# try:
# col = landsat_timeseries(
# roi, start_year, end_year, start_date, end_date)
# if vis_params is None:
# vis_params = {}
# vis_params['bands'] = bands
# vis_params['min'] = 0
# vis_params['max'] = 4000
# vis_params['gamma'] = [1, 1, 1]
# video_args = vis_params.copy()
# video_args['dimensions'] = dimensions
# video_args['region'] = roi
# video_args['framesPerSecond'] = frames_per_second
# video_args['crs'] = 'EPSG:3857'
# if 'bands' not in video_args.keys():
# video_args['bands'] = bands
# if 'min' not in video_args.keys():
# video_args['min'] = 0
# if 'max' not in video_args.keys():
# video_args['max'] = 4000
# if 'gamma' not in video_args.keys():
# video_args['gamma'] = [1, 1, 1]
# download_ee_video(col, video_args, out_gif)
# return out_gif
# except Exception as e:
# print(e)
# return
# def minimum_bounding_box(geojson):
# """Gets the minimum bounding box for a geojson polygon.
# Args:
# geojson (dict): A geojson dictionary.
# Returns:
# tuple: Returns a tuple containing the minimum bounding box in the format of (lower_left(lat, lon), upper_right(lat, lon)), such as ((13, -130), (32, -120)).
# """
# coordinates = []
# try:
# if 'geometry' in geojson.keys():
# coordinates = geojson['geometry']['coordinates'][0]
# else:
# coordinates = geojson['coordinates'][0]
# lower_left = min([x[1] for x in coordinates]), min(
# [x[0] for x in coordinates]) # (lat, lon)
# upper_right = max([x[1] for x in coordinates]), max([x[0]
# for x in coordinates]) # (lat, lon)
# bounds = (lower_left, upper_right)
# return bounds
# except Exception as e:
# print(e)
# return
# def geocode(location, max_rows=10, reverse=False):
# """Search location by address and lat/lon coordinates.
# Args:
# location (str): Place name or address
# max_rows (int, optional): Maximum number of records to return. Defaults to 10.
# reverse (bool, optional): Search place based on coordinates. Defaults to False.
# Returns:
# list: Returns a list of locations.
# """
# import geocoder
# if not isinstance(location, str):
# print('The location must be a string.')
# return None
# if not reverse:
# locations = []
# addresses = set()
# g = geocoder.arcgis(location, maxRows=max_rows)
# for result in g:
# address = result.address
# if not address in addresses:
# addresses.add(address)
# locations.append(result)
# if len(locations) > 0:
# return locations
# else:
# return None
# else:
# try:
# if ',' in location:
# latlon = [float(x) for x in location.split(',')]
# elif ' ' in location:
# latlon = [float(x) for x in location.split(' ')]
# else:
# print(
# 'The lat-lon coordinates should be numbers only and separated by comma or space, such as 40.2, -100.3')
# return
# g = geocoder.arcgis(latlon, method='reverse')
# locations = []
# addresses = set()
# for result in g:
# address = result.address
# if not address in addresses:
# addresses.add(address)
# locations.append(result)
# if len(locations) > 0:
# return locations
# else:
# return None
# except Exception as e:
# print(e)
# return None
# def is_latlon_valid(location):
# """Checks whether a pair of coordinates is valid.
# Args:
# location (str): A pair of latlon coordinates separated by comma or space.
# Returns:
# bool: Returns True if valid.
# """
# latlon = []
# if ',' in location:
# latlon = [float(x) for x in location.split(',')]
# elif ' ' in location:
# latlon = [float(x) for x in location.split(' ')]
# else:
# print(
# 'The coordinates should be numbers only and separated by comma or space, such as 40.2, -100.3')
# return False
# try:
# lat, lon = float(latlon[0]), float(latlon[1])
#         if lat >= -90 and lat <= 90 and lon >= -180 and lon <= 180:
# return True
# else:
# return False
# except Exception as e:
# print(e)
# return False
# def latlon_from_text(location):
# """Extracts latlon from text.
# Args:
# location (str): A pair of latlon coordinates separated by comma or space.
# Returns:
#         tuple: Returns (lat, lon) if valid, otherwise None.
# """
# latlon = []
# try:
# if ',' in location:
# latlon = [float(x) for x in location.split(',')]
# elif ' ' in location:
# latlon = [float(x) for x in location.split(' ')]
# else:
# print(
# 'The lat-lon coordinates should be numbers only and separated by comma or space, such as 40.2, -100.3')
# return None
# lat, lon = latlon[0], latlon[1]
#         if lat >= -90 and lat <= 90 and lon >= -180 and lon <= 180:
# return lat, lon
# else:
# return None
# except Exception as e:
# print(e)
# print('The lat-lon coordinates should be numbers only and separated by comma or space, such as 40.2, -100.3')
# return None
# def search_ee_data(keywords):
# """Searches Earth Engine data catalog.
# Args:
#         keywords (str): Keywords to search for; can be id, provider, tag, and so on.
# Returns:
#         list: Returns a list of assets.
# """
# try:
# cmd = 'geeadd search --keywords "{}"'.format(str(keywords))
# output = os.popen(cmd).read()
# start_index = output.index('[')
# assets = eval(output[start_index:])
# results = []
# for asset in assets:
# asset_dates = asset['start_date'] + ' - ' + asset['end_date']
# asset_snippet = asset['ee_id_snippet']
# start_index = asset_snippet.index("'") + 1
# end_index = asset_snippet.index("'", start_index)
# asset_id = asset_snippet[start_index:end_index]
# asset['dates'] = asset_dates
# asset['id'] = asset_id
# asset['uid'] = asset_id.replace('/', '_')
# # asset['url'] = 'https://developers.google.com/earth-engine/datasets/catalog/' + asset['uid']
# # asset['thumbnail'] = 'https://mw1.google.com/ges/dd/images/{}_sample.png'.format(
# # asset['uid'])
# results.append(asset)
# return results
# except Exception as e:
# print(e)
# return
# def ee_data_thumbnail(asset_id):
# """Retrieves the thumbnail URL of an Earth Engine asset.
# Args:
# asset_id (str): An Earth Engine asset id.
# Returns:
# str: An http url of the thumbnail.
# """
# import requests
# import urllib
# from bs4 import BeautifulSoup
# asset_uid = asset_id.replace('/', '_')
# asset_url = "https://developers.google.com/earth-engine/datasets/catalog/{}".format(
# asset_uid)
# thumbnail_url = 'https://mw1.google.com/ges/dd/images/{}_sample.png'.format(
# asset_uid)
# r = requests.get(thumbnail_url)
# try:
# if r.status_code != 200:
# html_page = urllib.request.urlopen(asset_url)
# soup = BeautifulSoup(html_page, features="html.parser")
# for img in soup.findAll('img'):
# if 'sample.png' in img.get('src'):
# thumbnail_url = img.get('src')
# return thumbnail_url
# return thumbnail_url
# except Exception as e:
# print(e)
# return
# def ee_data_html(asset):
# """Generates HTML from an asset to be used in the HTML widget.
# Args:
# asset (dict): A dictionary containing an Earth Engine asset.
# Returns:
# str: A string containing HTML.
# """
# template = '''
# <html>
# <body>
# <h3>asset_title</h3>
# <h4>Dataset Availability</h4>
# <p style="margin-left: 40px">asset_dates</p>
# <h4>Earth Engine Snippet</h4>
# <p style="margin-left: 40px">ee_id_snippet</p>
# <h4>Earth Engine Data Catalog</h4>
# <p style="margin-left: 40px"><a href="asset_url" target="_blank">asset_id</a></p>
# <h4>Dataset Thumbnail</h4>
# <img src="thumbnail_url">
# </body>
# </html>
# '''
# try:
# text = template.replace('asset_title', asset['title'])
# text = text.replace('asset_dates', asset['dates'])
# text = text.replace('ee_id_snippet', asset['ee_id_snippet'])
# text = text.replace('asset_id', asset['id'])
# text = text.replace('asset_url', asset['asset_url'])
# # asset['thumbnail'] = ee_data_thumbnail(asset['id'])
# text = text.replace('thumbnail_url', asset['thumbnail_url'])
# return text
# except Exception as e:
# print(e)
# return
# def create_code_cell(code='', where='below'):
# """Creates a code cell in the IPython Notebook.
# Args:
# code (str, optional): Code to fill the new code cell with. Defaults to ''.
# where (str, optional): Where to add the new code cell. It can be one of the following: above, below, at_bottom. Defaults to 'below'.
# """
# import base64
# from IPython.display import Javascript, display
# encoded_code = (base64.b64encode(str.encode(code))).decode()
# display(Javascript("""
# var code = IPython.notebook.insert_cell_{0}('code');
# code.set_text(atob("{1}"));
# """.format(where, encoded_code)))
# def in_colab_shell():
# """Tests if the code is being executed within Google Colab."""
# try:
# import google.colab # pylint: disable=unused-variable
# return True
# except ImportError:
# return False
# def is_drive_mounted():
# """Checks whether Google Drive is mounted in Google Colab.
# Returns:
# bool: Returns True if Google Drive is mounted, False otherwise.
# """
# drive_path = '/content/drive/My Drive'
# if os.path.exists(drive_path):
# return True
# else:
# return False
# def credentials_in_drive():
# """Checks if the ee credentials file exists in Google Drive.
# Returns:
#         bool: Returns True if the ee credentials file exists in Google Drive, False otherwise.
# """
# credentials_path = '/content/drive/My Drive/.config/earthengine/credentials'
# if os.path.exists(credentials_path):
# return True
# else:
# return False
# def credentials_in_colab():
# """Checks if the ee credentials file exists in Google Colab.
# Returns:
#         bool: Returns True if the ee credentials file exists in Google Colab, False otherwise.
# """
# credentials_path = '/root/.config/earthengine/credentials'
# if os.path.exists(credentials_path):
# return True
# else:
# return False
# def copy_credentials_to_drive():
# """Copies ee credentials from Google Colab to Google Drive.
# """
# import shutil
# src = '/root/.config/earthengine/credentials'
# dst = '/content/drive/My Drive/.config/earthengine/credentials'
# wd = os.path.dirname(dst)
# if not os.path.exists(wd):
# os.makedirs(wd)
# shutil.copyfile(src, dst)
# def copy_credentials_to_colab():
# """Copies ee credentials from Google Drive to Google Colab.
# """
# import shutil
# src = '/content/drive/My Drive/.config/earthengine/credentials'
# dst = '/root/.config/earthengine/credentials'
# wd = os.path.dirname(dst)
# if not os.path.exists(wd):
# os.makedirs(wd)
# shutil.copyfile(src, dst)
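# --- Illustrative sketch added by the editor; not part of the original module. ---
# The commented-out fmask() helper above masks Landsat SR pixels using the
# CFmask bits of 'pixel_qa' (bit 3 = cloud shadow, bit 5 = cloud). This is a
# minimal standalone version of that bit test on plain integers; the real code
# applies the same logic server-side with qa.bitwiseAnd(mask).eq(0).
def _is_clear_pixel(pixel_qa_value):
    """Return True if neither the cloud-shadow bit (3) nor the cloud bit (5) is set."""
    cloud_shadow_bit_mask = 1 << 3
    clouds_bit_mask = 1 << 5
    return (pixel_qa_value & cloud_shadow_bit_mask) == 0 and \
        (pixel_qa_value & clouds_bit_mask) == 0


if __name__ == "__main__":
    # 322 has bits 1, 6 and 8 set, so it is treated as clear;
    # 352 additionally has bit 5 (cloud) set, so it would be masked out.
    print(_is_clear_pixel(322), _is_clear_pixel(352))  # True False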
| 37.676374
| 282
| 0.577259
|
6ea205edc357c8153620b9167e73cbe93292f6d2
| 3,964
|
py
|
Python
|
parkings/migrations/0001_initial.py
|
klemmari1/parkkihubi
|
93218c6046c0910e8a4c723dc7128c6eec085b8c
|
[
"MIT"
] | 12
|
2016-11-29T15:13:10.000Z
|
2021-06-12T06:45:38.000Z
|
parkings/migrations/0001_initial.py
|
niuzhipeng123/parkkihubi
|
93218c6046c0910e8a4c723dc7128c6eec085b8c
|
[
"MIT"
] | 154
|
2016-11-30T09:07:58.000Z
|
2022-02-12T08:29:36.000Z
|
parkings/migrations/0001_initial.py
|
niuzhipeng123/parkkihubi
|
93218c6046c0910e8a4c723dc7128c6eec085b8c
|
[
"MIT"
] | 15
|
2016-11-29T19:32:48.000Z
|
2022-01-05T11:31:39.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-05 14:40
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.gis.db.models.fields
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='time created')),
('modified_at', models.DateTimeField(auto_now=True, verbose_name='time modified')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('city', models.CharField(blank=True, max_length=80, verbose_name='city')),
('postal_code', models.CharField(blank=True, max_length=20, verbose_name='postal code')),
('street', models.CharField(blank=True, max_length=128, verbose_name='street address')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Operator',
fields=[
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='time created')),
('modified_at', models.DateTimeField(auto_now=True, verbose_name='time modified')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=80, verbose_name='name')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Parking',
fields=[
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='time created')),
('modified_at', models.DateTimeField(auto_now=True, verbose_name='time modified')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('device_identifier', models.CharField(db_index=True, max_length=128, verbose_name='device identifier')),
('location', django.contrib.gis.db.models.fields.PointField(srid=4326, verbose_name='location')),
('registration_number', models.CharField(db_index=True, max_length=10, validators=[django.core.validators.RegexValidator('^[A-Z0-9-]+$')], verbose_name='registration number')),
('resident_code', models.CharField(blank=True, max_length=1, validators=[django.core.validators.RegexValidator('^[A-Z]{1}$')], verbose_name='resident parking code')),
('special_code', models.CharField(blank=True, max_length=10, verbose_name='special parking code')),
('time_end', models.DateTimeField(db_index=True, verbose_name='parking end time')),
('time_start', models.DateTimeField(db_index=True, verbose_name='parking start time')),
('zone', models.IntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(3)], verbose_name='zone number')),
('address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='parkings', to='parkings.Address', verbose_name='address')),
('operator', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='parkings', to='parkings.Operator', verbose_name='operator')),
],
options={
'abstract': False,
},
),
]
| 55.830986
| 192
| 0.637235
|
4a56ea815ef31f36c81edbddad36d4e524ca11ec
| 6,898
|
py
|
Python
|
odp/publish/catalogue/datacite.py
|
SAEONData/Open-Data-Platform
|
cfd2a53e145ec86e187d7c4e1260df17ec6dcb03
|
[
"MIT"
] | 2
|
2021-03-04T07:09:47.000Z
|
2022-01-02T19:23:41.000Z
|
odp/publish/catalogue/datacite.py
|
SAEONData/Open-Data-Platform
|
cfd2a53e145ec86e187d7c4e1260df17ec6dcb03
|
[
"MIT"
] | 18
|
2020-09-16T09:16:45.000Z
|
2022-01-25T14:17:42.000Z
|
odp/publish/catalogue/datacite.py
|
SAEONData/Open-Data-Platform
|
cfd2a53e145ec86e187d7c4e1260df17ec6dcb03
|
[
"MIT"
] | 1
|
2021-06-25T13:02:57.000Z
|
2021-06-25T13:02:57.000Z
|
import logging
from datetime import datetime, timezone
from typing import Tuple
import pydantic
from sqlalchemy import or_, and_, not_
from odp.api.models.datacite import DataciteRecordIn
from odp.db import session, transaction
from odp.db.models import CatalogueRecord, DataciteRecord
from odp.lib.datacite import DataciteClient
from odp.lib.exceptions import DataciteError
from odp.publish.catalogue import Catalogue
logger = logging.getLogger(__name__)
class DataciteCatalogue(Catalogue):
def __init__(
self,
datacite_api_url: str,
datacite_username: str,
datacite_password: str,
doi_prefix: str,
doi_landing_page_base_url: str,
batch_size: int,
max_retries: int,
):
self.datacite = DataciteClient(
api_url=datacite_api_url,
username=datacite_username,
password=datacite_password,
doi_prefix=doi_prefix,
)
self.doi_landing_page_base_url = doi_landing_page_base_url
self.batch_size = batch_size
self.max_retries = max_retries
def synchronize(self) -> None:
published = 0
unpublished = 0
unchanged = 0
errors = 0
try:
check_records = session.query(CatalogueRecord, DataciteRecord). \
outerjoin(DataciteRecord). \
filter(not_( # exclude records without DOIs
CatalogueRecord.catalogue_record.comparator.contains({'doi': None})
)). \
filter(or_(
DataciteRecord.metadata_id == None,
DataciteRecord.checked < CatalogueRecord.updated,
# check if the DOI resolution URL that we publish to DataCite must
# be updated; unlike a change in metadata, this might result from
# a config (or code) change, but not from a catalogue record update
DataciteRecord.url != self.doi_landing_page_base_url + '/' + DataciteRecord.doi,
)). \
limit(self.batch_size).all()
# clear errors and retries for records that we have selected to check
for catrec, dcrec in check_records:
if dcrec is not None and dcrec.error is not None:
dcrec.error = None
dcrec.retries = None
dcrec.save()
failed_ids = session.query(CatalogueRecord.metadata_id).join(DataciteRecord).filter(
and_(
DataciteRecord.error != None,
DataciteRecord.retries < self.max_retries,
),
).limit(self.batch_size).all()
syncable_ids = [catrec.metadata_id for catrec, dcrec in check_records] + \
[record_id for record_id, in failed_ids]
logger.info(f"Selected {len(syncable_ids)} records to synchronize with DataCite")
for record_id in syncable_ids:
pub, unpub, unchg, err = self._syncrecord(record_id)
published += pub
unpublished += unpub
unchanged += unchg
errors += err
except Exception as e:
logger.critical(str(e))
finally:
session.remove()
logger.info(f"{published} published; "
f"{unpublished} un-published; "
f"{unchanged} unchanged; "
f"{errors} errors")
def _syncrecord(self, record_id: str) -> Tuple[bool, bool, bool, bool]:
logger.debug(f"Syncing record {record_id}")
published = False
unpublished = False
unchanged = False
error = False
with transaction():
catrec, dcrec = session.query(CatalogueRecord, DataciteRecord).outerjoin(DataciteRecord). \
filter(CatalogueRecord.metadata_id == record_id). \
one()
doi = catrec.catalogue_record['doi']
if dcrec is None:
dcrec = DataciteRecord(metadata_id=record_id, doi=doi, published=False)
try:
datacite_record = DataciteRecordIn(
doi=doi,
url=f'{self.doi_landing_page_base_url}/{doi}',
metadata=catrec.catalogue_record['metadata'],
)
datacite_record_dict = datacite_record.dict()
except pydantic.ValidationError:
datacite_record = None
datacite_record_dict = None
publish = catrec.published and doi is not None and datacite_record is not None
try:
if dcrec.published and (not publish or dcrec.doi.lower() != doi.lower()):
# the record is currently published and should be unpublished;
# if the DOI has changed, we must also first unpublish the record
logger.info(f"Unpublishing record {record_id} with DOI {dcrec.doi}")
self.datacite.unpublish_doi(
dcrec.doi,
)
dcrec.published = False
dcrec.updated = datetime.now(timezone.utc)
unpublished = True
if publish and (not dcrec.published or datacite_record_dict != {
'doi': dcrec.doi,
'url': dcrec.url,
'metadata': dcrec.metadata_,
}):
# the record should be published; it is either not currently published,
# or it is published but one or more properties have changed
logger.info(f"Publishing record {record_id} with DOI {doi}")
self.datacite.publish_doi(
datacite_record,
)
dcrec.doi = doi
dcrec.url = datacite_record.url
dcrec.metadata_ = datacite_record.metadata
dcrec.published = True
dcrec.updated = datetime.now(timezone.utc)
published = True
if not (published or unpublished):
logger.debug(f"No change for record {record_id}")
unchanged = True
dcrec.error = None
dcrec.retries = None
except DataciteError as e:
dcrec.error = f'{e.status_code}: {e.error_detail}'
dcrec.retries = dcrec.retries + 1 if dcrec.retries is not None else 0
logger.error(f"Error syncing record {record_id} with DataCite: {dcrec.error}")
error = True
dcrec.checked = datetime.now(timezone.utc)
dcrec.save()
return published, unpublished, unchanged, error
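# --- Illustrative sketch added by the editor; not part of the original module. ---
# The publish/unpublish branching inside _syncrecord() above, reduced to a pure
# function over plain values so the decision is easy to follow in isolation.
# The metadata-change comparison used in the real code is omitted here for
# brevity; the parameter names only loosely mirror the DataciteRecord columns.
def decide_sync_actions(currently_published, current_doi, target_doi, should_publish):
    """Return (unpublish, publish) flags for a single catalogue record."""
    unpublish = currently_published and (
        not should_publish
        or (current_doi or '').lower() != (target_doi or '').lower()
    )
    publish = should_publish and (not currently_published or unpublish)
    return unpublish, publish


if __name__ == '__main__':
    # A published record whose DOI changed is unpublished first, then republished.
    print(decide_sync_actions(True, '10.1000/OLD', '10.1000/new', True))  # (True, True)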
| 41.059524
| 103
| 0.556103
|
e5f8e333b152d0a60932b52153efdc5ecf2118f8
| 324
|
py
|
Python
|
westcoast_stream.py
|
smmaurer/twitter-streaming
|
5ddfa2faf6afcdaeffac6af7da4adea6989132f2
|
[
"MIT"
] | 1
|
2015-10-30T00:08:18.000Z
|
2015-10-30T00:08:18.000Z
|
westcoast_stream.py
|
smmaurer/twitter-streaming
|
5ddfa2faf6afcdaeffac6af7da4adea6989132f2
|
[
"MIT"
] | null | null | null |
westcoast_stream.py
|
smmaurer/twitter-streaming
|
5ddfa2faf6afcdaeffac6af7da4adea6989132f2
|
[
"MIT"
] | 2
|
2015-12-04T18:54:17.000Z
|
2019-02-06T03:53:11.000Z
|
__author__ = "Sam Maurer"
__date__ = "October 1, 2015"
__license__ = "MIT"
# runtime hack to import code from a subfolder
import sys
sys.path.insert(0, 'stream_automator/')
import stream_automator
s = stream_automator.Stream(
fname_base = 'westcoast-',
time_limit = 10,
bbox = '-126,29,-113,51')
s.begin_stream()
| 19.058824
| 46
| 0.716049
|
4a6fb6213fe1613bafbecd86a538f2875093991d
| 2,808
|
py
|
Python
|
bioprocs/bed.py
|
LeaveYeah/bioprocs
|
c5d2ddcc837f5baee00faf100e7e9bd84222cfbf
|
[
"MIT"
] | null | null | null |
bioprocs/bed.py
|
LeaveYeah/bioprocs
|
c5d2ddcc837f5baee00faf100e7e9bd84222cfbf
|
[
"MIT"
] | 2
|
2019-02-15T22:59:49.000Z
|
2019-02-15T23:03:09.000Z
|
bioprocs/bed.py
|
LeaveYeah/bioprocs
|
c5d2ddcc837f5baee00faf100e7e9bd84222cfbf
|
[
"MIT"
] | null | null | null |
from pyppl import Proc, Box
from .bedtools import *
from . import params
"""
@name:
pBedSort
@description:
Sort bed files
@input:
`infile:file`: The input file
@output:
`outfile:file`: The output file
@args:
`tool`: The tool used to sort the file. Default: sort (bedtools, bedops)
`bedtools`: The path to bedtools. Default: bedtools
`bedops_sort`: The path to bedops' sort-bed. Default: sort-bed
`mem`: The memory to use. Default: 8G
`by`: Sort by coordinates("coord", default) or name("name")
- Only available when use tool `sort`
`tmpdir`: The tmpdir to use. Default: `$TMPDIR`
	`unique`: Remove the duplicated records? Default: True
`params`: Other params for `tool`. Default: {}
@requires:
[`bedtools`](http://bedtools.readthedocs.io/en/latest/index.html)
[`bedops`](https://github.com/bedops/bedops)
"""
pBedSort = Proc(desc = 'Sort bed files.')
pBedSort.input = "infile:file"
pBedSort.output = "outfile:file:{{i.infile | bn}}"
pBedSort.args.tool = 'sort'
pBedSort.args.bedtools = params.bedtools.value
pBedSort.args.bedops = params.bedops_sort.value
pBedSort.args.mem = '8G'
pBedSort.args.by = 'coord'
pBedSort.args.unique = True
pBedSort.args.params = Box()
pBedSort.args.tmpdir = params.tmpdir.value
pBedSort.lang = params.python.value
pBedSort.script = "file:scripts/bed/pBedSort.py"
"""
@name:
pBedLiftover
@description:
Lift over bed files.
@input:
`infile:file`: The input bed file
@output:
`outfile:file`: The output file
`umfile:file` : The unmapped file
@args:
`liftover`: The liftover program
`lochain` : the liftover chain file
@require:
`liftover` from UCSC
"""
pBedLiftover = Proc(desc = 'Lift over bed files.')
pBedLiftover.input = 'infile:file'
pBedLiftover.output = 'outfile:file:{{i.infile | bn}}, umfile:file:{{i.infile | fn}}.unmapped{{i.infile | ext}}'
pBedLiftover.args.liftover = params.liftover.value
pBedLiftover.args.lochain  = params.lochain.value
pBedLiftover.args.params = Box()
pBedLiftover.lang = params.python.value
pBedLiftover.script = "file:scripts/bed/pBedLiftover.py"
"""
@name:
pGff2Bed
@description:
Convert GTF/GFF file to BED file
@input:
`infile:file`: The input gtf/gff file
@output:
`outfile:file`: The converted bed file
@args:
`attr2name`: The function used to convert attributes from GTF/GFF file to BED field 'name'
"""
pGff2Bed = Proc(desc = 'Convert GTF/GFF file to BED file')
pGff2Bed.input = 'infile:file'
pGff2Bed.output = 'outfile:file:{{i.infile | fn}}.bed'
pGff2Bed.args.attr2name = None
pGff2Bed.args.keepinfo = True
pGff2Bed.lang = params.python.value
pGff2Bed.script = "file:scripts/bed/pGff2Bed.py"
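# --- Illustrative sketch added by the editor; not part of the original module. ---
# pBedSort delegates the actual sorting to bedtools/bedops/sort, but the `by`
# argument described in its docstring boils down to the ordering sketched here
# on plain (chrom, start, end, name) tuples. Purely for illustration.
def _sort_bed_records(records, by='coord'):
    """Sort BED-like tuples by coordinates (chrom, start, end) or by name."""
    if by == 'coord':
        return sorted(records, key=lambda r: (r[0], int(r[1]), int(r[2])))
    return sorted(records, key=lambda r: r[3])


if __name__ == '__main__':
    recs = [('chr2', 100, 200, 'b'), ('chr1', 500, 600, 'a')]
    print(_sort_bed_records(recs))             # by coordinates: chr1 record first
    print(_sort_bed_records(recs, by='name'))  # by name: 'a' record first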
| 33.035294
| 119
| 0.673789
|
b1e07bda4ad13d68f81204e13c1b95eeb98054d3
| 981
|
py
|
Python
|
scraper/storage_spiders/hotdealvn.py
|
chongiadung/choinho
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
[
"MIT"
] | null | null | null |
scraper/storage_spiders/hotdealvn.py
|
chongiadung/choinho
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
[
"MIT"
] | 10
|
2020-02-11T23:34:28.000Z
|
2022-03-11T23:16:12.000Z
|
scraper/storage_spiders/hotdealvn.py
|
chongiadung/choinho
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
[
"MIT"
] | 3
|
2018-08-05T14:54:25.000Z
|
2021-06-07T01:49:59.000Z
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//h1[@class='product__title']",
'price' : "//div[@class='product__details']//span[@class='price__value']",
'category' : "//div[@class='breadcrumb']/div[@class='container']/ol/li/a",
'description' : "//div[@class='tab-content tabs__content']",
'images' : "//img[@itemprop='image']/@src",
'canonical' : "//link[@rel='canonical']/@href",
'base_url' : "",
'brand' : ""
}
name = 'hotdeal.vn'
allowed_domains = ['hotdeal.vn']
start_urls = ['http://www.hotdeal.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+-\d+\.html$']), 'parse_item'),
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+/($|\?page=\d+$)']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
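# --- Illustrative sketch added by the editor; not part of the original spider. ---
# A standalone check of the two Rule patterns above, showing which URL paths
# would be routed to parse_item (product pages) and which to parse (listing
# pages). The example paths are made up.
if __name__ == '__main__':
    import re
    item_re = re.compile(r'/[a-zA-Z0-9-]+-\d+\.html$')
    listing_re = re.compile(r'/[a-zA-Z0-9-]+/($|\?page=\d+$)')
    for path in ['/dong-ho-thoi-trang-123456.html', '/thoi-trang/?page=3', '/thoi-trang/']:
        if item_re.search(path):
            print(path, '-> parse_item')
        elif listing_re.search(path):
            print(path, '-> parse')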
| 36.333333
| 78
| 0.626911
|
9f123a6f57a32c5618b22793b285d9bd045f4f00
| 14,528
|
py
|
Python
|
RsaCtfTool.py
|
it5prasoon/RsaCtfTool
|
87ffaa857b22deaa7c33b0c29e55bf0875d10ddf
|
[
"Beerware"
] | 1
|
2021-06-05T13:42:21.000Z
|
2021-06-05T13:42:21.000Z
|
RsaCtfTool.py
|
it5prasoon/RsaCtfTool
|
87ffaa857b22deaa7c33b0c29e55bf0875d10ddf
|
[
"Beerware"
] | null | null | null |
RsaCtfTool.py
|
it5prasoon/RsaCtfTool
|
87ffaa857b22deaa7c33b0c29e55bf0875d10ddf
|
[
"Beerware"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
----------------------------------------------------------------------------
"THE BEER-WARE LICENSE" (Revision 42):
ganapati (@G4N4P4T1) wrote this file. As long as you retain this notice you
can do whatever you want with this stuff. If we meet some day, and you think
this stuff is worth it, you can buy me a beer in return.
----------------------------------------------------------------------------
"""
import sys, os
import logging
import argparse
import urllib3
import tempfile
from glob import glob
from Crypto.PublicKey import RSA
from lib.rsa_attack import RSAAttack
from lib.rsalibnum import invmod
from lib.utils import get_numeric_value, print_results, get_base64_value, n2s
from os.path import dirname, basename, isfile, join
from urllib3.exceptions import InsecureRequestWarning
from lib.customlogger import CustomFormatter, logger_levels
from lib.keys_wrapper import (
generate_pq_from_n_and_p_or_q,
generate_keys_from_p_q_e_n,
PrivateKey,
)
from lib.idrsa_pub_disector import disect_idrsa_pub
from lib.is_roca_test import is_roca_vulnerable
# Remove insecure warning for factordb.com
urllib3.disable_warnings(InsecureRequestWarning)
# Change recursion limit for... you know, factorizing stuff...
sys.setrecursionlimit(5000)
cRED = "\033[1;31m"
cEND = "\033[0m"
banner = """
__________ R_______________________________E __ .__
\______ \ ___________ R\_ ___ \__ ___/\_ _____/E/ |_ ____ ____ | |
| _// ___/\__ \ R/ \ \/ | | | __)E \ __\/ _ \ / _ \| |
| | \\\___ \ / __ \R\ \____| | | \E | | ( <_> | <_> ) |__
|____|_ /____ >(____ /R\______ /|____| \___ /E |__| \____/ \____/|____/
\/ \/ \/ R\/E R\/E
""".replace(
"R", cRED
).replace(
"E", cEND
)
if __name__ == "__main__":
logger = logging.getLogger("global_logger")
parser = argparse.ArgumentParser(description="RSA CTF Tool")
parser.add_argument(
"--publickey", help="public key file. You can use wildcards for multiple keys."
)
parser.add_argument(
"--output", help="output file for results (privates keys, plaintext data)."
)
parser.add_argument(
"--timeout", help="Timeout for long attacks.", default=60, type=int
)
parser.add_argument(
"--createpub",
help="Take n and e from cli and just print a public key then exit",
action="store_true",
)
parser.add_argument(
"--dumpkey",
help="Just dump the RSA variables from a key - n,e,d,p,q",
action="store_true",
)
parser.add_argument(
"--ext",
help="Extended dump of RSA private variables in --dumpkey mode - dp,dq,pinv,qinv).",
action="store_true",
)
parser.add_argument("--uncipherfile", help="uncipher a file", default=None)
parser.add_argument("--uncipher", help="uncipher a cipher", default=None)
parser.add_argument(
"--verbosity", help="verbose mode", choices=logger_levels.keys(), default="INFO"
)
parser.add_argument(
"--private", help="Display private key if recovered", action="store_true"
)
parser.add_argument("--tests", help="Run tests on attacks", action="store_true")
parser.add_argument(
"--ecmdigits",
type=int,
help="Optionally an estimate as to how long one of the primes is for ECM method",
default=None,
)
parser.add_argument("-n", help="Specify the modulus. format : int or 0xhex")
parser.add_argument(
"-p", help="Specify the first prime number. format : int or 0xhex"
)
parser.add_argument(
"-q", help="Specify the second prime number. format : int or 0xhex"
)
parser.add_argument("-e", help="Specify the public exponent. format : int or 0xhex")
parser.add_argument("--key", help="Specify the private key file.")
parser.add_argument("--password", help="Private key password if needed.")
# Dynamically load all attacks to use as argparse choices
attacks = glob(join(dirname(__file__), "attacks", "single_key", "*.py"))
attacks += glob(join(dirname(__file__), "attacks", "multi_keys", "*.py"))
attacks_filtered = [
basename(f)[:-3] for f in attacks if isfile(f) and not f.endswith("__init__.py")
]
attacks_list = [_ for _ in attacks_filtered if _ != "nullattack"] + ["all"]
parser.add_argument(
"--attack",
help="Specify the attack modes.",
default="all",
nargs="+",
choices=attacks_list,
)
parser.add_argument(
"--sendtofdb", help="Send results to factordb", action="store_true"
)
parser.add_argument(
"--isconspicuous", help="conspicuous key check", action="store_true"
)
parser.add_argument(
"--isroca", help="Check if given key is roca", action="store_true"
)
parser.add_argument(
"--convert_idrsa_pub", help="Convert idrsa.pub to pem", action="store_true"
)
parser.add_argument(
"--check_publickey",
help="Check publickey if modulus is well formed before attack",
action="store_true",
)
args = parser.parse_args()
unciphers = []
# Set logger level
logging.basicConfig(
level=logger_levels[args.verbosity],
)
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(CustomFormatter())
logger = logging.getLogger("global_logger")
logger.propagate = False
logger.addHandler(ch)
# If no arguments, display help and exit
if len(sys.argv) == 1:
print(banner)
parser.print_help()
sys.exit(1)
# Add information
if not args.private and not args.tests:
logger.warning(
"private argument is not set, the private key will not be displayed, even if recovered."
)
# Parse long integer arguments if provided
if args.p is not None:
args.p = get_numeric_value(args.p)
if args.q is not None:
args.q = get_numeric_value(args.q)
if args.e is not None:
e_array = []
for e in args.e.split(","):
e_int = get_numeric_value(e)
e_array.append(e_int)
args.e = e_array if len(e_array) > 1 else e_array[0]
# get n if we can
if args.n is not None:
args.n = get_numeric_value(args.n)
elif args.p is not None and args.q is not None:
args.n = args.p * args.q
# if we have uncipher but no uncipherfile
if args.uncipher is not None:
uncipher_array = []
for uncipher in args.uncipher.split(","):
uncipher = get_numeric_value(uncipher)
uncipher = get_base64_value(uncipher)
uncipher_array.append(n2s(uncipher))
args.uncipher = uncipher_array
# if we have uncipherfile
if args.uncipherfile is not None:
uncipher_array = []
for uncipher in args.uncipherfile.split(","):
try:
with open(uncipher, "rb") as cipherfile_fd:
uncipher = get_base64_value(cipherfile_fd.read())
uncipher_array.append(uncipher)
except OSError:
logger.info("--uncipherfile : file not found or not readable.")
exit(1)
args.uncipher = uncipher_array
# If we have a private key in input and uncipher in args (or uncipherfile)
if args.key and args.uncipher:
priv_key = PrivateKey(filename=args.key, password=args.password)
unciphers = priv_key.decrypt(args.uncipher)
print_results(args, None, priv_key, unciphers)
exit(0)
# If we have n and one of p and q, calculate the other
if args.n and (args.p or args.q):
args.p, args.q = generate_pq_from_n_and_p_or_q(args.n, args.p, args.q)
# Convert an idrsa.pub file to pem format
if args.convert_idrsa_pub:
# for publickey in args.publickey:
publickeys = glob(args.publickey)
for publickey in publickeys:
logger.info("Converting %s: to pem..." % publickey)
with open(publickey, "r") as key_data_fd:
for line in key_data_fd:
n, e = disect_idrsa_pub(line.rstrip())
if n != None and e != None:
pub_key, priv_key = generate_keys_from_p_q_e_n(None, None, e, n)
print(pub_key.decode("utf-8"))
exit(0)
if args.isroca:
pubkeyfilelist = glob(args.publickey)
for publickey in pubkeyfilelist:
logger.info("[-] Details for %s:" % publickey)
with open(publickey, "rb") as key_data_fd:
try:
key = RSA.importKey(key_data_fd.read())
except:
key = None
logger.error("[!] Error file format: %s" % publickey)
if key is not None:
if is_roca_vulnerable(key.n):
logger.warning("[!] Public key %s: is roca!!!" % publickey)
else:
logger.info(
"[-] Public key %s: is not roca, you are safe" % publickey
)
exit(0)
# Create pubkey if requested
if args.createpub:
pub_key, priv_key = generate_keys_from_p_q_e_n(args.p, args.q, args.e, args.n)
print(pub_key.decode("utf-8"))
exit(0)
# Load keys
tmpfile = None
if args.publickey is None and args.e is not None and args.n is not None:
args.publickey = []
for e in args.e if isinstance(args.e, list) else [args.e]:
tmpfile = tempfile.NamedTemporaryFile(delete=False)
with open(tmpfile.name, "wb") as tmpfd:
tmpfd.write(RSA.construct((args.n, e)).publickey().exportKey())
args.publickey.append(tmpfile.name)
elif args.publickey is not None:
if "*" in args.publickey or "?" in args.publickey:
pubkeyfilelist = glob(args.publickey)
args.publickey = pubkeyfilelist
elif "," in args.publickey:
args.publickey = args.publickey.split(",")
else:
args.publickey = [args.publickey]
# If we already have all the information
if (
args.p is not None
and args.q is not None
and args.e is not None
and args.n is not None
):
try:
pub_key, priv_key = generate_keys_from_p_q_e_n(
args.p, args.q, args.e, args.n
)
except ValueError:
logger.error(
"Looks like the values for generating key are not ok... (no invmod)"
)
exit(1)
if args.createpub:
print(pub_key)
if args.uncipher is not None:
for u in args.uncipher:
if priv_key is not None:
unciphers.append(priv_key.decrypt(args.uncipher))
else:
logger.error(
"Looks like the values for generating key are not ok... (no invmod)"
)
exit(1)
print_results(args, args.publickey[0], priv_key, unciphers)
exit(0)
# Dump public key information
if (
args.dumpkey
and not args.private
and args.uncipher is None
and args.uncipherfile is None
and args.publickey is not None
):
for publickey in args.publickey:
logger.info("Details for %s:" % publickey)
with open(publickey, "rb") as key_data_fd:
key = RSA.importKey(key_data_fd.read())
print("n: " + str(key.n))
print("e: " + str(key.e))
exit(0)
# If in dumpkey mode, dump the key components then quit
if args.key is not None and args.dumpkey:
key_data = open(args.key, "rb").read()
key = RSA.importKey(key_data)
print("n: " + str(key.n))
print("e: " + str(key.e))
if key.has_private():
print("d: " + str(key.d))
print("p: " + str(key.p))
print("q: " + str(key.q))
if args.ext:
dp = key.d % (key.p - 1)
dq = key.d % (key.q - 1)
pinv = invmod(key.p, key.q)
qinv = invmod(key.q, key.p)
print("dp: " + str(dp))
print("dq: " + str(dq))
print("pinv: " + str(pinv))
print("qinv: " + str(qinv))
exit(0)
if args.key is not None and args.isconspicuous:
with open(args.key, "rb") as key_fp:
key_data = key_fp.read()
key = RSA.importKey(key_data)
try:
pub_key, priv_key = generate_keys_from_p_q_e_n(
args.p, args.q, args.e, args.n
)
except ValueError:
logger.error(
"Looks like the values for generating key are not ok... (no invmod)"
)
exit(1)
if priv_key.is_conspicuous() == True:
exit(-1)
else:
exit(0)
# Run attacks
found = False
attackobj = RSAAttack(args)
# Run tests
if args.publickey is None and args.tests:
selected_attacks = attacks_list
if args.attack is not None:
if "," not in args.attack:
selected_attacks = args.attack
if "all" in selected_attacks:
selected_attacks = attacks_list
tmpfile = tempfile.NamedTemporaryFile()
with open(tmpfile.name, "wb") as tmpfd:
tmpfd.write(RSA.construct((35, 3)).publickey().exportKey())
attackobj.attack_single_key(tmpfile.name, selected_attacks, test=True)
# Attack multiple keys
if args.publickey is not None and len(args.publickey) > 1:
found = attackobj.attack_multiple_keys(args.publickey, attacks_list)
# Attack key
if args.publickey is not None:
for publickey in args.publickey:
attackobj.implemented_attacks = []
attackobj.unciphered = []
logger.info("\n[*] Testing key %s." % publickey)
attackobj.attack_single_key(publickey, attacks_list)
if args.publickey is None:
logger.error("No key specified")
for pub in args.publickey:
try:
if "tmp" in pub:
os.remove(pub)
except:
continue
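# --- Illustrative sketch added by the editor; not part of the original tool. ---
# The "Dynamically load all attacks" block above feeds module names discovered
# on disk into argparse choices. This is the same idea in miniature; the
# attacks/single_key directory name comes from the code above, and the glob
# simply yields nothing if that directory does not exist.
if __name__ == "__main__":
    import argparse
    from glob import glob
    from os.path import basename, dirname, isfile, join
    discovered = glob(join(dirname(__file__), "attacks", "single_key", "*.py"))
    names = [basename(f)[:-3] for f in discovered if isfile(f) and not f.endswith("__init__.py")]
    demo = argparse.ArgumentParser(description="plugin-choice sketch")
    demo.add_argument("--attack", nargs="+", choices=names + ["all"], default="all")
    print(demo.parse_args([]))  # Namespace(attack='all')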
| 35.783251
| 100
| 0.572825
|
c7737de2ab3dc783a27390fbc73c819ae70e32bd
| 458
|
py
|
Python
|
SRC/tool/project/model/testCase.py
|
lizhq1/python
|
4250cf0c5045cf0e0d55e2c0f8009a0b026e43b3
|
[
"bzip2-1.0.6"
] | 1
|
2022-03-23T03:14:36.000Z
|
2022-03-23T03:14:36.000Z
|
database/schemes/easyTest/SRC/tool/project/model/testCase.py
|
TonnaMajesty/test
|
68b24d1f3e8b4d6154c9d896a7fa3e2f99b49a6f
|
[
"MIT"
] | null | null | null |
database/schemes/easyTest/SRC/tool/project/model/testCase.py
|
TonnaMajesty/test
|
68b24d1f3e8b4d6154c9d896a7fa3e2f99b49a6f
|
[
"MIT"
] | null | null | null |
# coding:utf-8
'''
Test case model
'''
from SRC.tool.project.model.testParam import TestParam
class TestCase():
def __init__(self,path='',param=None):
self.path=path
self.testParamPath=param
self.paramObj=None
def setTestParam(self,paramPath=None):
if paramPath:
# set the new path first so the TestParam object is built from it
self.testParamPath=paramPath
self.paramObj = TestParam(self.testParamPath)
else:
self.paramObj=None
self.testParamPath = None
def getTestParamObj(self):
return self.paramObj
| 19.913043
| 54
| 0.746725
|
fb2b6ddefe13f8e76daae464395f9e752b826bad
| 4,338
|
py
|
Python
|
convertextract/cli.py
|
roedoejet/convertextract
|
bf194a7d81d847d68690ea0d58dc47a70259cd78
|
[
"MIT"
] | 12
|
2016-10-20T16:17:04.000Z
|
2022-03-10T06:36:59.000Z
|
convertextract/cli.py
|
roedoejet/convertextract
|
bf194a7d81d847d68690ea0d58dc47a70259cd78
|
[
"MIT"
] | 3
|
2018-01-12T00:41:26.000Z
|
2020-08-12T05:04:45.000Z
|
convertextract/cli.py
|
roedoejet/convertextract
|
bf194a7d81d847d68690ea0d58dc47a70259cd78
|
[
"MIT"
] | 3
|
2020-08-18T21:47:03.000Z
|
2022-02-03T06:32:46.000Z
|
"""
Use argparse to handle command-line arguments.
"""
import argparse
import encodings
import os
import pkgutil
import sys
import six
import re
import glob
from g2p.mappings.langs import LANGS_NETWORK
import argcomplete
from convertextract import VERSION
from convertextract.parsers import DEFAULT_ENCODING, _get_available_extensions
from convertextract import process
from convertextract.exceptions import CommandLineError
from convertextract.colors import red
class AddToNamespaceAction(argparse.Action):
"""This adds KEY,VALUE arbitrary pairs to the argparse.Namespace object
"""
def __call__(self, parser, namespace, values, option_string=None):
key, val = values.strip().split('=')
if hasattr(namespace, key):
parser.error((
'Duplicate specification of the key "%(key)s" with --option.'
) % locals())
setattr(namespace, key, val)
# Fix FileType to honor 'b' flag, see: https://bugs.python.org/issue14156
class FileType(argparse.FileType):
def __call__(self, string):
if string == '-' and six.PY3:
if 'r' in self._mode:
string = sys.stdin.fileno()
elif 'w' in self._mode:
string = sys.stdout.fileno()
return super(FileType, self).__call__(string)
# This function is necessary to enable autodocumentation of the script
# output
def get_parser():
"""Initialize the parser for the command line interface and bind the
autocompletion functionality"""
# initialize the parser
parser = argparse.ArgumentParser(
description=(
'Command line tool for extracting text from any document. '
) % locals(),
)
# define the command line options here
parser.add_argument(
'filename', help='Filename to extract text from'
).completer = argcomplete.completers.FilesCompleter
parser.add_argument(
'-e', '--encoding', type=str, default=DEFAULT_ENCODING,
choices=_get_available_encodings(),
help='Specify the encoding of the output',
)
parser.add_argument(
'--extension', type=str, default=None,
choices=_get_available_extensions(),
help='Specify the extension of the file',
)
parser.add_argument(
'-o', '--output', type=FileType('wb'),
help='Output raw text in this file',
)
parser.add_argument('--no-write', dest='no_write', action='store_true',
help="Disable default console writing of converted file.")
parser.add_argument(
'-m', '--mapping', type=os.path.abspath,
help='Path to a lookup table for conversion. Only use this if the g2p library does not have the mapping you want.',
)
parser.add_argument('-il', '--input-language',
choices=LANGS_NETWORK.nodes,
help='The input language to be converted from, for a full list please visit https://g2p-studio.herokuapp.com/api/v1/langs')
parser.add_argument('-ol', '--output-language',
choices=LANGS_NETWORK.nodes,
help='The output language to be converted to, for a full list please visit https://g2p-studio.herokuapp.com/api/v1/langs')
parser.add_argument(
'-O', '--option', type=str, action=AddToNamespaceAction,
help=(
'Add arbitrary options to various parsers of the form '
'KEYWORD=VALUE. A full list of available KEYWORD options is '
'available at http://bit.ly/textract-options'
),
)
parser.add_argument(
'-v', '--version', action='version', version='%(prog)s '+VERSION,
)
# enable autocompletion with argcomplete
# argcomplete.autocomplete(parser)
return parser
def _get_available_encodings():
"""Get a list of the available encodings to make it easy to
tab-complete the command line interface.
Inspiration from http://stackoverflow.com/a/3824405/564709
"""
available_encodings = set(encodings.aliases.aliases.values())
paths = [os.path.dirname(encodings.__file__)]
for importer, modname, ispkg in pkgutil.walk_packages(path=paths):
available_encodings.add(modname)
available_encodings = list(available_encodings)
available_encodings.sort()
return available_encodings
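# --- Illustrative sketch added by the editor; not part of the original module. ---
# How the AddToNamespaceAction defined above behaves: each "-O KEY=VALUE" pair
# becomes an attribute on the parsed Namespace, and repeating a key is an
# error. The option keys and values below are made up for illustration.
if __name__ == '__main__':
    demo = argparse.ArgumentParser(description='KEY=VALUE option sketch')
    demo.add_argument('-O', '--option', type=str, action=AddToNamespaceAction)
    ns = demo.parse_args(['-O', 'language=eng', '-O', 'method=pdftotext'])
    print(ns.language, ns.method)  # -> eng pdftotext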
| 34.15748
| 147
| 0.663209
|
7e9855732dd1cace10cc6ca27fd509480be395ca
| 13,816
|
py
|
Python
|
chrome/test/functional/media/media_constrained_network_perf.py
|
gavinp/chromium
|
681563ea0f892a051f4ef3d5e53438e0bb7d2261
|
[
"BSD-3-Clause"
] | 1
|
2016-03-10T09:13:57.000Z
|
2016-03-10T09:13:57.000Z
|
chrome/test/functional/media/media_constrained_network_perf.py
|
gavinp/chromium
|
681563ea0f892a051f4ef3d5e53438e0bb7d2261
|
[
"BSD-3-Clause"
] | 1
|
2022-03-13T08:39:05.000Z
|
2022-03-13T08:39:05.000Z
|
chrome/test/functional/media/media_constrained_network_perf.py
|
gavinp/chromium
|
681563ea0f892a051f4ef3d5e53438e0bb7d2261
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Records metrics on playing media under constrained network conditions.
Spins up a Constrained Network Server (CNS) and runs through a test matrix of
bandwidth, latency, and packet loss settings. Each run records a
time-to-playback (TTP) and extra-play-percentage (EPP) metric in a format
consumable by the Chromium perf bots.
Since even a small number of different settings yields a large test matrix, the
design is threaded... however PyAuto is not, so a global lock is used when calls
into PyAuto are necessary. The number of threads can be set by _TEST_THREADS.
The CNS code is located under: <root>/src/media/tools/constrained_network_server
"""
import itertools
import logging
import os
import Queue
import subprocess
import sys
import threading
import urllib2
import pyauto_media
import pyauto
import pyauto_paths
import pyauto_utils
# The list of tests to run. Each entry is of the form:
# 'TEST_NAME' : [BANDWIDTH_Kbps, LATENCY_ms, PACKET_LOSS_%]
#
# The first five tests were manually selected to cover a range of bad network
# constraints to good ones. Those tests resulted in stable results and are
# suitable for regression testing.
#
# The WiFi, DSL, and Cable settings were taken from webpagetest.org as
# approximations of their respective real world networks. The settings were
# based on 2011 FCC Broadband Data report (http://www.fcc.gov/document/
# measuring-broadband-america-report-consumer-broadband-performance-us).
#
# Previous tests with 2% and 5% packet loss resulted in unstable data. Thus
# packet loss is not used often for perf graphs. Tests with very low bandwidth,
# such as 56K Dial-up, resulted in very slow tests (about 8 mins to run each
# test iteration). In addition, metrics for Dial-up would be out of range of the
# other tests' metrics, making the graphs hard to read.
#
# Note: The test name should satisfy the regex [\w\.-]+ (check
# tools/perf_expectations/tests/perf_expectations_unittest.py for details).
#
# TODO(shadi): After recording metrics and getting stable results, remove the
# first 5 settings if the last five are enough for regression testing.
_TESTS_TO_RUN = {
'LowHighMedium': [256, 180, 2],
'LowMediumNone': [256, 105, 0],
'MediumMediumNone': [2000, 105, 0],
'HighMediumNone': [5000, 105, 0],
'HighLowNone': [5000, 43, 0],
'Wifi_1Mbps_60ms': [1024, 60, 0],
'DSL_1.5Mbps_50ms': [1541, 50, 0],
'Cable_5Mbps_28ms': [5120, 28, 0],
'NoConstraints': [0, 0, 0]
}
# HTML test path; relative to src/chrome/test/data. Loads a test video and
# records metrics in JavaScript.
_TEST_HTML_PATH = os.path.join(
'media', 'html', 'media_constrained_network.html')
# Number of threads to use during testing.
_TEST_THREADS = 3
# Number of times we run the same test to eliminate outliers.
_TEST_ITERATIONS = 3
# File name of video to collect metrics for and its duration (used for timeout).
# TODO(dalecurtis): Should be set on the command line.
_TEST_VIDEO = 'roller.webm'
_TEST_VIDEO_DURATION_SEC = 28.53
# Path to CNS executable relative to source root.
_CNS_PATH = os.path.join(
'media', 'tools', 'constrained_network_server', 'cns.py')
# Port to start the CNS on.
_CNS_PORT = 9000
# Base CNS URL, only requires & separated parameter names appended.
_CNS_BASE_URL = 'http://127.0.0.1:%d/ServeConstrained?' % _CNS_PORT
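# Example of a fully assembled URL (illustrative; the parameters are appended by
# TestWorker.run() below):
#   http://127.0.0.1:9000/ServeConstrained?f=roller.webm&bandwidth=256&latency=105&loss=2&new_port=true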
def Median(values):
"""Returns the median for a list of values."""
if not values:
return None
values = sorted(values)
if len(values) % 2 == 1:
return values[len(values) / 2]
else:
lower = values[(len(values) / 2) - 1]
upper = values[len(values) / 2]
return (float(lower + upper)) / 2
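# Worked example: Median([3, 1, 2]) -> 2 (middle element);
# Median([4, 1, 3, 2]) -> 2.5 (mean of the two middle elements).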
class TestWorker(threading.Thread):
"""Worker thread. For each queue entry: opens tab, runs test, closes tab."""
# Atomic, monotonically increasing task identifier. Used to ID tabs.
_task_id = itertools.count()
def __init__(self, pyauto_test, tasks, automation_lock, url):
"""Sets up TestWorker class variables.
Args:
pyauto_test: Reference to a pyauto.PyUITest instance.
tasks: Queue containing (settings, name) tuples.
automation_lock: Global automation lock for pyauto calls.
url: File URL to HTML/JavaScript test code.
"""
threading.Thread.__init__(self)
self._tasks = tasks
self._automation_lock = automation_lock
self._pyauto = pyauto_test
self._url = url
self._metrics = {}
self.fail_count = 0
self.start()
def _FindTabLocked(self, url):
"""Returns the tab index for the tab belonging to this url.
self._automation_lock must be owned by caller.
"""
for tab in self._pyauto.GetBrowserInfo()['windows'][0]['tabs']:
if tab['url'] == url:
return tab['index']
def _HaveMetricOrError(self, var_name, unique_url):
"""Checks if the page has variable value ready or if an error has occured.
The variable value must be set to < 0 pre-run.
Args:
var_name: The variable name to check the metric for.
unique_url: The url of the page to check for the variable's metric.
Returns:
True if the var_name value is >= 0 or if an error_msg exists.
"""
with self._automation_lock:
tab = self._FindTabLocked(unique_url)
self._metrics[var_name] = int(self._pyauto.GetDOMValue(var_name,
tab_index=tab))
end_test = self._pyauto.GetDOMValue('endTest', tab_index=tab)
return self._metrics[var_name] >= 0 or end_test
def _GetEventsLog(self, unique_url):
"""Returns the log of video events fired while running the test.
Args:
unique_url: The url of the page identifying the test.
"""
with self._automation_lock:
tab = self._FindTabLocked(unique_url)
return self._pyauto.GetDOMValue('eventsMsg', tab_index=tab)
def _GetVideoProgress(self, unique_url):
"""Gets the video's current play progress percentage.
Args:
unique_url: The url of the page to check for video play progress.
"""
with self._automation_lock:
return int(self._pyauto.CallJavascriptFunc(
'calculateProgress', tab_index=self._FindTabLocked(unique_url)))
def run(self):
"""Opens tab, starts HTML test, and records metrics for each queue entry.
No exception handling is done to make sure the main thread exits properly
during Chrome crashes or other failures. Doing otherwise has the potential
to leave the CNS server running in the background.
For a clean shutdown, put the magic exit value (None, None) in the queue.
"""
while True:
series_name, settings = self._tasks.get()
# Check for magic exit values.
if (series_name, settings) == (None, None):
break
# Build video source URL. Values <= 0 mean the setting is disabled.
video_url = [_CNS_BASE_URL, 'f=' + _TEST_VIDEO]
if settings[0] > 0:
video_url.append('bandwidth=%d' % settings[0])
if settings[1] > 0:
video_url.append('latency=%d' % settings[1])
if settings[2] > 0:
video_url.append('loss=%d' % settings[2])
video_url.append('new_port=true')
video_url = '&'.join(video_url)
ttp_results = []
epp_results = []
for iter_num in xrange(_TEST_ITERATIONS):
# Make the test URL unique so we can figure out our tab index later.
unique_url = '%s?%d' % (self._url, TestWorker._task_id.next())
# Start the test!
with self._automation_lock:
self._pyauto.AppendTab(pyauto.GURL(unique_url))
self._pyauto.CallJavascriptFunc(
'startTest', [video_url],
tab_index=self._FindTabLocked(unique_url))
# Wait until the necessary metrics have been collected. Okay to not lock
# here since pyauto.WaitUntil doesn't call into Chrome.
self._metrics['epp'] = self._metrics['ttp'] = -1
self._pyauto.WaitUntil(
self._HaveMetricOrError, args=['ttp', unique_url], retry_sleep=1,
timeout=10, debug=False)
# Do not wait for epp if ttp is not available.
if self._metrics['ttp'] >= 0:
ttp_results.append(self._metrics['ttp'])
self._pyauto.WaitUntil(
self._HaveMetricOrError, args=['epp', unique_url], retry_sleep=2,
timeout=_TEST_VIDEO_DURATION_SEC * 10, debug=False)
if self._metrics['epp'] >= 0:
epp_results.append(self._metrics['epp'])
logging.debug('Iteration:%d - Test %s ended with %d%% of the video '
'played.', iter_num, series_name,
self._GetVideoProgress(unique_url),)
if self._metrics['ttp'] < 0 or self._metrics['epp'] < 0:
logging.error('Iteration:%d - Test %s failed to end gracefully due '
'to time-out or error.\nVideo events fired:\n%s',
iter_num, series_name, self._GetEventsLog(unique_url))
# Close the tab.
with self._automation_lock:
self._pyauto.GetBrowserWindow(0).GetTab(
self._FindTabLocked(unique_url)).Close(True)
# Check if any of the tests failed to report the metrics.
if not len(ttp_results) == len(epp_results) == _TEST_ITERATIONS:
self.fail_count += 1
# End of iterations, print results.
logging.debug('TTP results: %s', ttp_results)
logging.debug('EPP results: %s', epp_results)
pyauto_utils.PrintPerfResult('epp', series_name, max(epp_results), '%')
pyauto_utils.PrintPerfResult('ttp', series_name,
Median(ttp_results), 'ms')
# TODO(dalecurtis): Check results for regressions.
self._tasks.task_done()
class ProcessLogger(threading.Thread):
"""A thread to log a process's stderr output."""
def __init__(self, process):
"""Starts the process logger thread.
Args:
process: The process to log.
"""
threading.Thread.__init__(self)
self._process = process
self.start()
def run(self):
"""Adds debug statements for the process's stderr output."""
line = True
while line:
line = self._process.stderr.readline()
logging.debug(line.strip())
class MediaConstrainedNetworkPerfTest(pyauto.PyUITest):
"""PyAuto test container. See file doc string for more information."""
def setUp(self):
"""Starts the Constrained Network Server (CNS)."""
cmd = [sys.executable, os.path.join(pyauto_paths.GetSourceDir(), _CNS_PATH),
'--port', str(_CNS_PORT),
'--interface', 'lo',
'--www-root', os.path.join(
self.DataDir(), 'pyauto_private', 'media'),
'-v',
'--expiry-time', '0']
process = subprocess.Popen(cmd, stderr=subprocess.PIPE)
# Wait for server to start up.
line = True
while line:
line = process.stderr.readline()
logging.debug(line.strip())
if 'STARTED' in line:
self._server_pid = process.pid
pyauto.PyUITest.setUp(self)
ProcessLogger(process)
if self._CanAccessServer():
return
# Need to call teardown since the server has already started.
self.tearDown()
self.fail('Failed to start CNS.')
def _CanAccessServer(self):
"""Checks if the CNS server can serve a file with no network constraints."""
test_url = ''.join([_CNS_BASE_URL, 'f=', _TEST_VIDEO])
try:
return urllib2.urlopen(test_url) is not None
except Exception, e:
logging.exception(e)
return False
def tearDown(self):
"""Stops the Constrained Network Server (CNS)."""
pyauto.PyUITest.tearDown(self)
self.Kill(self._server_pid)
def _RunDummyTest(self, automation_lock, test_url):
"""Runs a dummy test with high bandwidth and no latency or packet loss.
Fails the unit test if the dummy test does not end.
Args:
automation_lock: Global automation lock for pyauto calls.
test_url: File URL to HTML/JavaScript test code.
"""
tasks = Queue.Queue()
tasks.put(('Dummy Test', [5000, 0, 0]))
tasks.put((None, None))
dummy_test = TestWorker(self, tasks, automation_lock, test_url)
dummy_test.join()
# Dummy test should successfully finish by storing epp results.
if dummy_test.fail_count:
self.fail('Failed to run dummy test.')
def testConstrainedNetworkPerf(self):
"""Starts CNS, spins up worker threads to run through _TEST_CONSTRAINTS."""
# Convert relative test path into an absolute path.
test_url = self.GetFileURLForDataPath(_TEST_HTML_PATH)
# PyAuto doesn't support threads, so we synchronize all automation calls.
automation_lock = threading.Lock()
# Run a dummy test to avoid Chrome/CNS startup overhead.
logging.debug('Starting a dummy test to avoid Chrome/CNS startup overhead.')
self._RunDummyTest(automation_lock, test_url)
logging.debug('Dummy test has finished. Starting real perf tests.')
# Spin up worker threads.
tasks = Queue.Queue()
threads = []
for _ in xrange(_TEST_THREADS):
threads.append(TestWorker(self, tasks, automation_lock, test_url))
for series_name, settings in _TESTS_TO_RUN.iteritems():
logging.debug('Add test: %s\tSettings: %s', series_name, settings)
tasks.put((series_name, settings))
# Add shutdown magic to end of queue.
for thread in threads:
tasks.put((None, None))
# Wait for threads to exit, gracefully or otherwise.
for thread in threads:
thread.join()
if __name__ == '__main__':
# TODO(dalecurtis): Process command line parameters here.
pyauto_media.Main()
| 35.885714
| 80
| 0.675594
|
e2f6506c4524215cd0e2b2121a900ba4452b678e
| 26,705
|
py
|
Python
|
sdk/tables/azure-data-tables/azure/data/tables/_table_batch.py
|
lynshi/azure-sdk-for-python
|
40c530f2e9a6d93025b01cc8f6c94829c7fe95fc
|
[
"MIT"
] | null | null | null |
sdk/tables/azure-data-tables/azure/data/tables/_table_batch.py
|
lynshi/azure-sdk-for-python
|
40c530f2e9a6d93025b01cc8f6c94829c7fe95fc
|
[
"MIT"
] | null | null | null |
sdk/tables/azure-data-tables/azure/data/tables/_table_batch.py
|
lynshi/azure-sdk-for-python
|
40c530f2e9a6d93025b01cc8f6c94829c7fe95fc
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import (
TYPE_CHECKING,
Union,
Any,
Dict,
Mapping,
Optional
)
from ._common_conversion import _is_cosmos_endpoint, _transform_patch_to_cosmos_post
from ._models import UpdateMode
from ._serialize import _get_match_headers, _add_entity_properties
from ._entity import TableEntity
if TYPE_CHECKING:
from ._generated import models
EntityType = Union[TableEntity, Mapping[str, Any]]
class TableBatchOperations(object):
"""
This is the class that is used for batch operations for the data tables
service.
The Tables service supports batch transactions on entities that are in the
same table and belong to the same partition group. Multiple operations are
supported within a single transaction. The batch can include at most 100
entities, and its total payload may be no more than 4 MB in size.
"""
def __init__(
self,
client, # type: AzureTable
serializer, # type: msrest.Serializer
deserializer, # type: msrest.Deserializer
config, # type: AzureTableConfiguration
table_name, # type: str
**kwargs # type: Dict[str, Any]
):
"""Create TableClient from a Credential.
:param client: an AzureTable object
:type client: AzureTable
:param serializer: serializer object for request serialization
:type serializer: msrest.Serializer
:param deserializer: deserializer object for request serialization
:type deserializer: msrest.Deserializer
:param config: Azure Table Configuration object
:type config: AzureTableConfiguration
:param table_name: name of the Table to perform operations on
:type table_name: str
:returns: None
"""
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
self.table_name = table_name
self._partition_key = kwargs.pop("partition_key", None)
self.requests = []
def __len__(self):
return len(self.requests)
def _verify_partition_key(
self, entity # type: EntityType
):
# type: (...) -> None
if self._partition_key is None:
self._partition_key = entity["PartitionKey"]
elif entity["PartitionKey"] != self._partition_key:
raise ValueError("Partition Keys must all be the same")
def create(
self,
entity, # type: EntityType
**kwargs # type: Any
):
# type: (...) -> None
"""Adds an insert operation to the current batch.
:param entity: The properties for the table entity.
:type entity: :class:`~azure.data.tables.TableEntity` or Dict[str,str]
:return: None
:rtype: None
:raises ValueError:
.. admonition:: Example:
.. literalinclude:: ../samples/sample_batching.py
:start-after: [START batching]
:end-before: [END batching]
:language: python
:dedent: 8
:caption: Creating and adding an entity to a Table
"""
self._verify_partition_key(entity)
temp = entity.copy()
if "PartitionKey" in temp and "RowKey" in temp:
temp = _add_entity_properties(temp)
else:
raise ValueError("PartitionKey and/or RowKey were not provided in entity")
self._batch_create_entity(table=self.table_name, entity=temp, **kwargs)
def _batch_create_entity(
self,
table, # type: str
entity, # type: EntityType
timeout=None, # type: Optional[int]
request_id_parameter=None, # type: Optional[str]
response_preference="return-no-content", # type: Optional[Union[str, "models.ResponseFormat"]]
query_options=None, # type: Optional["models.QueryOptions"]
**kwargs # type: Any
):
# type: (...) -> None
"""
Adds an insert operation to the batch. See
:func:`azure.data.tables.TableClient.insert_entity` for more information
on insert operations.
The operation will not be executed until the batch is committed
:param table: The table to perform the operation on.
:type table: str
:param entity: The entity to insert. Can be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: dict or :class:`~azure.data.tables.models.Entity`
"""
_format = None
if query_options is not None:
_format = query_options.format
data_service_version = "3.0"
content_type = kwargs.pop("content_type", "application/json;odata=nometadata")
accept = "application/json;odata=minimalmetadata"
# Construct URL
url = self._batch_create_entity.metadata["url"] # type: ignore
path_format_arguments = {
"url": self._serialize.url(
"self._config.url", self._config.url, "str", skip_quote=True
),
"table": self._serialize.url("table", table, "str"),
}
url = self._client._client.format_url( # pylint: disable=protected-access
url, **path_format_arguments
)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if timeout is not None:
query_parameters["timeout"] = self._serialize.query(
"timeout", timeout, "int", minimum=0
)
if _format is not None:
query_parameters["$format"] = self._serialize.query(
"format", _format, "str"
)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters["x-ms-version"] = self._serialize.header(
"self._config.version", self._config.version, "str"
)
if request_id_parameter is not None:
header_parameters["x-ms-client-request-id"] = self._serialize.header(
"request_id_parameter", request_id_parameter, "str"
)
header_parameters["DataServiceVersion"] = self._serialize.header(
"data_service_version", data_service_version, "str"
)
if response_preference is not None:
header_parameters["Prefer"] = self._serialize.header(
"response_preference", response_preference, "str"
)
header_parameters["Content-Type"] = self._serialize.header(
"content_type", content_type, "str"
)
header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
body_content_kwargs = {} # type: Dict[str, Any]
if entity is not None:
body_content = self._serialize.body(entity, "{object}")
else:
body_content = None
body_content_kwargs["content"] = body_content
request = self._client._client.post( # pylint: disable=protected-access
url, query_parameters, header_parameters, **body_content_kwargs
)
self.requests.append(request)
_batch_create_entity.metadata = {"url": "/{table}"} # type: ignore
def update(
self,
entity, # type: EntityType
mode=UpdateMode.MERGE, # type: Union[str, UpdateMode]
**kwargs # type: Any
):
# type: (...) -> None
"""Adds an update operation to the current batch.
:param entity: The properties for the table entity.
:type entity: :class:`~azure.data.tables.TableEntity` or Dict[str,str]
:param mode: Merge or Replace entity
:type mode: :class:`~azure.data.tables.UpdateMode`
:keyword str etag: Etag of the entity
:keyword match_condition: MatchCondition
:paramtype match_condition: ~azure.core.MatchCondition
:return: None
:rtype: None
:raises ValueError:
.. admonition:: Example:
.. literalinclude:: ../samples/sample_batching.py
:start-after: [START batching]
:end-before: [END batching]
:language: python
:dedent: 8
:caption: Creating and adding an entity to a Table
"""
self._verify_partition_key(entity)
temp = entity.copy()
match_condition = kwargs.pop("match_condition", None)
etag = kwargs.pop("etag", None)
if match_condition and not etag:
try:
etag = entity.metadata.get("etag", None)
except (AttributeError, TypeError):
pass
if_match, _ = _get_match_headers(
kwargs=dict(
kwargs,
etag=etag,
match_condition=match_condition,
),
etag_param="etag",
match_param="match_condition",
)
partition_key = temp["PartitionKey"]
row_key = temp["RowKey"]
temp = _add_entity_properties(temp)
if mode is UpdateMode.REPLACE:
self._batch_update_entity(
table=self.table_name,
partition_key=partition_key,
row_key=row_key,
if_match=if_match or "*",
table_entity_properties=temp,
**kwargs
)
elif mode is UpdateMode.MERGE:
self._batch_merge_entity(
table=self.table_name,
partition_key=partition_key,
row_key=row_key,
if_match=if_match or "*",
table_entity_properties=temp,
**kwargs
)
def _batch_update_entity(
self,
table, # type: str
partition_key, # type: str
row_key, # type: str
timeout=None, # type: Optional[int]
request_id_parameter=None, # type: Optional[str]
if_match=None, # type: Optional[str]
table_entity_properties=None, # type: Optional[EntityType]
query_options=None, # type: Optional["models.QueryOptions"]
**kwargs # type: Any
):
# type: (...) -> None
"""Update entity in a table.
:param table: The name of the table.
:type table: str
:param partition_key: The partition key of the entity.
:type partition_key: str
:param row_key: The row key of the entity.
:type row_key: str
:param timeout: The timeout parameter is expressed in seconds.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when analytics logging is enabled.
:type request_id_parameter: str
:param if_match: Match condition for an entity to be updated. If specified and a matching
entity is not found, an error will be raised. To force an unconditional update, set to the
wildcard character (*). If not specified, an insert will be performed when no existing entity
is found to update and a replace will be performed if an existing entity is found.
:type if_match: str
:param table_entity_properties: The properties for the table entity.
:type table_entity_properties: dict[str, object]
:param query_options: Parameter group.
:type query_options: ~azure.data.tables.models.QueryOptions
:return: None
:rtype: None
"""
_format = None
if query_options is not None:
_format = query_options.format
data_service_version = "3.0"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._batch_update_entity.metadata["url"] # type: ignore
path_format_arguments = {
"url": self._serialize.url(
"self._config.url", self._config.url, "str", skip_quote=True
),
"table": self._serialize.url("table", table, "str"),
"partitionKey": self._serialize.url("partition_key", partition_key, "str"),
"rowKey": self._serialize.url("row_key", row_key, "str"),
}
url = self._client._client.format_url( # pylint: disable=protected-access
url, **path_format_arguments
)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if timeout is not None:
query_parameters["timeout"] = self._serialize.query(
"timeout", timeout, "int", minimum=0
)
if _format is not None:
query_parameters["$format"] = self._serialize.query(
"format", _format, "str"
)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters["x-ms-version"] = self._serialize.header(
"self._config.version", self._config.version, "str"
)
if request_id_parameter is not None:
header_parameters["x-ms-client-request-id"] = self._serialize.header(
"request_id_parameter", request_id_parameter, "str"
)
header_parameters["DataServiceVersion"] = self._serialize.header(
"data_service_version", data_service_version, "str"
)
if if_match is not None:
header_parameters["If-Match"] = self._serialize.header(
"if_match", if_match, "str"
)
header_parameters["Content-Type"] = self._serialize.header(
"content_type", content_type, "str"
)
header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
body_content_kwargs = {} # type: Dict[str, Any]
if table_entity_properties is not None:
body_content = self._serialize.body(table_entity_properties, "{object}")
else:
body_content = None
body_content_kwargs["content"] = body_content
request = self._client._client.put( # pylint: disable=protected-access
url, query_parameters, header_parameters, **body_content_kwargs
)
self.requests.append(request)
_batch_update_entity.metadata = {
"url": "/{table}(PartitionKey='{partitionKey}',RowKey='{rowKey}')"
} # type: ignore
def _batch_merge_entity(
self,
table, # type: str
partition_key, # type: str
row_key, # type: str
timeout=None, # type: Optional[int]
request_id_parameter=None, # type: Optional[str]
if_match=None, # type: Optional[str]
table_entity_properties=None, # type: Optional[EntityType]
query_options=None, # type: Optional["models.QueryOptions"]
**kwargs # type: Any
):
# type: (...) -> None
"""Merge entity in a table.
:param table: The name of the table.
:type table: str
:param partition_key: The partition key of the entity.
:type partition_key: str
:param row_key: The row key of the entity.
:type row_key: str
:param timeout: The timeout parameter is expressed in seconds.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when analytics logging is enabled.
:type request_id_parameter: str
:param if_match: Match condition for an entity to be updated. If specified and a matching
entity is not found, an error will be raised. To force an unconditional update, set to the
wildcard character (*). If not specified, an insert will be performed when no existing entity
is found to update and a merge will be performed if an existing entity is found.
:type if_match: str
:param table_entity_properties: The properties for the table entity.
:type table_entity_properties: dict[str, object]
:param query_options: Parameter group.
:type query_options: ~azure.data.tables.models.QueryOptions
:return: None
:rtype: None
"""
_format = None
if query_options is not None:
_format = query_options.format
data_service_version = "3.0"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._batch_merge_entity.metadata["url"] # type: ignore
path_format_arguments = {
"url": self._serialize.url(
"self._config.url", self._config.url, "str", skip_quote=True
),
"table": self._serialize.url("table", table, "str"),
"partitionKey": self._serialize.url("partition_key", partition_key, "str"),
"rowKey": self._serialize.url("row_key", row_key, "str"),
}
url = self._client._client.format_url( # pylint: disable=protected-access
url, **path_format_arguments
)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if timeout is not None:
query_parameters["timeout"] = self._serialize.query(
"timeout", timeout, "int", minimum=0
)
if _format is not None:
query_parameters["$format"] = self._serialize.query(
"format", _format, "str"
)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters["x-ms-version"] = self._serialize.header(
"self._config.version", self._config.version, "str"
)
if request_id_parameter is not None:
header_parameters["x-ms-client-request-id"] = self._serialize.header(
"request_id_parameter", request_id_parameter, "str"
)
header_parameters["DataServiceVersion"] = self._serialize.header(
"data_service_version", data_service_version, "str"
)
if if_match is not None:
header_parameters["If-Match"] = self._serialize.header(
"if_match", if_match, "str"
)
header_parameters["Content-Type"] = self._serialize.header(
"content_type", content_type, "str"
)
header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
body_content_kwargs = {} # type: Dict[str, Any]
if table_entity_properties is not None:
body_content = self._serialize.body(table_entity_properties, "{object}")
else:
body_content = None
body_content_kwargs["content"] = body_content
request = self._client._client.patch( # pylint: disable=protected-access
url, query_parameters, header_parameters, **body_content_kwargs
)
if _is_cosmos_endpoint(url):
_transform_patch_to_cosmos_post(request)
self.requests.append(request)
_batch_merge_entity.metadata = {
"url": "/{table}(PartitionKey='{partitionKey}',RowKey='{rowKey}')"
}
def delete(
self,
entity, # type: EntityType
**kwargs # type: Any
):
# type: (...) -> None
"""Adds a delete operation to the current branch.
:param entity: The properties for the table entity.
:type entity: :class:`~azure.data.tables.TableEntity` or Dict[str,str]
:keyword str etag: Etag of the entity
:keyword match_condition: MatchCondition
:paramtype match_condition: ~azure.core.MatchCondition
:raises ValueError:
.. admonition:: Example:
.. literalinclude:: ../samples/sample_batching.py
:start-after: [START batching]
:end-before: [END batching]
:language: python
:dedent: 8
:caption: Creating and adding an entity to a Table
"""
self._verify_partition_key(entity)
temp = entity.copy()
partition_key = temp["PartitionKey"]
row_key = temp["RowKey"]
match_condition = kwargs.pop("match_condition", None)
etag = kwargs.pop("etag", None)
if match_condition and not etag:
try:
etag = entity.metadata.get("etag", None)
except (AttributeError, TypeError):
pass
if_match, _ = _get_match_headers(
kwargs=dict(
kwargs,
etag=etag,
match_condition=match_condition,
),
etag_param="etag",
match_param="match_condition",
)
self._batch_delete_entity(
table=self.table_name,
partition_key=partition_key,
row_key=row_key,
if_match=if_match or "*",
**kwargs
)
def _batch_delete_entity(
self,
table, # type: str
partition_key, # type: str
row_key, # type: str
if_match, # type: str
timeout=None, # type: Optional[int]
request_id_parameter=None, # type: Optional[str]
query_options=None, # type: Optional["models.QueryOptions"]
):
# type: (...) -> None
"""Deletes the specified entity in a table.
:param table: The name of the table.
:type table: str
:param partition_key: The partition key of the entity.
:type partition_key: str
:param row_key: The row key of the entity.
:type row_key: str
:param if_match: Match condition for an entity to be deleted. If specified and a matching
entity is not found, an error will be raised. To force an unconditional delete, set to the
wildcard character (*).
:type if_match: str
:param timeout: The timeout parameter is expressed in seconds.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when analytics logging is enabled.
:type request_id_parameter: str
:param query_options: Parameter group.
:type query_options: ~azure.data.tables.models.QueryOptions
:return: None
:rtype: None
"""
_format = None
if query_options is not None:
_format = query_options.format
data_service_version = "3.0"
accept = "application/json;odata=minimalmetadata"
# Construct URL
url = self._batch_delete_entity.metadata["url"] # type: ignore
path_format_arguments = {
"url": self._serialize.url(
"self._config.url", self._config.url, "str", skip_quote=True
),
"table": self._serialize.url("table", table, "str"),
"partitionKey": self._serialize.url("partition_key", partition_key, "str"),
"rowKey": self._serialize.url("row_key", row_key, "str"),
}
url = self._client._client.format_url( # pylint: disable=protected-access
url, **path_format_arguments
)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if timeout is not None:
query_parameters["timeout"] = self._serialize.query(
"timeout", timeout, "int", minimum=0
)
if _format is not None:
query_parameters["$format"] = self._serialize.query(
"format", _format, "str"
)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters["x-ms-version"] = self._serialize.header(
"self._config.version", self._config.version, "str"
)
if request_id_parameter is not None:
header_parameters["x-ms-client-request-id"] = self._serialize.header(
"request_id_parameter", request_id_parameter, "str"
)
header_parameters["DataServiceVersion"] = self._serialize.header(
"data_service_version", data_service_version, "str"
)
header_parameters["If-Match"] = self._serialize.header(
"if_match", if_match, "str"
)
header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
request = self._client._client.delete( # pylint: disable=protected-access
url, query_parameters, header_parameters
)
self.requests.append(request)
_batch_delete_entity.metadata = {
"url": "/{table}(PartitionKey='{partitionKey}',RowKey='{rowKey}')"
}
def upsert(
self,
entity, # type: EntityType
mode=UpdateMode.MERGE, # type: Union[str, UpdateMode]
**kwargs # type: Any
):
# type: (...) -> None
"""Adds an upsert (update/merge) operation to the batch.
:param entity: The properties for the table entity.
:type entity: :class:`~azure.data.tables.TableEntity` or Dict[str,str]
:param mode: Merge or Replace entity
:type mode: :class:`~azure.data.tables.UpdateMode`
:raises ValueError:
.. admonition:: Example:
.. literalinclude:: ../samples/sample_batching.py
:start-after: [START batching]
:end-before: [END batching]
:language: python
:dedent: 8
:caption: Creating and adding an entity to a Table
"""
self._verify_partition_key(entity)
temp = entity.copy()
partition_key = temp["PartitionKey"]
row_key = temp["RowKey"]
temp = _add_entity_properties(temp)
if mode is UpdateMode.MERGE:
self._batch_merge_entity(
table=self.table_name,
partition_key=partition_key,
row_key=row_key,
table_entity_properties=temp,
**kwargs
)
elif mode is UpdateMode.REPLACE:
self._batch_update_entity(
table=self.table_name,
partition_key=partition_key,
row_key=row_key,
table_entity_properties=temp,
**kwargs
)
| 38.59104
| 103
| 0.595581
|
e92e89d9c08411a53430562ef0502b358ad60733
| 621
|
py
|
Python
|
user/models.py
|
Mariga123/solid-octo-robot
|
b4f9eb9dd2b552d021a6ef57b50e833ccf0e046e
|
[
"MIT"
] | null | null | null |
user/models.py
|
Mariga123/solid-octo-robot
|
b4f9eb9dd2b552d021a6ef57b50e833ccf0e046e
|
[
"MIT"
] | null | null | null |
user/models.py
|
Mariga123/solid-octo-robot
|
b4f9eb9dd2b552d021a6ef57b50e833ccf0e046e
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from PIL import Image
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
image = models.ImageField(default='default.jpg', upload_to='profile_pics')
def __str__(self):
return f'{self.user.username} Profile'
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
img = Image.open(self.image.path)
if img.height > 300 or img.width > 300:
output_size =(300, 300)
img.thumbnail(output_size)
img.save(self.image.path)
| 28.227273
| 78
| 0.653784
|
591783720de51e0481b137d6fbd358a2fdc0df4e
| 10,564
|
py
|
Python
|
jiamtrader/app/cta_strategy/strategies/high_frequency_strategy.py
|
zxc1342802/leijmtrader
|
f24d5593d8708e48f2a9180d9469a6c2af93a08d
|
[
"MIT"
] | null | null | null |
jiamtrader/app/cta_strategy/strategies/high_frequency_strategy.py
|
zxc1342802/leijmtrader
|
f24d5593d8708e48f2a9180d9469a6c2af93a08d
|
[
"MIT"
] | null | null | null |
jiamtrader/app/cta_strategy/strategies/high_frequency_strategy.py
|
zxc1342802/leijmtrader
|
f24d5593d8708e48f2a9180d9469a6c2af93a08d
|
[
"MIT"
] | null | null | null |
from jiamtrader.app.cta_strategy import (
CtaTemplate,
StopOrder,
TickData,
BarData,
TradeData,
OrderData
)
from jiamtrader.app.cta_strategy.engine import CtaEngine
from jiamtrader.trader.event import EVENT_TIMER
from jiamtrader.event import Event
from jiamtrader.trader.object import Direction, Status
from jiamtrader.trader.object import GridPositionCalculator
class HighFrequencyStrategy(CtaTemplate):
"""
High-frequency grid strategy: place buy and sell orders above and below the
current price, wait for fills, and keep averaging down (adding to the position)
to lower the average entry price.
Disclaimer: this strategy is provided for testing and reference only; the author
accepts no liability. Read the code, test it for bugs, and make sure you
understand what it does before using it.
Binance referral link: https://www.binancezh.pro/cn/futures/ref/51bitquant
Futures referral code: 51bitquant
"""
author = "51bitquant"
grid_step = 1.0
stop_multiplier = 15.0
trading_size = 1.0
max_pos = 15.0 # Maximum position size (in multiples of trading_size).
stop_mins = 15.0 # How many minutes to pause after a losing stop-out.
# Variables.
avg_price = 0.0
current_pos = 0.0
parameters = ["grid_step", "stop_multiplier", "trading_size", "max_pos", "stop_mins"]
variables = ["avg_price", "current_pos"]
def __init__(self, cta_engine: CtaEngine, strategy_name, vt_symbol, setting):
""""""
super().__init__(cta_engine, strategy_name, vt_symbol, setting)
self.position = GridPositionCalculator(grid_step=self.grid_step)
self.avg_price = self.position.avg_price
self.current_pos = self.position.pos
# orders
self.long_orders = []
self.short_orders = []
self.stop_orders = []
self.profit_orders = []
self.timer_count = 0
self.stop_loss_interval = 0
self.trigger_stop_loss = False
self.cancel_order_interval = 0
self.tick: TickData = None
self.last_filled_order: OrderData = None
def on_init(self):
"""
Callback when strategy is inited.
"""
self.write_log("策略初始化")
def on_start(self):
"""
Callback when strategy is started.
"""
self.write_log("策略启动")
self.cta_engine.event_engine.register(EVENT_TIMER, self.process_timer_event)
def on_stop(self):
"""
Callback when strategy is stopped.
"""
self.write_log("策略停止")
self.cta_engine.event_engine.unregister(EVENT_TIMER, self.process_timer_event)
def process_timer_event(self, event: Event):
self.timer_count += 1
if self.timer_count >= 60:
self.timer_count = 0
# Cancel stop-loss orders.
for vt_id in self.stop_orders:
self.cancel_order(vt_id)
if self.trigger_stop_loss:
self.stop_loss_interval += 1
# Take-profit condition; this could live in on_tick, or here in the timer.
if abs(self.position.pos) > 0 and self.tick:
if self.position.pos > 0 and len(self.profit_orders) == 0:
price = self.position.avg_price + self.grid_step
price = max(price, self.tick.ask_price_1 * (1 + 0.0001))
vts = self.sell(price, abs(self.position.pos))
self.profit_orders.extend(vts)
print(f"多头重新下止盈单子: {vts}@{price}")
elif self.position.pos < 0 and len(self.profit_orders) == 0:
price = self.position.avg_price - self.grid_step
price = min(price, self.tick.bid_price_1 * (1 - 0.0001))
vts = self.cover(price, abs(self.position.pos))
self.profit_orders.extend(vts)
print(f"空头重新下止盈单子: {vts}@{price}")
self.cancel_order_interval += 1
if self.cancel_order_interval >= 15:
self.cancel_order_interval = 0
if abs(self.position.pos) < self.trading_size and (len(self.long_orders) == 0 or len(self.short_orders) == 0):
self.cancel_all()
print("当前没有仓位,多空单子不对等,需要重新开始. 先撤销所有订单.")
elif 0 < abs(self.position.pos) < (self.max_pos * self.trading_size):
if self.position.pos > 0 and len(self.long_orders) == 0 and self.last_filled_order:
step = self.get_step()
price = self.last_filled_order.price - self.grid_step * step
price = min(price, self.tick.bid_price_1 * (1 - 0.0001))
ids = self.buy(price, self.trading_size)
self.long_orders.extend(ids)
elif self.position.pos < 0 and len(self.short_orders) == 0 and self.last_filled_order:
step = self.get_step()
price = self.last_filled_order.price + self.grid_step * step
price = max(price, self.tick.ask_price_1 * (1 + 0.0001))
ids = self.short(price, self.trading_size)
self.short_orders.extend(ids)
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
self.tick = tick
if not self.trading:
return
if tick.bid_price_1 <= 0 or tick.ask_price_1 <= 0:
self.write_log(f"tick价格异常: bid1: {tick.bid_price_1}, ask1: {tick.ask_price_1}")
return
if abs(self.position.pos) < self.trading_size: # Effectively no position.
if len(self.long_orders) == 0 and len(self.short_orders) == 0:
if self.trigger_stop_loss:
# A stop-loss was previously triggered; respect its cooldown.
if self.stop_loss_interval < self.stop_mins * 60: # Rest for 15 minutes.
return
else:
self.trigger_stop_loss = False
self.stop_loss_interval = 0
buy_price = tick.bid_price_1 - self.grid_step / 2
sell_price = tick.bid_price_1 + self.grid_step / 2
long_ids = self.buy(buy_price, self.trading_size)
short_ids = self.short(sell_price, self.trading_size)
self.long_orders.extend(long_ids)
self.short_orders.extend(short_ids)
print(f"开始新的一轮状态: long_orders: {long_ids}@{buy_price}, short_orders:{short_ids}@{sell_price}")
if abs(self.position.pos) >= (self.max_pos * self.trading_size) and len(self.stop_orders) == 0:
if self.position.pos > 0 and tick.ask_price_1 < self.position.avg_price - self.stop_multiplier * self.grid_step:
vt_ids = self.sell(tick.ask_price_1, abs(self.position.pos))
stop_price = self.position.avg_price - self.stop_multiplier * self.grid_step
self.stop_orders.extend(vt_ids)
self.trigger_stop_loss = True
print(f"下多头止损单: stop_price: {stop_price}stop@{tick.ask_price_1}")
elif self.position.pos < 0 and tick.bid_price_1 > self.position.avg_price + self.stop_multiplier * self.grid_step:
stop_price = self.position.avg_price + self.stop_multiplier * self.grid_step
vt_ids = self.cover(tick.bid_price_1, abs(self.position.pos))
self.stop_orders.extend(vt_ids)
self.trigger_stop_loss = True
print(f"下空头止损单: stop_price: {stop_price}stop@{tick.bid_price_1}")
def on_bar(self, bar: BarData):
"""
Callback of new bar data update.
"""
pass
def get_step(self) -> int:
pos = abs(self.position.pos)
if pos < 3 * self.trading_size:
return 1
elif pos < 5 * self.trading_size:
return 2
elif pos < 8 * self.trading_size:
return 3
elif pos < 11 * self.trading_size:
return 5
elif pos < 13 * self.trading_size:
return 6
return 8
def on_order(self, order: OrderData):
"""
Callback of new order data update.
"""
self.position.update_position(order)
self.current_pos = self.position.pos
self.avg_price = self.position.avg_price
if order.vt_orderid in self.long_orders:
if order.status == Status.ALLTRADED:
self.long_orders.remove(order.vt_orderid)
print("多头成交,撤销空头订单和止盈订单")
for vt_id in (self.short_orders + self.profit_orders):
self.cancel_order(vt_id)
self.last_filled_order = order
if self.position.pos > 0:
if abs(self.position.pos) < self.trading_size * self.max_pos:
if not self.tick:
return
step = self.get_step()
price = order.price - self.grid_step * step
price = min(price, self.tick.bid_price_1 * (1 - 0.0001))
ids = self.buy(price, self.trading_size)
self.long_orders.extend(ids)
print(f"多头仓位继续下多头订单: {ids}@{price}")
elif order.status in [Status.REJECTED, Status.CANCELLED]:
self.long_orders.remove(order.vt_orderid)
elif order.vt_orderid in self.short_orders:
if order.status == Status.ALLTRADED:
self.short_orders.remove(order.vt_orderid)
print("空头成交,撤销多头订单和止盈订单")
for vt_id in (self.long_orders + self.profit_orders):
self.cancel_order(vt_id)
self.last_filled_order = order
if self.position.pos < 0:
if abs(self.position.pos) < self.trading_size * self.max_pos:
if not self.tick:
return
step = self.get_step()
price = order.price + self.grid_step * step
price = max(price, self.tick.ask_price_1 * (1 + 0.0001))
ids = self.short(price, self.trading_size)
self.short_orders.extend(ids)
print(f"空头仓位继续下空头订单: {ids}@{price}")
elif order.status in [Status.REJECTED, Status.CANCELLED]:
self.short_orders.remove(order.vt_orderid) # remove orderid
elif order.vt_orderid in self.stop_orders:
if not order.is_active():
self.stop_orders.remove(order.vt_orderid)
elif order.vt_orderid in self.profit_orders:
if not order.is_active():
self.profit_orders.remove(order.vt_orderid)
self.put_event()
def on_trade(self, trade: TradeData):
"""
Callback of new trade data update.
"""
self.put_event()
def on_stop_order(self, stop_order: StopOrder):
"""
Callback of stop order update.
"""
pass
| 34.410423
| 126
| 0.573173
|
b0ed4fd7261ee2873b1de62d7710805a966ccac4
| 3,733
|
py
|
Python
|
binance_asyncio/websockets/streams.py
|
justdanyul/binance-asyncio
|
4cc3eaabc5f6d9d49351141ce90dcd6979f2c3ac
|
[
"MIT"
] | 10
|
2021-02-07T23:34:05.000Z
|
2022-02-05T16:51:00.000Z
|
binance_asyncio/websockets/streams.py
|
justdanyul/binance-asyncio
|
4cc3eaabc5f6d9d49351141ce90dcd6979f2c3ac
|
[
"MIT"
] | null | null | null |
binance_asyncio/websockets/streams.py
|
justdanyul/binance-asyncio
|
4cc3eaabc5f6d9d49351141ce90dcd6979f2c3ac
|
[
"MIT"
] | null | null | null |
from typing import Callable, Tuple
from abc import ABC, abstractmethod
import websockets
import json
class BaseStream(ABC):
uri = "wss://stream.binance.com:9443/ws"
last_id = 0
def __init__(self) -> None:
self.parameters = {}
self.active = True
self.active_id = None
self.socket_reference = None
async def start(self, handler: Callable, keep_alive=False):
self.active_id = BaseStream.last_id = BaseStream.last_id + 1
if not keep_alive:
await self._start(handler)
else:
while self.active:
try:
await self._start(handler)
except:
continue
async def _start(self, handler: Callable):
async with websockets.connect(BaseStream.uri) as websocket:
self.socket_reference = websocket
request = await self._get_request('SUBSCRIBE')
await websocket.send(request)
await websocket.recv()
async for message in websocket:
await handler(message)
@abstractmethod
async def get_stream_identifier(self) -> str:
pass
async def subscribe(self, symbol:str) -> None:
await self._subscribe(symbol)
async def _subscribe(self, *args:str):
await self._add_parameter(args)
if self.socket_reference is not None:
await self.socket_reference.send(await self._get_request('SUBSCRIBE'))
async def _get_request(self, type:str):
parameters = list(self.parameters.keys())
return json.dumps({
"method": type,
"params": parameters,
"id": self.active_id
})
async def _add_parameter(self, args):
parameter = (await self.get_stream_identifier()).format(*args)
self.parameters[parameter] = None
class AggregateTradeStream(BaseStream):
async def get_stream_identifier(self) -> str:
return "{}@aggTrade"
class TradeStream(BaseStream):
async def get_stream_identifier(self) -> str:
return "{}@trade"
class TickerStream(BaseStream):
async def get_stream_identifier(self) -> str:
return "{}@ticker"
class AllMarketTickerStream(BaseStream):
async def get_stream_identifier(self) -> str:
return "!ticker@arr"
class MiniTickerStream(BaseStream):
async def get_stream_identifier(self) -> str:
return "{}@miniTicker"
class AllMarketsMiniTickerStream(BaseStream):
async def get_stream_identifier(self) -> str:
return "!miniTicker@arr"
class KlineStream(BaseStream):
async def subscribe(self, symbol:str, interval:str) -> None:
arguments = [symbol, interval]
await super()._subscribe(*arguments)
async def get_stream_identifier(self) -> str:
return "{}@kline_{}"
class SymbolBookTickerStream(BaseStream):
async def get_stream_identifier(self) -> str:
return "{}@bookTicker"
class AllBookTickerStream(BaseStream):
async def get_stream_identifier(self) -> str:
return "!bookTicker"
class PartialBookDepthStream(BaseStream):
async def subscribe(self, symbol:str, levels:str, more_updates:bool=False) -> None:
arguments = [symbol, levels, "" if not more_updates else "@100ms"]
await super()._subscribe(*arguments)
async def get_stream_identifier(self) -> str:
return "{}@depth{}{}"
class DiffDepthStream(BaseStream):
async def subscribe(self, symbol:str, more_updates:bool=False) -> None:
arguments = [symbol, "" if not more_updates else "@100ms"]
await super()._subscribe(*arguments)
async def get_stream_identifier(self) -> str:
return "{}@depth{}"
| 32.745614
| 87
| 0.645325
|
a816a43eef720c6ff5bffa5737564efd43be9715
| 538
|
py
|
Python
|
plotly/validators/layout/ternary/baxis/_linewidth.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/layout/ternary/baxis/_linewidth.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/layout/ternary/baxis/_linewidth.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class LinewidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='linewidth',
parent_name='layout.ternary.baxis',
**kwargs
):
super(LinewidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'plot'),
min=kwargs.pop('min', 0),
role=kwargs.pop('role', 'style'),
**kwargs
)
| 26.9
| 71
| 0.598513
|
f87d8d0a9cf99a010edf2b48793828005a3ef82a
| 5,574
|
py
|
Python
|
contrib/seeds/makeseeds.py
|
progof/elspero-core
|
8cb65df55bbc8c6412e841755b17e3da2deadc89
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
progof/elspero-core
|
8cb65df55bbc8c6412e841755b17e3da2deadc89
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
progof/elspero-core
|
8cb65df55bbc8c6412e841755b17e3da2deadc89
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = set([
"130.211.129.106", "178.63.107.226",
"83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
"54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
"54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
"54.94.195.96", "54.94.200.247"
])
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(\/Satoshi:0\.8\.6\/|\/Satoshi:0\.9\.(2|3|4|5)\/|\/Satoshi:0\.10\.\d{1,2}\/|\/Satoshi:0\.11\.\d{1,2}\/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
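# e.g. "1.2.3.4" packs to (1<<24) + (2<<16) + (3<<8) + 4 = 16909060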
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in hist.items() if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
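# The lookup below uses Team Cymru's IP-to-ASN DNS service: the IPv4 octets are
# reversed and queried as a TXT record (e.g. 1.2.3.4 -> 4.3.2.1.origin.asn.cymru.com);
# the answer is typically of the form "<ASN> | <prefix> | <CC> | <registry> | <date>",
# and the leading AS number is parsed out below.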
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
# Skip entries with invalid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple elspero ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print '[%s]:%i' % (ip['ip'], ip['port'])
else:
print '%s:%i' % (ip['ip'], ip['port'])
if __name__ == '__main__':
main()
| 32.788235
| 186
| 0.5601
|
19fd6d5c91ccb2983415b8b90682ed4b8e4104be
| 32,067
|
py
|
Python
|
docker_stuff/site-packages/partio.py
|
yannikkellerde/Water-Pouring
|
82ae82dde8f14452c4c1d0d7defed105ac1fb0c7
|
[
"MIT"
] | null | null | null |
docker_stuff/site-packages/partio.py
|
yannikkellerde/Water-Pouring
|
82ae82dde8f14452c4c1d0d7defed105ac1fb0c7
|
[
"MIT"
] | null | null | null |
docker_stuff/site-packages/partio.py
|
yannikkellerde/Water-Pouring
|
82ae82dde8f14452c4c1d0d7defed105ac1fb0c7
|
[
"MIT"
] | 1
|
2021-04-27T09:56:52.000Z
|
2021-04-27T09:56:52.000Z
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_partio')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_partio')
_partio = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_partio', [dirname(__file__)])
except ImportError:
import _partio
return _partio
try:
_mod = imp.load_module('_partio', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_partio = swig_import_helper()
del swig_import_helper
else:
import _partio
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except __builtin__.Exception:
class _object:
pass
_newclass = 0
class SwigPyIterator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _partio.delete_SwigPyIterator
__del__ = lambda self: None
def value(self):
return _partio.SwigPyIterator_value(self)
def incr(self, n=1):
return _partio.SwigPyIterator_incr(self, n)
def decr(self, n=1):
return _partio.SwigPyIterator_decr(self, n)
def distance(self, x):
return _partio.SwigPyIterator_distance(self, x)
def equal(self, x):
return _partio.SwigPyIterator_equal(self, x)
def copy(self):
return _partio.SwigPyIterator_copy(self)
def next(self):
return _partio.SwigPyIterator_next(self)
def __next__(self):
return _partio.SwigPyIterator___next__(self)
def previous(self):
return _partio.SwigPyIterator_previous(self)
def advance(self, n):
return _partio.SwigPyIterator_advance(self, n)
def __eq__(self, x):
return _partio.SwigPyIterator___eq__(self, x)
def __ne__(self, x):
return _partio.SwigPyIterator___ne__(self, x)
def __iadd__(self, n):
return _partio.SwigPyIterator___iadd__(self, n)
def __isub__(self, n):
return _partio.SwigPyIterator___isub__(self, n)
def __add__(self, n):
return _partio.SwigPyIterator___add__(self, n)
def __sub__(self, *args):
return _partio.SwigPyIterator___sub__(self, *args)
def __iter__(self):
return self
SwigPyIterator_swigregister = _partio.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
NONE = _partio.NONE
VECTOR = _partio.VECTOR
FLOAT = _partio.FLOAT
INT = _partio.INT
INDEXEDSTR = _partio.INDEXEDSTR
class ParticleAttribute(_object):
"""A handle for operating on attribbutes of a particle set"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ParticleAttribute, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ParticleAttribute, name)
__repr__ = _swig_repr
__swig_setmethods__["type"] = _partio.ParticleAttribute_type_set
__swig_getmethods__["type"] = _partio.ParticleAttribute_type_get
if _newclass:
type = _swig_property(_partio.ParticleAttribute_type_get, _partio.ParticleAttribute_type_set)
__swig_setmethods__["count"] = _partio.ParticleAttribute_count_set
__swig_getmethods__["count"] = _partio.ParticleAttribute_count_get
if _newclass:
count = _swig_property(_partio.ParticleAttribute_count_get, _partio.ParticleAttribute_count_set)
__swig_setmethods__["name"] = _partio.ParticleAttribute_name_set
__swig_getmethods__["name"] = _partio.ParticleAttribute_name_get
if _newclass:
name = _swig_property(_partio.ParticleAttribute_name_get, _partio.ParticleAttribute_name_set)
def __init__(self):
"""
__init__(ParticleAttribute self) -> ParticleAttribute
Attribute name
"""
this = _partio.new_ParticleAttribute()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _partio.delete_ParticleAttribute
__del__ = lambda self: None
ParticleAttribute_swigregister = _partio.ParticleAttribute_swigregister
ParticleAttribute_swigregister(ParticleAttribute)
class FixedAttribute(_object):
"""A handle for operating on fixed attribbutes of a particle set"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, FixedAttribute, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, FixedAttribute, name)
__repr__ = _swig_repr
__swig_setmethods__["type"] = _partio.FixedAttribute_type_set
__swig_getmethods__["type"] = _partio.FixedAttribute_type_get
if _newclass:
type = _swig_property(_partio.FixedAttribute_type_get, _partio.FixedAttribute_type_set)
__swig_setmethods__["count"] = _partio.FixedAttribute_count_set
__swig_getmethods__["count"] = _partio.FixedAttribute_count_get
if _newclass:
count = _swig_property(_partio.FixedAttribute_count_get, _partio.FixedAttribute_count_set)
__swig_setmethods__["name"] = _partio.FixedAttribute_name_set
__swig_getmethods__["name"] = _partio.FixedAttribute_name_get
if _newclass:
name = _swig_property(_partio.FixedAttribute_name_get, _partio.FixedAttribute_name_set)
def __init__(self):
"""
__init__(FixedAttribute self) -> FixedAttribute
Attribute name
"""
this = _partio.new_FixedAttribute()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _partio.delete_FixedAttribute
__del__ = lambda self: None
FixedAttribute_swigregister = _partio.FixedAttribute_swigregister
FixedAttribute_swigregister(FixedAttribute)
class ParticlesInfo(_object):
"""A set of particles with associated data attributes."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ParticlesInfo, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ParticlesInfo, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
def numParticles(self):
"""
numParticles(ParticlesInfo self) -> int
Returns the number of particles in the set
"""
return _partio.ParticlesInfo_numParticles(self)
def numAttributes(self):
"""
numAttributes(ParticlesInfo self) -> int
        Returns the number of attributes in the set
"""
return _partio.ParticlesInfo_numAttributes(self)
def numFixedAttributes(self):
"""
numFixedAttributes(ParticlesInfo self) -> int
Returns the number of fixed attributes
"""
return _partio.ParticlesInfo_numFixedAttributes(self)
def attributeInfo(self, *args):
"""
attributeInfo(ParticlesInfo self, char const * name) -> ParticleAttribute
attributeInfo(ParticlesInfo self, int const index) -> ParticleAttribute
        Returns the attribute handle by name or index
"""
return _partio.ParticlesInfo_attributeInfo(self, *args)
def fixedAttributeInfo(self, *args):
"""
fixedAttributeInfo(ParticlesInfo self, char const * name) -> FixedAttribute
fixedAttributeInfo(ParticlesInfo self, int const index) -> FixedAttribute
        Returns the fixed attribute handle by name or index
"""
return _partio.ParticlesInfo_fixedAttributeInfo(self, *args)
__swig_destroy__ = _partio.delete_ParticlesInfo
__del__ = lambda self: None
ParticlesInfo_swigregister = _partio.ParticlesInfo_swigregister
ParticlesInfo_swigregister(ParticlesInfo)
class ParticlesData(ParticlesInfo):
"""A reader for a set of particles."""
__swig_setmethods__ = {}
for _s in [ParticlesInfo]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, ParticlesData, name, value)
__swig_getmethods__ = {}
for _s in [ParticlesInfo]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, ParticlesData, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
def lookupIndexedStr(self, attribute, str):
"""
lookupIndexedStr(ParticlesData self, ParticleAttribute attribute, char const * str) -> int
        Looks up the index of a given indexed string, returns -1 if not found
"""
return _partio.ParticlesData_lookupIndexedStr(self, attribute, str)
def lookupFixedIndexedStr(self, attribute, str):
"""
lookupFixedIndexedStr(ParticlesData self, FixedAttribute attribute, char const * str) -> int
        Looks up the index of a given fixed indexed string, returns -1 if not found
"""
return _partio.ParticlesData_lookupFixedIndexedStr(self, attribute, str)
def findNPoints(self, center, nPoints, maxRadius):
"""
findNPoints(ParticlesData self, fixedFloatArray center, int nPoints, float maxRadius) -> PyObject *
Searches for the N nearest points to the center location
or as many as can be found within maxRadius distance.
"""
return _partio.ParticlesData_findNPoints(self, center, nPoints, maxRadius)
def findPoints(self, bboxMin, bboxMax):
"""
findPoints(ParticlesData self, fixedFloatArray bboxMin, fixedFloatArray bboxMax) -> PyObject *
Returns the indices of all points within the bounding
box defined by the two cube corners bboxMin and bboxMax
"""
return _partio.ParticlesData_findPoints(self, bboxMin, bboxMax)
def get(self, attr, particleIndex):
"""
get(ParticlesData self, ParticleAttribute attr, ParticleIndex const particleIndex) -> PyObject *
Gets attribute data for particleIndex'th particle
"""
return _partio.ParticlesData_get(self, attr, particleIndex)
def getFixed(self, attr):
"""
getFixed(ParticlesData self, FixedAttribute attr) -> PyObject *
Gets fixed attribute data
"""
return _partio.ParticlesData_getFixed(self, attr)
def indexedStrs(self, attr):
"""
indexedStrs(ParticlesData self, ParticleAttribute attr) -> PyObject *
Gets a list of all indexed strings for the given attribute handle
"""
return _partio.ParticlesData_indexedStrs(self, attr)
def fixedIndexedStrs(self, attr):
"""
fixedIndexedStrs(ParticlesData self, FixedAttribute attr) -> PyObject *
Gets a list of all indexed strings for the given fixed attribute handle
"""
return _partio.ParticlesData_fixedIndexedStrs(self, attr)
__swig_destroy__ = _partio.delete_ParticlesData
__del__ = lambda self: None
ParticlesData_swigregister = _partio.ParticlesData_swigregister
ParticlesData_swigregister(ParticlesData)
class ParticleIteratorTrue(_object):
"""A reader for a set of particles."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ParticleIteratorTrue, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ParticleIteratorTrue, name)
__repr__ = _swig_repr
def __init__(self):
"""
__init__(ParticleIterator<(true)> self) -> ParticleIteratorTrue
Return string name of given attribute type
"""
this = _partio.new_ParticleIteratorTrue()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _partio.delete_ParticleIteratorTrue
__del__ = lambda self: None
ParticleIteratorTrue_swigregister = _partio.ParticleIteratorTrue_swigregister
ParticleIteratorTrue_swigregister(ParticleIteratorTrue)
class ParticleIteratorFalse(_object):
"""A reader for a set of particles."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ParticleIteratorFalse, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ParticleIteratorFalse, name)
__repr__ = _swig_repr
def __init__(self):
"""
__init__(ParticleIterator<(false)> self) -> ParticleIteratorFalse
Return string name of given attribute type
"""
this = _partio.new_ParticleIteratorFalse()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _partio.delete_ParticleIteratorFalse
__del__ = lambda self: None
ParticleIteratorFalse_swigregister = _partio.ParticleIteratorFalse_swigregister
ParticleIteratorFalse_swigregister(ParticleIteratorFalse)
class ParticlesDataMutable(ParticlesData):
"""A writer for a set of particles."""
__swig_setmethods__ = {}
for _s in [ParticlesData]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, ParticlesDataMutable, name, value)
__swig_getmethods__ = {}
for _s in [ParticlesData]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, ParticlesDataMutable, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
def registerIndexedStr(self, attribute, str):
"""
registerIndexedStr(ParticlesDataMutable self, ParticleAttribute attribute, char const * str) -> int
Registers a string in the particular attribute
"""
return _partio.ParticlesDataMutable_registerIndexedStr(self, attribute, str)
def registerFixedIndexedStr(self, attribute, str):
"""
registerFixedIndexedStr(ParticlesDataMutable self, FixedAttribute attribute, char const * str) -> int
Registers a string in the particular fixed attribute
"""
return _partio.ParticlesDataMutable_registerFixedIndexedStr(self, attribute, str)
def setIndexedStr(self, attribute, particleAttributeHandle, str):
"""
setIndexedStr(ParticlesDataMutable self, ParticleAttribute attribute, int particleAttributeHandle, char const * str)
Changes a given index's associated string (for all particles that use this index too)
"""
return _partio.ParticlesDataMutable_setIndexedStr(self, attribute, particleAttributeHandle, str)
def setFixedIndexedStr(self, attribute, particleAttributeHandle, str):
"""
setFixedIndexedStr(ParticlesDataMutable self, FixedAttribute attribute, int particleAttributeHandle, char const * str)
Changes a given fixed index's associated string
"""
return _partio.ParticlesDataMutable_setFixedIndexedStr(self, attribute, particleAttributeHandle, str)
def sort(self):
"""
sort(ParticlesDataMutable self)
Prepares data for N nearest neighbor searches using the
attribute in the file with name 'position'
"""
return _partio.ParticlesDataMutable_sort(self)
def addAttribute(self, attribute, type, count):
"""
addAttribute(ParticlesDataMutable self, char const * attribute, ParticleAttributeType type, int const count) -> ParticleAttribute
Adds a new attribute of given name, type and count. If type is
partio.VECTOR, then count must be 3
"""
return _partio.ParticlesDataMutable_addAttribute(self, attribute, type, count)
def addFixedAttribute(self, attribute, type, count):
"""
addFixedAttribute(ParticlesDataMutable self, char const * attribute, ParticleAttributeType type, int const count) -> FixedAttribute
Adds a new fixed attribute of given name, type and count. If type is
partio.VECTOR, then count must be 3
"""
return _partio.ParticlesDataMutable_addFixedAttribute(self, attribute, type, count)
def addParticle(self):
"""
addParticle(ParticlesDataMutable self) -> ParticleIndex
Adds a new particle and returns the index
"""
return _partio.ParticlesDataMutable_addParticle(self)
def addParticles(self, count):
"""
addParticles(ParticlesDataMutable self, int const count) -> ParticleIteratorFalse
Adds count particles and returns the offset to the first one
"""
return _partio.ParticlesDataMutable_addParticles(self, count)
def set(self, attr, particleIndex, tuple):
"""
set(ParticlesDataMutable self, ParticleAttribute attr, uint64_t const particleIndex, PyObject * tuple) -> PyObject *
Sets data on a given attribute for a single particle.
Data must be specified as tuple.
"""
return _partio.ParticlesDataMutable_set(self, attr, particleIndex, tuple)
def setFixed(self, attr, tuple):
"""
setFixed(ParticlesDataMutable self, FixedAttribute attr, PyObject * tuple) -> PyObject *
Sets data on a given fixed attribute.
Data must be specified as tuple.
"""
return _partio.ParticlesDataMutable_setFixed(self, attr, tuple)
def ptr(self):
"""
ptr(ParticlesDataMutable self) -> unsigned long
        Workaround to get the address of the underlying pointer, to help with Python interop bindings
"""
return _partio.ParticlesDataMutable_ptr(self)
__swig_destroy__ = _partio.delete_ParticlesDataMutable
__del__ = lambda self: None
ParticlesDataMutable_swigregister = _partio.ParticlesDataMutable_swigregister
ParticlesDataMutable_swigregister(ParticlesDataMutable)
def create():
"""
create() -> ParticlesDataMutable
Create an empty particle array
"""
return _partio.create()
def read(*args):
"""
read(char const * filename, bool verbose=True, std::ostream & error) -> ParticlesDataMutable
read(char const * filename, bool verbose=True) -> ParticlesDataMutable
read(char const * filename) -> ParticlesDataMutable
Reads a particle set from disk
"""
return _partio.read(*args)
def readVerbose(filename):
"""
readVerbose(char const * filename) -> PyObject *
Reads a particle set from disk and returns the tuple particleObject,errorMsg
"""
return _partio.readVerbose(filename)
def readHeadersVerbose(filename):
"""
readHeadersVerbose(char const * filename) -> PyObject *
Reads the header/attribute information from disk and returns the tuple particleObject,errorMsg
"""
return _partio.readHeadersVerbose(filename)
def readCachedVerbose(filename, sort):
"""
readCachedVerbose(char const * filename, bool sort) -> PyObject *
Reads the header/attribute information from disk and returns the tuple particleObject,errorMsg
"""
return _partio.readCachedVerbose(filename, sort)
def readHeaders(*args):
"""
readHeaders(char const * filename, bool verbose=True, std::ostream & error) -> ParticlesInfo
readHeaders(char const * filename, bool verbose=True) -> ParticlesInfo
readHeaders(char const * filename) -> ParticlesInfo
Reads a particle set headers from disk
"""
return _partio.readHeaders(*args)
def write(filename, arg2, arg3=False, arg4=True):
"""
write(char const * filename, ParticlesData arg2, bool const arg3=False, bool const arg4=True)
write(char const * filename, ParticlesData arg2, bool const arg3=False)
write(char const * filename, ParticlesData arg2)
Writes a particle set to disk
"""
return _partio.write(filename, arg2, arg3, arg4)
def _print(particles):
"""
_print(ParticlesData particles)
Print a summary of particle file
"""
return _partio._print(particles)
def computeClustering(particles, numNeighbors, radiusSearch, radiusInside, connections, density):
"""
computeClustering(ParticlesDataMutable particles, int const numNeighbors, double const radiusSearch, double const radiusInside, int const connections, double const density) -> ParticlesDataMutable
Creates a clustered particle set
"""
return _partio.computeClustering(particles, numNeighbors, radiusSearch, radiusInside, connections, density)
def merge(*args):
"""
merge(ParticlesDataMutable base, ParticlesData delta, std::string const & identifier)
merge(ParticlesDataMutable base, ParticlesData delta)
Merge two particle sets
"""
return _partio.merge(*args)
class attrNameMap(_object):
"""Merge two particle sets"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, attrNameMap, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, attrNameMap, name)
__repr__ = _swig_repr
def iterator(self):
"""
iterator(attrNameMap self) -> SwigPyIterator
Merge two particle sets
"""
return _partio.attrNameMap_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
"""
__nonzero__(attrNameMap self) -> bool
Merge two particle sets
"""
return _partio.attrNameMap___nonzero__(self)
def __bool__(self):
"""
__bool__(attrNameMap self) -> bool
Merge two particle sets
"""
return _partio.attrNameMap___bool__(self)
def __len__(self):
"""
__len__(attrNameMap self) -> std::map< std::string,std::string >::size_type
Merge two particle sets
"""
return _partio.attrNameMap___len__(self)
def __iter__(self):
return self.key_iterator()
def iterkeys(self):
return self.key_iterator()
def itervalues(self):
return self.value_iterator()
def iteritems(self):
return self.iterator()
def __getitem__(self, key):
"""
__getitem__(attrNameMap self, std::map< std::string,std::string >::key_type const & key) -> std::map< std::string,std::string >::mapped_type const &
Merge two particle sets
"""
return _partio.attrNameMap___getitem__(self, key)
def __delitem__(self, key):
"""
__delitem__(attrNameMap self, std::map< std::string,std::string >::key_type const & key)
Merge two particle sets
"""
return _partio.attrNameMap___delitem__(self, key)
def has_key(self, key):
"""
has_key(attrNameMap self, std::map< std::string,std::string >::key_type const & key) -> bool
Merge two particle sets
"""
return _partio.attrNameMap_has_key(self, key)
def keys(self):
"""
keys(attrNameMap self) -> PyObject *
Merge two particle sets
"""
return _partio.attrNameMap_keys(self)
def values(self):
"""
values(attrNameMap self) -> PyObject *
Merge two particle sets
"""
return _partio.attrNameMap_values(self)
def items(self):
"""
items(attrNameMap self) -> PyObject *
Merge two particle sets
"""
return _partio.attrNameMap_items(self)
def __contains__(self, key):
"""
__contains__(attrNameMap self, std::map< std::string,std::string >::key_type const & key) -> bool
Merge two particle sets
"""
return _partio.attrNameMap___contains__(self, key)
def key_iterator(self):
"""
key_iterator(attrNameMap self) -> SwigPyIterator
Merge two particle sets
"""
return _partio.attrNameMap_key_iterator(self)
def value_iterator(self):
"""
value_iterator(attrNameMap self) -> SwigPyIterator
Merge two particle sets
"""
return _partio.attrNameMap_value_iterator(self)
def __setitem__(self, *args):
"""
__setitem__(attrNameMap self, std::map< std::string,std::string >::key_type const & key)
__setitem__(attrNameMap self, std::map< std::string,std::string >::key_type const & key, std::map< std::string,std::string >::mapped_type const & x)
Merge two particle sets
"""
return _partio.attrNameMap___setitem__(self, *args)
def asdict(self):
"""
asdict(attrNameMap self) -> PyObject *
Merge two particle sets
"""
return _partio.attrNameMap_asdict(self)
def __init__(self, *args):
"""
__init__(std::map<(std::string,std::string)> self, std::less< std::string > const & arg2) -> attrNameMap
__init__(std::map<(std::string,std::string)> self) -> attrNameMap
__init__(std::map<(std::string,std::string)> self, attrNameMap arg2) -> attrNameMap
Merge two particle sets
"""
this = _partio.new_attrNameMap(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def empty(self):
"""
empty(attrNameMap self) -> bool
Merge two particle sets
"""
return _partio.attrNameMap_empty(self)
def size(self):
"""
size(attrNameMap self) -> std::map< std::string,std::string >::size_type
Merge two particle sets
"""
return _partio.attrNameMap_size(self)
def swap(self, v):
"""
swap(attrNameMap self, attrNameMap v)
Merge two particle sets
"""
return _partio.attrNameMap_swap(self, v)
def begin(self):
"""
begin(attrNameMap self) -> std::map< std::string,std::string >::iterator
Merge two particle sets
"""
return _partio.attrNameMap_begin(self)
def end(self):
"""
end(attrNameMap self) -> std::map< std::string,std::string >::iterator
Merge two particle sets
"""
return _partio.attrNameMap_end(self)
def rbegin(self):
"""
rbegin(attrNameMap self) -> std::map< std::string,std::string >::reverse_iterator
Merge two particle sets
"""
return _partio.attrNameMap_rbegin(self)
def rend(self):
"""
rend(attrNameMap self) -> std::map< std::string,std::string >::reverse_iterator
Merge two particle sets
"""
return _partio.attrNameMap_rend(self)
def clear(self):
"""
clear(attrNameMap self)
Merge two particle sets
"""
return _partio.attrNameMap_clear(self)
def get_allocator(self):
"""
get_allocator(attrNameMap self) -> std::map< std::string,std::string >::allocator_type
Merge two particle sets
"""
return _partio.attrNameMap_get_allocator(self)
def count(self, x):
"""
count(attrNameMap self, std::map< std::string,std::string >::key_type const & x) -> std::map< std::string,std::string >::size_type
Merge two particle sets
"""
return _partio.attrNameMap_count(self, x)
def erase(self, *args):
"""
erase(attrNameMap self, std::map< std::string,std::string >::key_type const & x) -> std::map< std::string,std::string >::size_type
erase(attrNameMap self, std::map< std::string,std::string >::iterator position)
erase(attrNameMap self, std::map< std::string,std::string >::iterator first, std::map< std::string,std::string >::iterator last)
Merge two particle sets
"""
return _partio.attrNameMap_erase(self, *args)
def find(self, x):
"""
find(attrNameMap self, std::map< std::string,std::string >::key_type const & x) -> std::map< std::string,std::string >::iterator
Merge two particle sets
"""
return _partio.attrNameMap_find(self, x)
def lower_bound(self, x):
"""
lower_bound(attrNameMap self, std::map< std::string,std::string >::key_type const & x) -> std::map< std::string,std::string >::iterator
Merge two particle sets
"""
return _partio.attrNameMap_lower_bound(self, x)
def upper_bound(self, x):
"""
upper_bound(attrNameMap self, std::map< std::string,std::string >::key_type const & x) -> std::map< std::string,std::string >::iterator
Merge two particle sets
"""
return _partio.attrNameMap_upper_bound(self, x)
__swig_destroy__ = _partio.delete_attrNameMap
__del__ = lambda self: None
attrNameMap_swigregister = _partio.attrNameMap_swigregister
attrNameMap_swigregister(attrNameMap)
def cloneSchema(other, attrNameMap=None):
"""
cloneSchema(ParticlesData other, attrNameMap attrNameMap=None) -> ParticlesDataMutable
cloneSchema(ParticlesData other) -> ParticlesDataMutable
Clone a particle set's attribute schema
"""
return _partio.cloneSchema(other, attrNameMap)
def clone(other, particles, attrNameMap=None):
"""
clone(ParticlesData other, bool particles, attrNameMap attrNameMap=None) -> ParticlesDataMutable
clone(ParticlesData other, bool particles) -> ParticlesDataMutable
Clone a particle set
"""
return _partio.clone(other, particles, attrNameMap)
def TypeName(attrType):
"""
TypeName(ParticleAttributeType attrType) -> std::string
Return string name of given attribute type
"""
return _partio.TypeName(attrType)
# This file is compatible with both classic and new-style classes.
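# Hedged usage sketch (editor's addition, not part of the SWIG-generated
# module): a minimal round trip through the API documented above. It creates a
# particle set, adds a VECTOR "position" attribute (count must be 3 for
# VECTOR), adds particles, sets per-particle data as tuples, and writes the
# set to disk. The filename "example.bgeo" is illustrative only.
if __name__ == "__main__":
    _particles = create()
    _position = _particles.addAttribute("position", VECTOR, 3)
    _particles.addParticles(5)
    for _i in range(5):
        _particles.set(_position, _i, (float(_i), 0.0, 0.0))
    write("example.bgeo", _particles)
    _readback = read("example.bgeo")
    if _readback is not None:
        print(_readback.numParticles())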
| 32.131263
| 200
| 0.675991
|
19689cab43917be3ec3fe3c2bb8986ec64c9fc79
| 17,861
|
py
|
Python
|
pythontutorials/Udacity/CS101/Lesson 11 - How to Manage Data/Q43-Finishing Crawl Web.py
|
JoseALermaIII/python-tutorials
|
9d6cb78beec0bb55e27c49da1217317ba4d5f4fc
|
[
"MIT"
] | 2
|
2017-04-20T02:57:19.000Z
|
2018-10-12T20:15:47.000Z
|
pythontutorials/Udacity/CS101/Lesson 11 - How to Manage Data/Q43-Finishing Crawl Web.py
|
JoseALermaIII/python-tutorials
|
9d6cb78beec0bb55e27c49da1217317ba4d5f4fc
|
[
"MIT"
] | 8
|
2021-03-18T21:50:16.000Z
|
2022-03-11T23:38:01.000Z
|
pythontutorials/Udacity/CS101/Lesson 11 - How to Manage Data/Q43-Finishing Crawl Web.py
|
JoseALermaIII/python-tutorials
|
9d6cb78beec0bb55e27c49da1217317ba4d5f4fc
|
[
"MIT"
] | 3
|
2018-08-30T20:30:50.000Z
|
2022-01-18T13:40:51.000Z
|
# Finish crawl web
def get_page(url):
# This is a simulated get_page procedure so that you can test your
# code on two pages "http://xkcd.com/353" and "http://xkcd.com/554".
# A procedure which actually grabs a page from the web will be
# introduced in unit 4.
try:
if url == "http://xkcd.com/353":
return '<?xml version="1.0" encoding="utf-8" ?><?xml-stylesheet href="http://imgs.xkcd.com/s/c40a9f8.css" type="text/css" media="screen" ?><!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"><html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>xkcd: Python</title> <link rel="stylesheet" type="text/css" href="http://imgs.xkcd.com/s/c40a9f8.css" media="screen" title="Default" /> <!--[if IE]><link rel="stylesheet" type="text/css" href="http://imgs.xkcd.com/s/ecbbecc.css" media="screen" title="Default" /><![endif]--> <link rel="alternate" type="application/atom+xml" title="Atom 1.0" href="/atom.xml" /> <link rel="alternate" type="application/rss+xml" title="RSS 2.0" href="/rss.xml" /> <link rel="icon" href="http://imgs.xkcd.com/s/919f273.ico" type="image/x-icon" /> <link rel="shortcut icon" href="http://imgs.xkcd.com/s/919f273.ico" type="image/x-icon" /> </head> <body> <div id="container"> <div id="topContainer"> <div id="topLeft" class="dialog"> <div class="hd"><div class="c"></div></div> <div class="bd"> <div class="c"> <div class="s">\t<ul> <li><a href="http://xkcd.com/554"">Archive</a><br /></li>\t <li><a href="http://blag.xkcd.com/">News/Blag</a><br /></li> <li><a href="http://store.xkcd.com/">Store</a><br /></li> <li><a href="/about/">About</a><br /></li> <li><a href="http://forums.xkcd.com/">Forums</a><br /></li> </ul> </div> </div> </div> <div class="ft"><div class="c"></div></div> </div> <div id="topRight" class="dialog"> <div class="hd"><div class="c"></div></div> <div class="bd"> <div class="c"> <div class="s"> <div id="topRightContainer"> <div id="logo"> <a href="/"><img src="http://imgs.xkcd.com/s/9be30a7.png" alt="xkcd.com logo" height="83" width="185"/></a> <h2><br />A webcomic of romance,<br/> sarcasm, math, and language.</h2> <div class="clearleft"></div> <br />XKCD updates every Monday, Wednesday, and Friday. </div> </div> </div> </div> </div> <div class="ft"><div class="c"></div></div> </div> </div> <div id="contentContainer"> <div id="middleContent" class="dialog"> <div class="hd"><div class="c"></div></div> <div class="bd"> <div class="c"> <div class="s"><h1>Python</h1><br/><br /><div class="menuCont"> <ul> <li><a href="/1/">|<</a></li> <li><a href="/352/" accesskey="p">< Prev</a></li> <li><a href="http://dynamic.xkcd.com/random/comic/" id="rnd_btn_t">Random</a></li> <li><a href="/354/" accesskey="n">Next ></a></li> <li><a href="/">>|</a></li> </ul></div><br/><br/><img src="http://imgs.xkcd.com/comics/python.png" title="I wrote 20 short programs in Python yesterday. It was wonderful. Perl, Im leaving you." alt="Python" /><br/><br/><div class="menuCont"> <ul> <li><a href="/1/">|<</a></li> <li><a href="/352/" accesskey="p">< Prev</a></li> <li><a href="http://dynamic.xkcd.com/random/comic/" id="rnd_btn_b">Random</a></li> <li><a href="/354/" accesskey="n">Next ></a></li> <li><a href="/">>|</a></li> </ul></div><h3>Permanent link to this comic: http://xkcd.com/353/</h3><h3>Image URL (for hotlinking/embedding): http://imgs.xkcd.com/comics/python.png</h3><div id="transcript" style="display: none">[[ Guy 1 is talking to Guy 2, who is floating in the sky ]]Guy 1: You39;re flying! How?Guy 2: Python!Guy 2: I learned it last night! Everything is so simple!Guy 2: Hello world is just 39;print "Hello, World!" 39;Guy 1: I dunno... Dynamic typing? Whitespace?Guy 2: Come join us! Programming is fun again! 
It39;s a whole new world up here!Guy 1: But how are you flying?Guy 2: I just typed 39;import antigravity39;Guy 1: That39;s it?Guy 2: ...I also sampled everything in the medicine cabinet for comparison.Guy 2: But i think this is the python.{{ I wrote 20 short programs in Python yesterday. It was wonderful. Perl, I39;m leaving you. }}</div> </div> </div> </div> <div class="ft"><div class="c"></div></div> </div> <div id="middleFooter" class="dialog"> <div class="hd"><div class="c"></div></div> <div class="bd"> <div class="c"> <div class="s"> <img src="http://imgs.xkcd.com/s/a899e84.jpg" width="520" height="100" alt="Selected Comics" usemap=" comicmap" /> <map name="comicmap"> <area shape="rect" coords="0,0,100,100" href="/150/" alt="Grownups" /> <area shape="rect" coords="104,0,204,100" href="/730/" alt="Circuit Diagram" /> <area shape="rect" coords="208,0,308,100" href="/162/" alt="Angular Momentum" /> <area shape="rect" coords="312,0,412,100" href="/688/" alt="Self-Description" /> <area shape="rect" coords="416,0,520,100" href="/556/" alt="Alternative Energy Revolution" /> </map><br/><br />Search comic titles and transcripts:<br /><script type="text/javascript" src="//www.google.com/jsapi"></script><script type="text/javascript"> google.load(\"search\", \"1\"); google.setOnLoadCallback(function() { google.search.CustomSearchControl.attachAutoCompletion( \"012652707207066138651:zudjtuwe28q\", document.getElementById(\"q\"), \"cse-search-box\"); });</script><form action="//www.google.com/cse" id="cse-search-box"> <div> <input type="hidden" name="cx" value="012652707207066138651:zudjtuwe28q" /> <input type="hidden" name="ie" value="UTF-8" /> <input type="text" name="q" id="q" autocomplete="off" size="31" /> <input type="submit" name="sa" value="Search" /> </div></form><script type="text/javascript" src="//www.google.com/cse/brand?form=cse-search-box&lang=en"></script><a href="/rss.xml">RSS Feed</a> - <a href="/atom.xml">Atom Feed</a><br /> <br/> <div id="comicLinks"> Comics I enjoy:<br/> <a href="http://www.qwantz.com">Dinosaur Comics</a>, <a href="http://www.asofterworld.com">A Softer World</a>, <a href="http://pbfcomics.com/">Perry Bible Fellowship</a>, <a href="http://www.boltcity.com/copper/">Copper</a>, <a href="http://questionablecontent.net/">Questionable Content</a>, <a href="http://achewood.com/">Achewood</a>, <a href="http://wondermark.com/">Wondermark</a>, <a href="http://thisisindexed.com/">Indexed</a>, <a href="http://www.buttercupfestival.com/buttercupfestival.htm">Buttercup Festival</a> </div> <br/> Warning: this comic occasionally contains strong language (which may be unsuitable for children), unusual humor (which may be unsuitable for adults), and advanced mathematics (which may be unsuitable for liberal-arts majors).<br/> <br/> <h4>We did not invent the algorithm. The algorithm consistently finds Jesus. The algorithm killed Jeeves. <br />The algorithm is banned in China. The algorithm is from Jersey. The algorithm constantly finds Jesus.<br />This is not the algorithm. 
This is close.</h4><br/> <div class="line"></div> <br/> <div id="licenseText"> <!-- <a rel="license" href="http://creativecommons.org/licenses/by-nc/2.5/"><img alt="Creative Commons License" style="border:none" src="http://imgs.xkcd.com/static/somerights20.png" /></a><br/> --> This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc/2.5/">Creative Commons Attribution-NonCommercial 2.5 License</a>.<!-- <rdf:RDF xmlns="http://web.resource.org/cc/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:dcterms="http://purl.org/dc/terms/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns "><Work rdf:about=""><dc:creator>Randall Munroe</dc:creator><dcterms:rightsHolder>Randall Munroe</dcterms:rightsHolder><dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:source rdf:resource="http://www.xkcd.com/"/><license rdf:resource="http://creativecommons.org/licenses/by-nc/2.5/" /></Work><License rdf:about="http://creativecommons.org/licenses/by-nc/2.5/"><permits rdf:resource="http://web.resource.org/cc/Reproduction" /><permits rdf:resource="http://web.resource.org/cc/Distribution" /><requires rdf:resource="http://web.resource.org/cc/Notice" /><requires rdf:resource="http://web.resource.org/cc/Attribution" /><prohibits rdf:resource="http://web.resource.org/cc/CommercialUse" /><permits rdf:resource="http://web.resource.org/cc/DerivativeWorks" /></License></rdf:RDF> --> <br/> This means you\"re free to copy and share these comics (but not to sell them). <a href="/license.html">More details</a>.<br/> </div> </div> </div> </div> <div class="ft"><div class="c"></div></div> </div> </div> </div> </body></html> '
elif url == "http://xkcd.com/554":
return '<?xml version="1.0" encoding="utf-8" ?> <?xml-stylesheet href="http://imgs.xkcd.com/s/c40a9f8.css" type="text/css" media="screen" ?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>xkcd: Not Enough Work</title> <link rel="stylesheet" type="text/css" href="http://imgs.xkcd.com/s/c40a9f8.css" media="screen" title="Default" /> <!--[if IE]><link rel="stylesheet" type="text/css" href="http://imgs.xkcd.com/s/ecbbecc.css" media="screen" title="Default" /><![endif]--> <link rel="alternate" type="application/atom+xml" title="Atom 1.0" href="/atom.xml" /> <link rel="alternate" type="application/rss+xml" title="RSS 2.0" href="/rss.xml" /> <link rel="icon" href="http://imgs.xkcd.com/s/919f273.ico" type="image/x-icon" /> <link rel="shortcut icon" href="http://imgs.xkcd.com/s/919f273.ico" type="image/x-icon" /> </head> <body> <div id="container"> <div id="topContainer"> <div id="topLeft" class="dialog"> <div class="hd"><div class="c"></div></div> <div class="bd"> <div class="c"> <div class="s"> <ul> <li><a href="/archive/">Archive</a><br /></li> <li><a href="http://blag.xkcd.com/">News/Blag</a><br /></li> <li><a href="http://store.xkcd.com/">Store</a><br /></li> <li><a href="/about/">About</a><br /></li> <li><a href="http://forums.xkcd.com/">Forums</a><br /></li> </ul> </div> </div> </div> <div class="ft"><div class="c"></div></div> </div> <div id="topRight" class="dialog"> <div class="hd"><div class="c"></div></div> <div class="bd"> <div class="c"> <div class="s"> <div id="topRightContainer"> <div id="logo"> <a href="/"><img src="http://imgs.xkcd.com/s/9be30a7.png" alt="xkcd.com logo" height="83" width="185"/></a> <h2><br />A webcomic of romance,<br/> sarcasm, math, and language.</h2> <div class="clearleft"></div> XKCD updates every Monday, Wednesday, and Friday. <br /> Blag: Remember geohashing? <a href="http://blog.xkcd.com/2012/02/27/geohashing-2/">Something pretty cool</a> happened Sunday. </div> </div> </div> </div> </div> <div class="ft"><div class="c"></div></div> </div> </div> <div id="contentContainer"> <div id="middleContent" class="dialog"> <div class="hd"><div class="c"></div></div> <div class="bd"> <div class="c"> <div class="s"> <h1>Not Enough Work</h1><br/> <br /> <div class="menuCont"> <ul> <li><a href="/1/">|<</a></li> <li><a href="/553/" accesskey="p">< Prev</a></li> <li><a href="http://dynamic.xkcd.com/random/comic/" id="rnd_btn_t">Random</a></li> <li><a href="/555/" accesskey="n">Next ></a></li> <li><a href="/">>|</a></li> </ul> </div> <br/> <br/> <img src="http://imgs.xkcd.com/comics/not_enough_work.png" title="It39;s even harder if you39;re an asshole who pronounces <> brackets." alt="Not Enough Work" /><br/> <br/> <div class="menuCont"> <ul> <li><a href="/1/">|<</a></li> <li><a href="/553/" accesskey="p">< Prev</a></li> <li><a href="http://dynamic.xkcd.com/random/comic/" id="rnd_btn_b">Random</a></li> <li><a href="/555/" accesskey="n">Next ></a></li> <li><a href="/">>|</a></li> </ul> </div> <h3>Permanent link to this comic: http://xkcd.com/554/</h3> <h3>Image URL (for hotlinking/embedding): http://imgs.xkcd.com/comics/not_enough_work.png</h3> <div id="transcript" style="display: none">Narration: Signs your coders don39;t have enough work to do: [[A man sitting at his workstation; a female co-worker behind him]] Man: I39;m almost up to my old typing speed in dvorak [[Two men standing by a server rack]] Man 1: Our servers now support gopher. Man 1: Just in case. 
[[A woman standing near her workstation speaking to a male co-worker]] Woman: Our pages are now HTML, XHTML-STRICT, and haiku-compliant Man: Haiku? Woman: <div class="main"> Woman: <span id="marquee"> Woman: Blog!< span>< div> [[A woman sitting at her workstation]] Woman: Hey! Have you guys seen this webcomic? {{title text: It39;s even harder if you39;re an asshole who pronounces <> brackets.}}</div> </div> </div> </div> <div class="ft"><div class="c"></div></div> </div> <div id="middleFooter" class="dialog"> <div class="hd"><div class="c"></div></div> <div class="bd"> <div class="c"> <div class="s"> <img src="http://imgs.xkcd.com/s/a899e84.jpg" width="520" height="100" alt="Selected Comics" usemap=" comicmap" /> <map name="comicmap"> <area shape="rect" coords="0,0,100,100" href="/150/" alt="Grownups" /> <area shape="rect" coords="104,0,204,100" href="/730/" alt="Circuit Diagram" /> <area shape="rect" coords="208,0,308,100" href="/162/" alt="Angular Momentum" /> <area shape="rect" coords="312,0,412,100" href="/688/" alt="Self-Description" /> <area shape="rect" coords="416,0,520,100" href="/556/" alt="Alternative Energy Revolution" /> </map><br/><br /> Search comic titles and transcripts:<br /> <script type="text/javascript" src="//www.google.com/jsapi"></script> <script type="text/javascript"> google.load("search", "1"); google.search.CustomSearchControl.attachAutoCompletion( "012652707207066138651:zudjtuwe28q", document.getElementById("q"), "cse-search-box"); }); </script> <form action="//www.google.com/cse" id="cse-search-box"> <div> <input type="hidden" name="cx" value="012652707207066138651:zudjtuwe28q" /> <input type="hidden" name="ie" value="UTF-8" /> <input type="text" name="q" id="q" autocomplete="off" size="31" /> <input type="submit" name="sa" value="Search" /> </div> </form> <script type="text/javascript" src="//www.google.com/cse/brand?form=cse-search-box&lang=en"></script> <a href="/rss.xml">RSS Feed</a> - <a href="/atom.xml">Atom Feed</a> <br /> <br/> <div id="comicLinks"> Comics I enjoy:<br/> <a href="http://threewordphrase.com/">Three Word Phrase</a>, <a href="http://oglaf.com/">Oglaf</a> (nsfw), <a href="http://www.smbc-comics.com/">SMBC</a>, <a href="http://www.qwantz.com">Dinosaur Comics</a>, <a href="http://www.asofterworld.com">A Softer World</a>, <a href="http://buttersafe.com/">Buttersafe</a>, <a href="http://pbfcomics.com/">Perry Bible Fellowship</a>, <a href="http://questionablecontent.net/">Questionable Content</a>, <a href="http://www.buttercupfestival.com/buttercupfestival.htm">Buttercup Festival</a> </div> <br/> Warning: this comic occasionally contains strong language (which may be unsuitable for children), unusual humor (which may be unsuitable for adults), and advanced mathematics (which may be unsuitable for liberal-arts majors).<br/> <br/> <h4>We did not invent the algorithm. The algorithm consistently finds Jesus. The algorithm killed Jeeves. <br />The algorithm is banned in China. The algorithm is from Jersey. The algorithm constantly finds Jesus.<br />This is not the algorithm. This is close.</h4><br/> <div class="line"></div> <br/> <div id="licenseText"> <!-- <a rel="license" href="http://creativecommons.org/licenses/by-nc/2.5/"><img alt="Creative Commons License" style="border:none" src="http://imgs.xkcd.com/static/somerights20.png" /></a><br/> --> This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc/2.5/">Creative Commons Attribution-NonCommercial 2.5 License</a>. 
<!-- <rdf:RDF xmlns="http://web.resource.org/cc/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:dcterms="http://purl.org/dc/terms/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns "><Work rdf:about=""><dc:creator>Randall Munroe</dc:creator><dcterms:rightsHolder>Randall Munroe</dcterms:rightsHolder><dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:source rdf:resource="http://www.xkcd.com/"/><license rdf:resource="http://creativecommons.org/licenses/by-nc/2.5/" /></Work><License rdf:about="http://creativecommons.org/licenses/by-nc/2.5/"><permits rdf:resource="http://web.resource.org/cc/Reproduction" /><permits rdf:resource="http://web.resource.org/cc/Distribution" /><requires rdf:resource="http://web.resource.org/cc/Notice" /><requires rdf:resource="http://web.resource.org/cc/Attribution" /><prohibits rdf:resource="http://web.resource.org/cc/CommercialUse" /><permits rdf:resource="http://web.resource.org/cc/DerivativeWorks" /></License></rdf:RDF> --> <br/> This means you"re free to copy and share these comics (but not to sell them). <a href="/license.html">More details</a>.<br/> </div> </div> </div> </div> <div class="ft"><div class="c"></div></div> </div> </div> </div> </body> </html> '
except:
return ""
return ""
def get_next_target(page):
start_link = page.find('<a href=')
if start_link == -1:
return None, 0
start_quote = page.find('"', start_link)
end_quote = page.find('"', start_quote + 1)
url = page[start_quote + 1:end_quote]
return url, end_quote
def union(p,q):
for e in q:
if e not in p:
p.append(e)
def get_all_links(page):
links = []
while True:
url,endpos = get_next_target(page)
if url:
links.append(url)
page = page[endpos:]
else:
break
return links
def crawl_web(seed):
tocrawl = [seed]
crawled = []
while tocrawl:
page = tocrawl.pop()
if page not in crawled:
union(tocrawl, get_all_links(get_page(page)))
crawled.append(page)
return crawled
# Test
print crawl_web("http://xkcd.com/353")
| 302.728814
| 8,386
| 0.668776
|
2b7e4285471db85667d3b52a1c7debb85d01a475
| 20,063
|
py
|
Python
|
flanker/addresslib/parser.py
|
meta-x/flanker
|
1e37baa1db2ecee238ac3de1e36a2948e0a6d3ad
|
[
"Apache-2.0"
] | null | null | null |
flanker/addresslib/parser.py
|
meta-x/flanker
|
1e37baa1db2ecee238ac3de1e36a2948e0a6d3ad
|
[
"Apache-2.0"
] | null | null | null |
flanker/addresslib/parser.py
|
meta-x/flanker
|
1e37baa1db2ecee238ac3de1e36a2948e0a6d3ad
|
[
"Apache-2.0"
] | null | null | null |
# coding:utf-8
'''
_AddressParser is an implementation of a recursive descent parser for email
addresses and urls. While _AddressParser can be used directly, it is not
recommended; use the parse() and parse_list() methods, which are provided
in the address module for convenience.
The grammar supported by the parser (as well as other limitations) is
outlined below. Plugins are also supported to allow for custom, more
restrictive grammars of the kind typically seen at large Email Service
Providers (ESPs).
For email addresses, the grammar tries to stick to RFC 5322 as much as
possible, but includes a relaxed (lax) grammar as well to support common,
realistic uses of email addresses on the Internet.
Grammar:
address-list -> address { delimiter address }
mailbox -> name-addr-rfc | name-addr-lax | addr-spec | url
name-addr-rfc -> [ display-name-rfc ] angle-addr-rfc
display-name-rfc -> [ whitespace ] word { whitespace word }
angle-addr-rfc -> [ whitespace ] < addr-spec > [ whitespace ]
name-addr-lax -> [ display-name-lax ] angle-addr-lax
display-name-lax -> [ whitespace ] word { whitespace word } whitespace
angle-addr-lax -> addr-spec [ whitespace ]
addr-spec -> [ whitespace ] local-part @ domain [ whitespace ]
local-part -> dot-atom | quoted-string
domain -> dot-atom
word -> word-ascii | word-unicode
word-ascii -> atom | quoted-string
word-unicode -> unicode-atom | unicode-qstring
whitespace -> whitespace-ascii | whitespace-unicode
Additional limitations on email addresses:
1. local-part:
* Must not be greater than 64 octets
2. domain:
* No more than 127 levels
* Each level no more than 63 octets
        * Textual representation cannot exceed 253 characters
        * No level can begin or end with -
3. Maximum mailbox length is len(local-part) + len('@') + len(domain) which
is 64 + 1 + 253 = 318 characters. Allow 194 characters for a display
name and the (very generous) limit becomes 512 characters. Allow 1024
mailboxes and the total limit on a mailbox-list is 524288 characters.
'''
import re
import flanker.addresslib.address
from flanker.addresslib.tokenizer import TokenStream
from flanker.addresslib.tokenizer import LBRACKET
from flanker.addresslib.tokenizer import AT_SYMBOL
from flanker.addresslib.tokenizer import RBRACKET
from flanker.addresslib.tokenizer import DQUOTE
from flanker.addresslib.tokenizer import BAD_DOMAIN
from flanker.addresslib.tokenizer import DELIMITER
from flanker.addresslib.tokenizer import RELAX_ATOM
from flanker.addresslib.tokenizer import WHITESPACE
from flanker.addresslib.tokenizer import UNI_WHITE
from flanker.addresslib.tokenizer import ATOM
from flanker.addresslib.tokenizer import UNI_ATOM
from flanker.addresslib.tokenizer import UNI_QSTR
from flanker.addresslib.tokenizer import DOT_ATOM
from flanker.addresslib.tokenizer import QSTRING
from flanker.addresslib.tokenizer import URL
from flanker.mime.message.headers.encoding import encode_string
from flanker.utils import is_pure_ascii
from flanker.utils import contains_control_chars
from flanker.utils import cleanup_display_name
from flanker.utils import cleanup_email
from flanker.utils import to_utf8
class _AddressParser(object):
'''
Do not use _AddressParser directly because it heavily relies on other
    private classes and methods, and its interface is not guaranteed; it
    may change in the future and possibly break your application.
Instead use the parse() and parse_list() functions in the address.py
module which will always return a scalar or iterable respectively.
'''
def __init__(self, strict=False):
self.stream = None
self.strict = strict
def address_list(self, stream):
'''
Extract a mailbox and/or url list from a stream of input, operates in
strict and relaxed modes.
'''
# sanity check
if not stream:
raise ParserException('No input provided to parser.')
if isinstance(stream, str) and not is_pure_ascii(stream):
raise ParserException('ASCII string contains non-ASCII chars.')
# to avoid spinning here forever, limit address list length
if len(stream) > MAX_ADDRESS_LIST_LENGTH:
raise ParserException('Stream length exceeds maximum allowable ' + \
'address list length of ' + str(MAX_ADDRESS_LIST_LENGTH) + '.')
# set stream
self.stream = TokenStream(stream)
if self.strict is True:
return self._address_list_strict()
return self._address_list_relaxed()
def address(self, stream):
'''
Extract a single address or url from a stream of input, always
operates in strict mode.
'''
# sanity check
if not stream:
raise ParserException('No input provided to parser.')
if isinstance(stream, str) and not is_pure_ascii(stream):
raise ParserException('ASCII string contains non-ASCII chars.')
# to avoid spinning here forever, limit mailbox length
if len(stream) > MAX_ADDRESS_LENGTH:
raise ParserException('Stream length exceeds maximum allowable ' + \
'address length of ' + str(MAX_ADDRESS_LENGTH) + '.')
self.stream = TokenStream(stream)
addr = self._address()
if addr:
# optional whitespace
self._whitespace()
            # if we hit the end of the stream, we have a valid address
if self.stream.end_of_stream():
return addr
return None
def address_spec(self, stream):
'''
Extract a single address spec from a stream of input, always
operates in strict mode.
'''
# sanity check
if stream is None:
raise ParserException('No input provided to parser.')
if isinstance(stream, str) and not is_pure_ascii(stream):
raise ParserException('ASCII string contains non-ASCII chars.')
# to avoid spinning here forever, limit mailbox length
if len(stream) > MAX_ADDRESS_LENGTH:
raise ParserException('Stream length exceeds maximum allowable ' + \
'address length of ' + str(MAX_ADDRESS_LENGTH) + '.')
self.stream = TokenStream(stream)
addr = self._addr_spec()
if addr:
# optional whitespace
self._whitespace()
            # if we hit the end of the stream, we have a valid address spec
if self.stream.end_of_stream():
return addr
return None
def _mailbox_post_processing_checks(self, address):
"Additional post processing checks to ensure mailbox is valid."
parts = address.split('@')
# check if local part is less than 128 octets, the actual
# limit is 64 octets but we double the size here because
# unsubscribe links are frequently longer
lpart = parts[0]
if len(lpart) > 128:
return False
        # check that the domain is no more than 253 octets
domn = parts[1]
if len(domn) > 253:
return False
# number of labels can not be over 127
labels = domn.split('.')
if len(labels) > 127:
return False
for label in labels:
            # check that the label doesn't begin or end with - and
# the length of each label is no more than 63 octets
if BAD_DOMAIN.search(label) or len(label) > 63:
return False
return True
def _address_list_relaxed(self):
"Grammar: address-list-relaxed -> address { delimiter address }"
#addrs = []
addrs = flanker.addresslib.address.AddressList()
unparsable = []
# address
addr = self._address()
if addr is None:
# synchronize to the next delimiter (or end of line)
# append the skipped over text to the unparsable list
skip = self.stream.synchronize()
if skip:
unparsable.append(skip)
            # if no mailbox was found and we hit the end of the stream,
            # return the unparsable text
if self.stream.end_of_stream():
return [], unparsable
else:
# if we found a delimiter or end of stream, we have a
# valid mailbox, add it
if self.stream.peek(DELIMITER) or self.stream.end_of_stream():
addrs.append(addr)
else:
                # otherwise synchronize and add it to the unparsable array
skip = self.stream.synchronize()
if skip:
pre = self.stream.stream[:self.stream.stream.index(skip)]
unparsable.append(pre + skip)
# if we hit the end of the stream, return the results
if self.stream.end_of_stream():
return [], [self.stream.stream]
while True:
# delimiter
dlm = self.stream.get_token(DELIMITER)
if dlm is None:
skip = self.stream.synchronize()
if skip:
unparsable.append(skip)
if self.stream.end_of_stream():
break
# address
start_pos = self.stream.position
addr = self._address()
if addr is None:
skip = self.stream.synchronize()
if skip:
unparsable.append(skip)
if self.stream.end_of_stream():
break
else:
# if we found a delimiter or end of stream, we have a
# valid mailbox, add it
if self.stream.peek(DELIMITER) or self.stream.end_of_stream():
addrs.append(addr)
else:
                    # otherwise synchronize and add it to the unparsable array
skip = self.stream.synchronize()
if skip:
sskip = self.stream.stream[start_pos:self.stream.position]
unparsable.append(sskip)
# if we hit the end of the stream, return the results
if self.stream.end_of_stream():
return addrs, unparsable
return addrs, unparsable
def _address_list_strict(self):
"Grammar: address-list-strict -> address { delimiter address }"
#addrs = []
addrs = flanker.addresslib.address.AddressList()
# address
addr = self._address()
if addr is None:
return addrs
if self.stream.peek(DELIMITER):
addrs.append(addr)
while True:
# delimiter
dlm = self.stream.get_token(DELIMITER)
if dlm is None:
break
# address
addr = self._address()
if addr is None:
break
addrs.append(addr)
return addrs
def _address(self):
"Grammar: address -> name-addr-rfc | name-addr-lax | addr-spec | url"
start_pos = self.stream.position
addr = self._name_addr_rfc() or self._name_addr_lax() or \
self._addr_spec() or self._url()
# if email address, check that it passes post processing checks
if addr and isinstance(addr, flanker.addresslib.address.EmailAddress):
if self._mailbox_post_processing_checks(addr.address) is False:
# roll back
self.stream.position = start_pos
return None
return addr
def _url(self):
"Grammar: url -> url"
earl = self.stream.get_token(URL)
if earl is None:
return None
return flanker.addresslib.address.UrlAddress(to_utf8(earl))
def _name_addr_rfc(self):
"Grammar: name-addr-rfc -> [ display-name-rfc ] angle-addr-rfc"
start_pos = self.stream.position
# optional displayname
dname = self._display_name_rfc()
aaddr = self._angle_addr_rfc()
if aaddr is None:
# roll back
self.stream.position = start_pos
return None
if dname:
return flanker.addresslib.address.EmailAddress(dname, aaddr)
return flanker.addresslib.address.EmailAddress(None, aaddr)
def _display_name_rfc(self):
"Grammar: display-name-rfc -> [ whitespace ] word { whitespace word }"
wrds = []
# optional whitespace
self._whitespace()
# word
wrd = self._word()
if wrd is None:
return None
wrds.append(wrd)
while True:
# whitespace
wtsp = self._whitespace()
if wtsp is None:
break
wrds.append(wtsp)
# word
wrd = self._word()
if wrd is None:
break
wrds.append(wrd)
return cleanup_display_name(''.join(wrds))
def _angle_addr_rfc(self):
'''
        Grammar: angle-addr-rfc -> [ whitespace ] < addr-spec > [ whitespace ]
'''
start_pos = self.stream.position
# optional whitespace
self._whitespace()
# left angle bracket
lbr = self.stream.get_token(LBRACKET)
if lbr is None:
# rollback
self.stream.position = start_pos
return None
# addr-spec
aspec = self._addr_spec(True)
if aspec is None:
# rollback
self.stream.position = start_pos
return None
# right angle bracket
rbr = self.stream.get_token(RBRACKET)
if rbr is None:
# rollback
self.stream.position = start_pos
return None
# optional whitespace
self._whitespace()
return aspec
def _name_addr_lax(self):
"Grammar: name-addr-lax -> [ display-name-lax ] angle-addr-lax"
start_pos = self.stream.position
# optional displayname
dname = self._display_name_lax()
aaddr = self._angle_addr_lax()
if aaddr is None:
# roll back
self.stream.position = start_pos
return None
if dname:
return flanker.addresslib.address.EmailAddress(dname, aaddr)
return flanker.addresslib.address.EmailAddress(None, aaddr)
def _display_name_lax(self):
'''
Grammar: display-name-lax ->
            [ whitespace ] word { whitespace word } whitespace
'''
start_pos = self.stream.position
wrds = []
# optional whitespace
self._whitespace()
# word
wrd = self._word()
if wrd is None:
# roll back
self.stream.position = start_pos
return None
wrds.append(wrd)
# peek to see if we have a whitespace,
        # if we don't, we have an invalid display-name
if self.stream.peek(WHITESPACE) is None or \
self.stream.peek(UNI_WHITE) is None:
self.stream.position = start_pos
return None
while True:
# whitespace
wtsp = self._whitespace()
if wtsp:
wrds.append(wtsp)
            # remember the position in case we need to roll back the next word
start_pos = self.stream.position
# word
wrd = self._word()
if wrd is None:
self.stream.position = start_pos
break
wrds.append(wrd)
# peek to see if we have a whitespace
            # if we don't, pop off the last word and break
if self.stream.peek(WHITESPACE) is None or \
self.stream.peek(UNI_WHITE) is None:
# roll back last word
self.stream.position = start_pos
wrds.pop()
break
return cleanup_display_name(''.join(wrds))
def _angle_addr_lax(self):
"Grammar: angle-addr-lax -> addr-spec [ whitespace ]"
start_pos = self.stream.position
# addr-spec
aspec = self._addr_spec(True)
if aspec is None:
# rollback
self.stream.position = start_pos
return None
# optional whitespace
self._whitespace()
return aspec
def _addr_spec(self, as_string=False):
'''
Grammar: addr-spec -> [ whitespace ] local-part @ domain [ whitespace ]
'''
start_pos = self.stream.position
# optional whitespace
self._whitespace()
lpart = self._local_part()
if lpart is None:
# rollback
self.stream.position = start_pos
return None
asym = self.stream.get_token(AT_SYMBOL)
if asym is None:
# rollback
self.stream.position = start_pos
return None
domn = self._domain()
if domn is None:
# rollback
self.stream.position = start_pos
return None
# optional whitespace
self._whitespace()
aspec = cleanup_email(''.join([lpart, asym, domn]))
if as_string:
return aspec
return flanker.addresslib.address.EmailAddress(None, aspec)
def _local_part(self):
"Grammar: local-part -> dot-atom | quoted-string"
return self.stream.get_token(DOT_ATOM) or \
self.stream.get_token(QSTRING)
def _domain(self):
"Grammar: domain -> dot-atom"
return self.stream.get_token(DOT_ATOM)
def _word(self):
"Grammar: word -> word-ascii | word-unicode"
start_pos = self.stream.position
# ascii word
ascii_wrd = self._word_ascii()
if ascii_wrd and not self.stream.peek(UNI_ATOM):
return ascii_wrd
# didn't get an ascii word, rollback to try again
self.stream.position = start_pos
# unicode word
return self._word_unicode()
def _word_ascii(self):
"Grammar: word-ascii -> atom | qstring"
wrd = self.stream.get_token(RELAX_ATOM) or self.stream.get_token(QSTRING)
if wrd and not contains_control_chars(wrd):
return wrd
return None
def _word_unicode(self):
"Grammar: word-unicode -> unicode-atom | unicode-qstring"
start_pos = self.stream.position
# unicode atom
uwrd = self.stream.get_token(UNI_ATOM)
if uwrd and isinstance(uwrd, unicode) and not contains_control_chars(uwrd):
return uwrd
# unicode qstr
uwrd = self.stream.get_token(UNI_QSTR, 'qstr')
if uwrd and isinstance(uwrd, unicode) and not contains_control_chars(uwrd):
return u'"{}"'.format(encode_string(None, uwrd))
# rollback
self.stream.position = start_pos
return None
def _whitespace(self):
"Grammar: whitespace -> whitespace-ascii | whitespace-unicode"
return self._whitespace_ascii() or self._whitespace_unicode()
def _whitespace_ascii(self):
"Grammar: whitespace-ascii -> whitespace-ascii"
return self.stream.get_token(WHITESPACE)
def _whitespace_unicode(self):
"Grammar: whitespace-unicode -> whitespace-unicode"
uwhite = self.stream.get_token(UNI_WHITE)
if uwhite and not is_pure_ascii(uwhite):
return uwhite
return None
class ParserException(Exception):
'''
Exception raised when the parser encounters some parsing exception.
'''
def __init__(self, reason='Unknown parser error.'):
self.reason = reason
def __str__(self):
return self.reason
MAX_ADDRESS_LENGTH = 512
MAX_ADDRESS_NUMBER = 1024
MAX_ADDRESS_LIST_LENGTH = MAX_ADDRESS_LENGTH * MAX_ADDRESS_NUMBER
| 32.517018
| 83
| 0.598465
|
4a7eff8aae3fca1f8d7e3f6770a6f9f9dd04524b
| 679
|
py
|
Python
|
fehler_auth/urls.py
|
dhavall13/fehler_core
|
dd27802d5b227a32aebcc8bfde68e78a69a36d66
|
[
"MIT"
] | null | null | null |
fehler_auth/urls.py
|
dhavall13/fehler_core
|
dd27802d5b227a32aebcc8bfde68e78a69a36d66
|
[
"MIT"
] | null | null | null |
fehler_auth/urls.py
|
dhavall13/fehler_core
|
dd27802d5b227a32aebcc8bfde68e78a69a36d66
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import (
CustomObtainAuthToken,
RegisterUser,
InviteUserApi,
VerificationView,
UserDetails,
)
urlpatterns = [
path("register/", RegisterUser.as_view(), name="create_user"),
path("token/", CustomObtainAuthToken.as_view(), name="token_obtain"),
# path('<space_id>/invite/', views.UserInvite.as_view(), name='invite'),
path("spaces/<str:space_name>/invite/", InviteUserApi.as_view(), name="inviteapi"),
path(
"<space_id>/activate/<uid64>/<token>/",
VerificationView.as_view(),
name="activate",
),
path("user-details/", UserDetails.as_view(), name="user_details"),
]
| 28.291667
| 87
| 0.659794
|
198b6514a8a63b0d653453271ac73346a991cd3a
| 205
|
py
|
Python
|
pyplex/material.py
|
pyplex/pyplex
|
66e19acb3efd1a8a69d28022edcb0b6ad5cb6b11
|
[
"MIT"
] | 5
|
2018-01-17T09:08:38.000Z
|
2020-09-20T20:38:51.000Z
|
pyplex/material.py
|
pyplex/pyplex
|
66e19acb3efd1a8a69d28022edcb0b6ad5cb6b11
|
[
"MIT"
] | null | null | null |
pyplex/material.py
|
pyplex/pyplex
|
66e19acb3efd1a8a69d28022edcb0b6ad5cb6b11
|
[
"MIT"
] | null | null | null |
from pyplex.mesh import Mesh
from pyplex.camera import Camera
from pyplex.light import Light
from typing import List
class Material:
def render(self, mesh: Mesh):
raise NotImplementedError()
| 20.5
| 35
| 0.760976
|
cb98678d1ef651609aef2cdd13bedb0ab853c7f6
| 2,434
|
py
|
Python
|
aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/MetastoreCreateKafkaTopicRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/MetastoreCreateKafkaTopicRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/MetastoreCreateKafkaTopicRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class MetastoreCreateKafkaTopicRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'MetastoreCreateKafkaTopic')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_DataSourceId(self):
return self.get_query_params().get('DataSourceId')
def set_DataSourceId(self,DataSourceId):
self.add_query_param('DataSourceId',DataSourceId)
def get_TopicName(self):
return self.get_query_params().get('TopicName')
def set_TopicName(self,TopicName):
self.add_query_param('TopicName',TopicName)
def get_AdvancedConfigs(self):
return self.get_query_params().get('AdvancedConfigs')
def set_AdvancedConfigs(self,AdvancedConfigs):
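		# AdvancedConfigs is expected to be a list of {'Key': ..., 'Value': ...} dicts;
		# each entry is flattened into indexed query parameters of the form
		# 'AdvancedConfig.N.Key' / 'AdvancedConfig.N.Value' (1-based), matching the
		# SDK's convention for encoding repeated structures.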
for i in range(len(AdvancedConfigs)):
if AdvancedConfigs[i].get('Value') is not None:
self.add_query_param('AdvancedConfig.' + str(i + 1) + '.Value' , AdvancedConfigs[i].get('Value'))
if AdvancedConfigs[i].get('Key') is not None:
self.add_query_param('AdvancedConfig.' + str(i + 1) + '.Key' , AdvancedConfigs[i].get('Key'))
def get_NumPartitions(self):
return self.get_query_params().get('NumPartitions')
def set_NumPartitions(self,NumPartitions):
self.add_query_param('NumPartitions',NumPartitions)
def get_ReplicationFactor(self):
return self.get_query_params().get('ReplicationFactor')
def set_ReplicationFactor(self,ReplicationFactor):
self.add_query_param('ReplicationFactor',ReplicationFactor)
| 37.446154
| 102
| 0.760477
|
67c6d8295c564235ab83e10dd20948d709964503
| 9,170
|
py
|
Python
|
src/common/layers.py
|
banbiossa/deep-learning-from-scrach
|
d183a73ad27c68a79500c35a94c174ce0455940c
|
[
"FTL"
] | null | null | null |
src/common/layers.py
|
banbiossa/deep-learning-from-scrach
|
d183a73ad27c68a79500c35a94c174ce0455940c
|
[
"FTL"
] | null | null | null |
src/common/layers.py
|
banbiossa/deep-learning-from-scrach
|
d183a73ad27c68a79500c35a94c174ce0455940c
|
[
"FTL"
] | null | null | null |
# coding: utf-8
import numpy as np
from .functions import sigmoid, softmax, cross_entropy_error
from .util import im2col, col2im
import logging
logger = logging.getLogger(__name__)
class Relu:
def __init__(self):
self.mask = None
def forward(self, x):
self.mask = (x <= 0)
out = x.copy()
out[self.mask] = 0
return out
def backward(self, dout):
dout[self.mask] = 0
dx = dout
return dx
class Sigmoid:
def __init__(self):
self.out = None
def forward(self, x):
out = sigmoid(x)
self.out = out
return out
def backward(self, dout):
dx = dout * (1.0 - self.out) * self.out
return dx
class Affine:
def __init__(self, W, b):
self.W = W
self.b = b
self.x = None
self.original_x_shape = None
        # Gradients of the weight and bias parameters
self.dW = None
self.db = None
def forward(self, x):
        # Support tensor inputs: flatten all but the batch dimension
self.original_x_shape = x.shape
x = x.reshape(x.shape[0], -1)
self.x = x
out = np.dot(self.x, self.W) + self.b
return out
def backward(self, dout):
dx = np.dot(dout, self.W.T)
self.dW = np.dot(self.x.T, dout)
self.db = np.sum(dout, axis=0)
dx = dx.reshape(*self.original_x_shape)
        # Restore the input data's original shape (for tensor inputs)
return dx
class SoftmaxWithLoss:
def __init__(self):
self.loss = None
        self.y = None  # output of softmax
        self.t = None  # teacher (ground-truth) labels
def forward(self, x, t):
self.t = t
self.y = softmax(x)
self.loss = cross_entropy_error(self.y, self.t)
return self.loss
def backward(self, dout=1):
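        # For softmax combined with cross-entropy the gradient simplifies to
        # dL/dx = (y - t) / batch_size when t is one-hot; when t holds class
        # indices, 1 is subtracted from the predicted probability of the true
        # class instead. Dividing by batch_size propagates the mean loss.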
batch_size = self.t.shape[0]
        if self.t.size == self.y.size:  # teacher labels are one-hot vectors
dx = (self.y - self.t) / batch_size
else:
dx = self.y.copy()
dx[np.arange(batch_size), self.t] -= 1
dx = dx / batch_size
return dx
class Dropout:
"""
http://arxiv.org/abs/1207.0580
"""
def __init__(self, dropout_ratio=0.5):
self.dropout_ratio = dropout_ratio
self.mask = None
def forward(self, x, train_flg=True):
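        # Standard (non-inverted) dropout: units are dropped with probability
        # dropout_ratio during training, and activations are scaled by
        # (1 - dropout_ratio) at inference so the expected output stays the same.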
if train_flg:
self.mask = np.random.rand(*x.shape) > self.dropout_ratio
return x * self.mask
else:
return x * (1.0 - self.dropout_ratio)
def backward(self, dout):
return dout * self.mask
class BatchNormalization:
"""
http://arxiv.org/abs/1502.03167
"""
def __init__(self, gamma, beta, momentum=0.9, running_mean=None, running_var=None):
self.gamma = gamma
self.beta = beta
self.momentum = momentum
self.input_shape = None
        # 4 dimensions for convolution layers, 2 dimensions for fully connected layers
        # Mean and variance used at test time
self.running_mean = running_mean
self.running_var = running_var
        # Intermediate data used during the backward pass
self.batch_size = None
self.xc = None
self.std = None
self.dgamma = None
self.dbeta = None
def forward(self, x, train_flg=True):
self.input_shape = x.shape
if x.ndim != 2:
N, C, H, W = x.shape
x = x.reshape(N, -1)
out = self.__forward(x, train_flg)
return out.reshape(*self.input_shape)
def __forward(self, x, train_flg):
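        # Batch normalization: xn = (x - mu) / sqrt(var + eps), out = gamma * xn + beta.
        # During training mu/var come from the current mini-batch and the running
        # statistics are updated with exponential decay `momentum`; at test time
        # the stored running_mean / running_var are used instead.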
if self.running_mean is None:
N, D = x.shape
self.running_mean = np.zeros(D)
self.running_var = np.zeros(D)
if train_flg:
mu = x.mean(axis=0)
xc = x - mu
var = np.mean(xc**2, axis=0)
std = np.sqrt(var + 10e-7)
xn = xc / std
self.batch_size = x.shape[0]
self.xc = xc
self.xn = xn
self.std = std
self.running_mean = self.momentum * \
self.running_mean + (1-self.momentum) * mu
self.running_var = self.momentum * \
self.running_var + (1-self.momentum) * var
else:
xc = x - self.running_mean
xn = xc / ((np.sqrt(self.running_var + 10e-7)))
out = self.gamma * xn + self.beta
return out
def backward(self, dout):
if dout.ndim != 2:
N, C, H, W = dout.shape
dout = dout.reshape(N, -1)
dx = self.__backward(dout)
dx = dx.reshape(*self.input_shape)
return dx
def __backward(self, dout):
dbeta = dout.sum(axis=0)
dgamma = np.sum(self.xn * dout, axis=0)
dxn = self.gamma * dout
dxc = dxn / self.std
dstd = -np.sum((dxn * self.xc) / (self.std * self.std), axis=0)
dvar = 0.5 * dstd / self.std
dxc += (2.0 / self.batch_size) * self.xc * dvar
dmu = np.sum(dxc, axis=0)
dx = dxc - dmu / self.batch_size
self.dgamma = dgamma
self.dbeta = dbeta
return dx
class Convolution:
def __init__(self, W, b, stride=1, pad=0):
self.W = W
self.b = b
self.stride = stride
self.pad = pad
        # Intermediate data (used during the backward pass)
self.x = None
self.col = None
self.col_W = None
        # Gradients of the weight and bias parameters
self.dW = None
self.db = None
def forward(self, x):
FN, C, FH, FW = self.W.shape
N, C, H, W = x.shape
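        # Output spatial size: out = (in + 2*pad - filter) / stride + 1.
        # im2col unrolls every receptive field into a row so the convolution
        # becomes a single matrix product with the reshaped filters.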
out_h = 1 + int((H + 2*self.pad - FH) / self.stride)
out_w = 1 + int((W + 2*self.pad - FW) / self.stride)
col = im2col(x, FH, FW, self.stride, self.pad)
col_W = self.W.reshape(FN, -1).T
out = np.dot(col, col_W) + self.b
out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2)
self.x = x
self.col = col
self.col_W = col_W
return out
def backward(self, dout):
FN, C, FH, FW = self.W.shape
dout = dout.transpose(0, 2, 3, 1).reshape(-1, FN)
self.db = np.sum(dout, axis=0)
self.dW = np.dot(self.col.T, dout)
self.dW = self.dW.transpose(1, 0).reshape(FN, C, FH, FW)
dcol = np.dot(dout, self.col_W.T)
dx = col2im(dcol, self.x.shape, FH, FW, self.stride, self.pad)
return dx
class Pooling:
def __init__(self, pool_h, pool_w, stride=1, pad=0):
self.pool_h = pool_h
self.pool_w = pool_w
self.stride = stride
self.pad = pad
self.x = None
self.arg_max = None
def forward(self, x):
N, C, H, W = x.shape
out_h = int(1 + (H - self.pool_h) / self.stride)
out_w = int(1 + (W - self.pool_w) / self.stride)
col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)
col = col.reshape(-1, self.pool_h*self.pool_w)
arg_max = np.argmax(col, axis=1)
out = np.max(col, axis=1)
out = out.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)
self.x = x
self.arg_max = arg_max
return out
def backward(self, dout):
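        # Max pooling routes gradients only to the positions that produced the
        # maxima in the forward pass (stored in self.arg_max); all other inputs
        # receive zero gradient. col2im folds the column form back to image shape.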
dout = dout.transpose(0, 2, 3, 1)
pool_size = self.pool_h * self.pool_w
dmax = np.zeros((dout.size, pool_size))
dmax[np.arange(self.arg_max.size), self.arg_max.flatten()] = \
dout.flatten()
dmax = dmax.reshape(dout.shape + (pool_size,))
dcol = dmax.reshape(dmax.shape[0] * dmax.shape[1] * dmax.shape[2],
-1)
dx = col2im(dcol, self.x.shape, self.pool_h, self.pool_w,
self.stride, self.pad)
return dx
| 30.875421
| 91
| 0.434024
|
c4dcb3084fb8a2c3ea1a96798aa85f320a424622
| 11,228
|
py
|
Python
|
tests/test_imgaug.py
|
cannon/albumentations
|
112aaef4e447ba24bf1ebc2a7fdfc40bd342a2be
|
[
"MIT"
] | null | null | null |
tests/test_imgaug.py
|
cannon/albumentations
|
112aaef4e447ba24bf1ebc2a7fdfc40bd342a2be
|
[
"MIT"
] | null | null | null |
tests/test_imgaug.py
|
cannon/albumentations
|
112aaef4e447ba24bf1ebc2a7fdfc40bd342a2be
|
[
"MIT"
] | null | null | null |
import cv2
import imgaug as ia
import numpy as np
import pytest
from albumentations import Compose
from albumentations.augmentations.bbox_utils import (
convert_bboxes_from_albumentations,
convert_bboxes_to_albumentations,
)
import albumentations as A
from albumentations.imgaug.transforms import (
IAAPiecewiseAffine,
IAAFliplr,
IAAFlipud,
IAASuperpixels,
IAASharpen,
IAAAdditiveGaussianNoise,
IAAPerspective,
IAAAffine,
IAACropAndPad,
)
from tests.utils import set_seed
TEST_SEEDS = (0, 1, 42, 111, 9999)
@pytest.mark.parametrize("augmentation_cls", [IAASuperpixels, IAASharpen, IAAAdditiveGaussianNoise])
def test_imgaug_image_only_augmentations(augmentation_cls, image, mask):
aug = augmentation_cls(p=1)
data = aug(image=image, mask=mask)
assert data["image"].dtype == np.uint8
assert data["mask"].dtype == np.uint8
assert np.array_equal(data["mask"], mask)
@pytest.mark.parametrize("augmentation_cls", [IAAPiecewiseAffine, IAAPerspective])
def test_imgaug_dual_augmentations(augmentation_cls, image, mask):
aug = augmentation_cls(p=1)
data = aug(image=image, mask=mask)
assert data["image"].dtype == np.uint8
assert data["mask"].dtype == np.uint8
@pytest.mark.parametrize("augmentation_cls", [IAAPiecewiseAffine, IAAFliplr])
def test_imagaug_dual_augmentations_are_deterministic(augmentation_cls, image):
aug = augmentation_cls(p=1)
mask = np.copy(image)
for _i in range(10):
data = aug(image=image, mask=mask)
assert np.array_equal(data["image"], data["mask"])
def test_imagaug_fliplr_transform_bboxes(image):
aug = IAAFliplr(p=1)
mask = np.copy(image)
bboxes = [(10, 10, 20, 20), (20, 10, 30, 40)]
expect = [(80, 10, 90, 20), (70, 10, 80, 40)]
bboxes = convert_bboxes_to_albumentations(bboxes, "pascal_voc", rows=image.shape[0], cols=image.shape[1])
data = aug(image=image, mask=mask, bboxes=bboxes)
actual = convert_bboxes_from_albumentations(data["bboxes"], "pascal_voc", rows=image.shape[0], cols=image.shape[1])
assert np.array_equal(data["image"], data["mask"])
assert np.allclose(actual, expect)
def test_imagaug_flipud_transform_bboxes(image):
aug = IAAFlipud(p=1)
mask = np.copy(image)
dummy_class = 1234
bboxes = [(10, 10, 20, 20, dummy_class), (20, 10, 30, 40, dummy_class)]
expect = [(10, 80, 20, 90, dummy_class), (20, 60, 30, 90, dummy_class)]
bboxes = convert_bboxes_to_albumentations(bboxes, "pascal_voc", rows=image.shape[0], cols=image.shape[1])
data = aug(image=image, mask=mask, bboxes=bboxes)
actual = convert_bboxes_from_albumentations(data["bboxes"], "pascal_voc", rows=image.shape[0], cols=image.shape[1])
assert np.array_equal(data["image"], data["mask"])
assert np.allclose(actual, expect)
@pytest.mark.parametrize(
["aug", "keypoints", "expected"],
[
[IAAFliplr, [(20, 30, 0, 0)], [(80, 30, 0, 0)]],
[IAAFliplr, [(20, 30, 45, 0)], [(80, 30, 45, 0)]],
[IAAFliplr, [(20, 30, 90, 0)], [(80, 30, 90, 0)]],
#
[IAAFlipud, [(20, 30, 0, 0)], [(20, 70, 0, 0)]],
[IAAFlipud, [(20, 30, 45, 0)], [(20, 70, 45, 0)]],
[IAAFlipud, [(20, 30, 90, 0)], [(20, 70, 90, 0)]],
],
)
def test_keypoint_transform_format_xy(aug, keypoints, expected):
transform = Compose([aug(p=1)], keypoint_params={"format": "xy", "label_fields": ["labels"]})
image = np.ones((100, 100, 3))
transformed = transform(image=image, keypoints=keypoints, labels=np.ones(len(keypoints)))
assert np.allclose(expected, transformed["keypoints"])
@pytest.mark.parametrize(["aug", "keypoints", "expected"], [[IAAFliplr, [[20, 30, 0, 0]], [[79, 30, 0, 0]]]])
def test_iaa_transforms_emit_warning(aug, keypoints, expected):
with pytest.warns(UserWarning, match="IAAFliplr transformation supports only 'xy' keypoints augmentation"):
Compose([aug(p=1)], keypoint_params={"format": "xyas", "label_fields": ["labels"]})
@pytest.mark.parametrize(
["augmentation_cls", "params"],
[
[A.IAASuperpixels, {}],
[A.IAAAdditiveGaussianNoise, {}],
[A.IAACropAndPad, {}],
[A.IAAFliplr, {}],
[A.IAAFlipud, {}],
[A.IAAAffine, {}],
[A.IAAPiecewiseAffine, {}],
[A.IAAPerspective, {}],
],
)
@pytest.mark.parametrize("p", [0.5, 1])
@pytest.mark.parametrize("seed", TEST_SEEDS)
@pytest.mark.parametrize("always_apply", (False, True))
def test_imgaug_augmentations_serialization(augmentation_cls, params, p, seed, image, mask, always_apply):
aug = augmentation_cls(p=p, always_apply=always_apply, **params)
serialized_aug = A.to_dict(aug)
deserialized_aug = A.from_dict(serialized_aug)
set_seed(seed)
ia.seed(seed)
aug_data = aug(image=image, mask=mask)
set_seed(seed)
ia.seed(seed)
deserialized_aug_data = deserialized_aug(image=image, mask=mask)
assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
assert np.array_equal(aug_data["mask"], deserialized_aug_data["mask"])
@pytest.mark.parametrize(
["augmentation_cls", "params"],
[
[A.IAASuperpixels, {}],
[A.IAAAdditiveGaussianNoise, {}],
[A.IAACropAndPad, {}],
[A.IAAFliplr, {}],
[A.IAAFlipud, {}],
[A.IAAAffine, {}],
[A.IAAPiecewiseAffine, {}],
[A.IAAPerspective, {}],
],
)
@pytest.mark.parametrize("p", [0.5, 1])
@pytest.mark.parametrize("seed", TEST_SEEDS)
@pytest.mark.parametrize("always_apply", (False, True))
def test_imgaug_augmentations_for_bboxes_serialization(
augmentation_cls, params, p, seed, image, albumentations_bboxes, always_apply
):
aug = augmentation_cls(p=p, always_apply=always_apply, **params)
serialized_aug = A.to_dict(aug)
deserialized_aug = A.from_dict(serialized_aug)
set_seed(seed)
ia.seed(seed)
aug_data = aug(image=image, bboxes=albumentations_bboxes)
set_seed(seed)
ia.seed(seed)
deserialized_aug_data = deserialized_aug(image=image, bboxes=albumentations_bboxes)
assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
assert np.array_equal(aug_data["bboxes"], deserialized_aug_data["bboxes"])
@pytest.mark.parametrize(
["augmentation_cls", "params"],
[
[A.IAASuperpixels, {}],
[A.IAAAdditiveGaussianNoise, {}],
[A.IAACropAndPad, {}],
[A.IAAFliplr, {}],
[A.IAAFlipud, {}],
[A.IAAAffine, {}],
[A.IAAPiecewiseAffine, {}],
[A.IAAPerspective, {}],
],
)
@pytest.mark.parametrize("p", [0.5, 1])
@pytest.mark.parametrize("seed", TEST_SEEDS)
@pytest.mark.parametrize("always_apply", (False, True))
def test_imgaug_augmentations_for_keypoints_serialization(
augmentation_cls, params, p, seed, image, keypoints, always_apply
):
aug = augmentation_cls(p=p, always_apply=always_apply, **params)
serialized_aug = A.to_dict(aug)
deserialized_aug = A.from_dict(serialized_aug)
set_seed(seed)
ia.seed(seed)
aug_data = aug(image=image, keypoints=keypoints)
set_seed(seed)
ia.seed(seed)
deserialized_aug_data = deserialized_aug(image=image, keypoints=keypoints)
assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
assert np.array_equal(aug_data["keypoints"], deserialized_aug_data["keypoints"])
@pytest.mark.parametrize(
["augmentation_cls", "params"],
[
[IAAAffine, {"scale": 1.5}],
[IAAPiecewiseAffine, {"scale": 1.5}],
[IAAPerspective, {}],
],
)
def test_imgaug_transforms_binary_mask_interpolation(augmentation_cls, params):
"""Checks whether transformations based on DualTransform does not introduce a mask interpolation artifacts"""
aug = augmentation_cls(p=1, **params)
image = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)
mask = np.random.randint(low=0, high=2, size=(100, 100), dtype=np.uint8)
data = aug(image=image, mask=mask)
assert np.array_equal(np.unique(data["mask"]), np.array([0, 1]))
def __test_multiprocessing_support_proc(args):
x, transform = args
return transform(image=x)
@pytest.mark.parametrize(
["augmentation_cls", "params"],
[
[IAAAffine, {"scale": 1.5}],
[IAAPiecewiseAffine, {"scale": 1.5}],
[IAAPerspective, {}],
],
)
def test_imgaug_transforms_multiprocessing_support(augmentation_cls, params, multiprocessing_context):
"""Checks whether we can use augmentations in multiprocessing environments"""
aug = augmentation_cls(p=1, **params)
image = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)
pool = multiprocessing_context.Pool(8)
pool.map(__test_multiprocessing_support_proc, map(lambda x: (x, aug), [image] * 100))
pool.close()
pool.join()
@pytest.mark.parametrize(
["img_dtype", "px", "percent", "pad_mode", "pad_cval"],
[
[np.uint8, 10, None, cv2.BORDER_CONSTANT, 0],
[np.uint8, -10, None, cv2.BORDER_CONSTANT, 0],
[np.uint8, None, 0.1, cv2.BORDER_CONSTANT, 0],
[np.uint8, None, -0.1, cv2.BORDER_CONSTANT, 0],
],
)
def test_compare_crop_and_pad(img_dtype, px, percent, pad_mode, pad_cval):
h, w, c = 100, 100, 3
mode_mapping = {
cv2.BORDER_CONSTANT: "constant",
cv2.BORDER_REPLICATE: "edge",
cv2.BORDER_REFLECT101: "reflect",
cv2.BORDER_WRAP: "wrap",
}
pad_mode_iaa = mode_mapping[pad_mode]
bbox_params = A.BboxParams(format="pascal_voc")
keypoint_params = A.KeypointParams(format="xy", remove_invisible=False)
keypoints = np.random.randint(0, min(h, w), [10, 2])
bboxes = []
for i in range(10):
x1, y1 = np.random.randint(0, min(h, w) - 2, 2)
x2 = np.random.randint(x1 + 1, w - 1)
y2 = np.random.randint(y1 + 1, h - 1)
bboxes.append([x1, y1, x2, y2, 0])
transform_albu = A.Compose(
[
A.CropAndPad(
px=px,
percent=percent,
pad_mode=pad_mode,
pad_cval=pad_cval,
p=1,
interpolation=cv2.INTER_AREA
if (px is not None and px < 0) or (percent is not None and percent < 0)
else cv2.INTER_LINEAR,
)
],
bbox_params=bbox_params,
keypoint_params=keypoint_params,
)
transform_iaa = A.Compose(
[IAACropAndPad(px=px, percent=percent, pad_mode=pad_mode_iaa, pad_cval=pad_cval, p=1)],
bbox_params=bbox_params,
keypoint_params=keypoint_params,
)
if img_dtype == np.uint8:
img = np.random.randint(0, 256, (h, w, c), dtype=np.uint8)
else:
img = np.random.random((h, w, c)).astype(img_dtype)
res_albu = transform_albu(image=img, keypoints=keypoints, bboxes=bboxes)
res_iaa = transform_iaa(image=img, keypoints=keypoints, bboxes=bboxes)
for key, item in res_albu.items():
if key == "bboxes":
bboxes = np.array(res_iaa[key])
h = bboxes[:, 3] - bboxes[:, 1]
w = bboxes[:, 2] - bboxes[:, 0]
res_iaa[key] = bboxes[(h > 0) & (w > 0)]
assert np.allclose(item, res_iaa[key]), f"{key} are not equal"
| 36.33657
| 119
| 0.652298
|
0cbaf31872f20d877fa2ae9fd54be33841519ff4
| 1,069
|
py
|
Python
|
src/chrome/common/extensions/docs/server2/url_constants.py
|
jxjnjjn/chromium
|
435c1d02fd1b99001dc9e1e831632c894523580d
|
[
"Apache-2.0"
] | 9
|
2018-09-21T05:36:12.000Z
|
2021-11-15T15:14:36.000Z
|
src/chrome/common/extensions/docs/server2/url_constants.py
|
jxjnjjn/chromium
|
435c1d02fd1b99001dc9e1e831632c894523580d
|
[
"Apache-2.0"
] | null | null | null |
src/chrome/common/extensions/docs/server2/url_constants.py
|
jxjnjjn/chromium
|
435c1d02fd1b99001dc9e1e831632c894523580d
|
[
"Apache-2.0"
] | 3
|
2018-11-28T14:54:13.000Z
|
2020-07-02T07:36:07.000Z
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
GITHUB_URL = 'https://api.github.com/repos/GoogleChrome/chrome-app-samples'
GITHUB_BASE = 'https://github.com/GoogleChrome/chrome-app-samples/tree/master'
RAW_GITHUB_BASE = ('https://github.com/GoogleChrome/chrome-app-samples/raw/'
'master')
OMAHA_PROXY_URL = 'http://omahaproxy.appspot.com/json'
SVN_URL = 'http://src.chromium.org/chrome'
VIEWVC_URL = 'http://src.chromium.org/viewvc/chrome'
SVN_TRUNK_URL = SVN_URL + '/trunk'
SVN_BRANCH_URL = SVN_URL + '/branches'
OPEN_ISSUES_CSV_URL = (
'http://code.google.com/p/chromium/issues/csv?can=1&'
'q=Hotlist%3DKnownIssue%20Feature%3DApps+is%3Aopen')
CLOSED_ISSUES_CSV_URL = (
'http://code.google.com/p/chromium/issues/csv?can=1&'
'q=Hotlist%3DKnownIssue+Feature%3DApps+-is%3Aopen')
EXTENSIONS_SAMPLES = ('http://src.chromium.org/viewvc/chrome/trunk/src/chrome/'
'common/extensions/docs/examples')
| 48.590909
| 79
| 0.724977
|
658f39c4ca2a10c46ee3a3ab3646da50e9094717
| 4,168
|
py
|
Python
|
src/matchers/structure/s_ind_matchers/s_phipsi.py
|
MelvinYin/Defined_Proteins
|
75da20be82a47d85d27176db29580ab87d52b670
|
[
"BSD-3-Clause"
] | 2
|
2021-01-05T02:55:57.000Z
|
2021-04-16T15:49:08.000Z
|
src/matchers/structure/s_ind_matchers/s_phipsi.py
|
MelvinYin/Defined_Proteins
|
75da20be82a47d85d27176db29580ab87d52b670
|
[
"BSD-3-Clause"
] | null | null | null |
src/matchers/structure/s_ind_matchers/s_phipsi.py
|
MelvinYin/Defined_Proteins
|
75da20be82a47d85d27176db29580ab87d52b670
|
[
"BSD-3-Clause"
] | 1
|
2021-01-05T08:12:38.000Z
|
2021-01-05T08:12:38.000Z
|
import numpy as np
import math
from sklearn.mixture import BayesianGaussianMixture as GM
import sys
np.random.seed(1)
import random
random.seed(1)
class _PhipsiMatcher:
def __init__(self):
# only accept from a single relative_sno, only values.
self.to_skip = False
self.weight_scaling_factor = 10 # so self.weight is not too low.
self.q_scaling_factor = 1
self.weight_accom_factor = 0.2
def load(self, phipsis):
self.length = len(phipsis)
if np.allclose(phipsis, np.full(phipsis.shape, 360)):
self.to_skip = True
return
i_to_ignore = np.array(phipsis == np.array([360., 360.]))[:, 0]
self.ignored_i = i_to_ignore
phipsis = phipsis[~i_to_ignore]
phipsi_median = np.median(phipsis, axis=0)
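        # Centre the dihedral angles on their median and wrap the differences
        # back into (-180, 180] so that the circular nature of phi/psi does not
        # split a single cluster across the +/-180 boundary.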
phipsis = phipsis - phipsi_median
phipsis[phipsis > 180] -= 360.
phipsis[phipsis < -180] += 360.
num_component = min(10, self.length)
gm_ = GM(n_components=num_component, max_iter=10000)
gm_.fit(X=phipsis)
weights = gm_.weights_
to_keep = weights > 0.05
num_component = sum(to_keep)
gm = GM(n_components=num_component, max_iter=10000)
gm.fit(X=phipsis)
precisions = gm.precisions_cholesky_
# self.means = gm.means_
self.phipsis = phipsis
self.medians = phipsi_median
weight = np.mean(precisions[:, 0, 0]) \
+ np.mean(precisions[:, 1, 1])
weight = weight * self.weight_scaling_factor # for matcher weight
self.weight = min(weight, 1)
self.weight *= self.weight_accom_factor
covs = gm.covariances_
cov_invs = np.array([np.linalg.inv(cov) for cov in covs])
cluster_dist = gm.predict_proba(phipsis)
self.cov_dist = np.einsum("ijk, li->ljk", cov_invs, cluster_dist)
self.gm = gm # for matcher weight
# matcher_weight should be a product of the precision/clustering
# behaviour of the distribution, and the posterior probability of the
# queried point. So, higher clustering but point does not belong in
# distribution => other pressures acting on queried point => should
# assign lower weight. Lower clustering and point belong => low
# clustering means low pressure on point, so it shouldn't matter that
# much.
return
def query(self, q_phipsi):
if self.to_skip:
return 0., np.zeros(self.length)
if np.allclose(q_phipsi, np.full(q_phipsi.shape, 360)):
return 0., np.zeros(self.length)
q_phipsi = q_phipsi - self.medians
q_phipsi[q_phipsi > 180] -= 360.
q_phipsi[q_phipsi < -180] += 360.
# get matcher weight
q_fit_in_dist_log = self.gm.score_samples(np.array([q_phipsi]))
assert len(q_fit_in_dist_log) == 1
q_fit_in_dist_log = q_fit_in_dist_log[0]
# 9.69 benchmark, for which max will be 1
q_fit_in_dist = np.exp(q_fit_in_dist_log + 9.69)
q_fit_in_dist = q_fit_in_dist * self.q_scaling_factor
q_fit_in_dist = min(q_fit_in_dist, 1.)
# scaling is a bit harsh
matcher_weight = q_fit_in_dist * self.weight
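        # Mahalanobis-style distance from the query to every stored phi/psi pair,
        # using the responsibility-weighted inverse covariances (self.cov_dist);
        # 1 - erf(d) then maps each distance to a score in (0, 1], highest when
        # the query coincides with the stored point.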
to_each_phipsis = self.phipsis - q_phipsi
erf_arg = np.sqrt(np.einsum('ki, kij, kj -> k', to_each_phipsis,
self.cov_dist, to_each_phipsis))
ps = np.array([1 - math.erf(i) for i in erf_arg])
if len(ps) != self.length:
assert any(self.ignored_i)
i_s = np.argwhere(self.ignored_i)
for i in i_s[::-1]:
i = i[0]
ps = np.concatenate([ps[:i], [0.], ps[i:]])
assert len(ps) == self.length
return matcher_weight, ps
class PhipsiMatcher:
def __init__(self):
self.matcher = _PhipsiMatcher()
def load(self, df):
phipsi = df[['phi', 'psi']].values
self.matcher.load(phipsi)
return
def query(self, df):
phipsi = df[['phi', 'psi']].values
assert len(phipsi) == 1
phipsi = phipsi[0]
return self.matcher.query(phipsi)
| 37.214286
| 77
| 0.609165
|
b160932b0fcc2125dc61d024054678955b01906a
| 386
|
py
|
Python
|
defacto_zips/w9s1_slam/build/hector_slam/hector_slam_launch/catkin_generated/pkg.installspace.context.pc.py
|
Psyf/WincentPanzer
|
da546042ce5b22f45e84f3568a9d0825695d5aaa
|
[
"MIT"
] | null | null | null |
defacto_zips/w9s1_slam/build/hector_slam/hector_slam_launch/catkin_generated/pkg.installspace.context.pc.py
|
Psyf/WincentPanzer
|
da546042ce5b22f45e84f3568a9d0825695d5aaa
|
[
"MIT"
] | null | null | null |
defacto_zips/w9s1_slam/build/hector_slam/hector_slam_launch/catkin_generated/pkg.installspace.context.pc.py
|
Psyf/WincentPanzer
|
da546042ce5b22f45e84f3568a9d0825695d5aaa
|
[
"MIT"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hector_slam_launch"
PROJECT_SPACE_DIR = "/home/pi/Desktop/w9s1_slam/install"
PROJECT_VERSION = "0.3.5"
| 42.888889
| 68
| 0.712435
|
f7f27a4456f17023f481f09eaf68cf69a116aa6e
| 8,974
|
py
|
Python
|
code/dgrechka/train_mobileNetV2_bottleneck.py
|
dgrechka/bengaliai-cv19
|
9ef15c5b140628337ae6efe0d76e7ec5d291dc17
|
[
"MIT"
] | null | null | null |
code/dgrechka/train_mobileNetV2_bottleneck.py
|
dgrechka/bengaliai-cv19
|
9ef15c5b140628337ae6efe0d76e7ec5d291dc17
|
[
"MIT"
] | null | null | null |
code/dgrechka/train_mobileNetV2_bottleneck.py
|
dgrechka/bengaliai-cv19
|
9ef15c5b140628337ae6efe0d76e7ec5d291dc17
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import sys
import os
sys.path.append(os.path.join(__file__,'..','..'))
from tfDataIngest import tfDataSetParquet as tfDsParquet
from tfDataIngest import tfDataSetParquetAnnotateTrain as tfDsParquetAnnotation
import os
import pandas as pd
from tqdm import tqdm
from glob import glob
from models.MobileNetV2 import GetModel
inputDataDir = sys.argv[1]
validationFile = sys.argv[2]
experiment_output_dir = sys.argv[3]
dropoutRate = 0.2
batchSize = 8
seed = 313143
print("validation set samples listing: {0}".format(validationFile))
valDf = pd.read_csv(validationFile)
valIds = set(valDf.image_id)
print("{0} samples will be used for validation".format(len(valIds)))
if __name__ == "__main__":
tf.random.set_seed(seed+563)
print("Data dir is {0}".format(inputDataDir))
dataFileNames = glob("{0}/train*.parquet".format(inputDataDir))
trainLabelsFileName = "{0}/train.csv".format(inputDataDir)
N = len(pd.read_csv(trainLabelsFileName))
#N = 5000
print("There are {0} training samples in total".format(N))
print("Parquet files count is {0}".format(len(dataFileNames)))
print("First is {0}".format(dataFileNames[0]))
def constructAllSamplesDs():
ds = tfDsParquet.create_parquet_dataset(dataFileNames)
ds = tfDsParquetAnnotation.annotate(ds,trainLabelsFileName)
return ds
# reshaping to match the input shape
def prepareInput(_,labels,pixels):
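        # Unpack the three grapheme labels (root / vowel / consonant), one-hot
        # encode them, tile the single-channel pixels to 3 channels and resize
        # to 224x224 so the input matches what MobileNetV2 expects.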
#pixels = tf.cast(pixels, tf.float32)
root,vowel,consonant = tf.unstack(labels,3)
root = tf.one_hot(root, 168, dtype=tf.uint8)
vowel = tf.one_hot(vowel, 11, dtype=tf.uint8)
consonant = tf.one_hot(consonant, 7, dtype=tf.uint8)
colored = tf.tile(tf.expand_dims(pixels,-1),[1,1,3])
pixels = tf.image.resize(colored, [224,224], method='gaussian')
#HEIGHT = 137
#WIDTH = 236
#pixels = tf.pad(colored,[[43,44],[0,0],[0,0]])[:,6:230,:]
labelsDict = {
"root": tf.reshape(root,(168,)),
"vowel": tf.reshape(vowel,(11,)),
"consonant": tf.reshape(consonant,(7,))
}
return pixels, labelsDict
def inValidationFilter(ident):
identBytes = ident.numpy()
identStr = identBytes.decode('utf-8')
return identStr in valIds
def inValFilter(ident,_dummy_1,_dummy_2):
return tf.py_function(inValidationFilter, [ident], (tf.bool))
def inTrainFilter(ident,_dummy_1,_dummy_2):
return not(tf.py_function(inValidationFilter, [ident], (tf.bool)))
allDs = constructAllSamplesDs()
allDs = allDs.take(N)
allDs = allDs.cache()
trDs = allDs.filter(inTrainFilter)
trDs = trDs.map(prepareInput) #tf.data.experimental.AUTOTUNE
#trDs = trDs.take(1000)
#trDs = trDs.cache(os.path.join(cacheLocation,'trCache'))
print("Caching all DS")
#for element in tqdm(allDs.as_numpy_iterator(),ascii=True,total=N):
    for element in allDs.as_numpy_iterator():
        pass  # iterate once so that the cached dataset is fully materialized
trDs = trDs.repeat()
#trDs = trDs.prefetch(128)
trDs = trDs.shuffle(512,seed=seed+123678, reshuffle_each_iteration=True)
trDs = trDs.batch(batchSize)
#trDs = trDs.prefetch(128)
#valDs = constructAllSamplesDs()
valDs = allDs.filter(inValFilter)
valDs = valDs.map(prepareInput)
valDs = valDs.batch(batchSize)
#valDs = valDs.cache(os.path.join(cacheLocation,'vaCache'))
#valDs = valDs.cache()
print("Training dataSet is {0}".format(trDs))
print("Validation dataSet is {0}".format(valDs))
model,cnn = GetModel(dropoutRate, seed+44)
print("Model constructed")
print(model.summary())
def catCeFromLogitsDoubled(y_true, y_pred):
return tf.keras.losses.categorical_crossentropy(y_true, y_pred, from_logits=True)*2.0
def catCeFromLogits(y_true, y_pred):
return tf.keras.losses.categorical_crossentropy(y_true, y_pred, from_logits=True)
class RecallForLogits2(tf.keras.metrics.Metric):
def __init__(self, name='recall', **kwargs):
self.M = 200
            super(RecallForLogits2, self).__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name='tp', initializer='zeros',shape=(self.M,))
self.false_negatives = self.add_weight(name='fn', initializer='zeros',shape=(self.M,))
def update_state(self, y_true, y_pred_logit, sample_weight=None):
# shape is: B x M(ClassesCount)
#y_pred_shape = tf.shape(y_pred_logit)
#B = y_pred_shape[0]
idx_max = tf.math.argmax(y_pred_logit,1) # (B,)
y_pred_bool = tf.one_hot(idx_max,self.M,on_value=True, off_value=False) # BxM
#print("y_pred_bool shape: {0}".format(y_pred_bool.shape))
#y_pred_bool = tf.expand_dims(y_pred_bool,0)
#y_pred_bool = tf.tile(y_pred_bool,[B,1])
y_true_bool = tf.cast(y_true,dtype=tf.bool)
not_pred_bool = tf.math.logical_not(y_pred_bool)
localTP = tf.math.reduce_sum(tf.cast(tf.math.logical_and(y_pred_bool,y_true_bool),dtype=tf.float32),0) # along Batch
localFN = tf.math.reduce_sum(tf.cast(tf.math.logical_and(not_pred_bool,y_true_bool),dtype=tf.float32),0) # along Batch
# print("true_positives shape: {0}".format(self.true_positives.shape))
# print("false_negatives shape: {0}".format(self.false_negatives.shape))
# print("localTP shape: {0}".format(localTP.shape))
# print("localFN shape: {0}".format(localFN.shape))
self.true_positives.assign_add(localTP)
self.false_negatives.assign_add(localFN)
def result(self):
print("result self.true_positives shape: {0}".format(self.true_positives.shape))
nom = tf.cast(self.true_positives,dtype=tf.float32) # shape (M,)
denom = tf.cast(self.true_positives + self.false_negatives,dtype=tf.float32) # shape (M,)
print("denom shape: {0}".format(denom.shape))
perClassRecall = tf.cond(denom < 0.5, lambda: tf.zeros([self.M],dtype=tf.float32), lambda: nom/denom)
print("perClassRecall shape: {0}".format(perClassRecall.shape))
macroRecallNom = tf.math.reduce_sum(perClassRecall)
print("macroRecallNom shape: {0}".format(macroRecallNom.shape))
macroRecallDenom = tf.reduce_sum(tf.cast(denom > 0.0,dtype=tf.float32))
print("macroRecallDenom shape: {0}".format(macroRecallDenom.shape))
macroRecall = macroRecallNom/macroRecallDenom
print("macroRecall shape: {0}".format(macroRecall.shape))
return macroRecall
class RecallForLogits(tf.keras.metrics.Recall):
def __init__(self, name='recall', **kwargs):
super(RecallForLogits, self).__init__(name=name, **kwargs)
def update_state(self, y_true, y_pred, sample_weight=None):
probs = tf.nn.softmax(y_pred)
super().update_state(y_true, probs, sample_weight)
def result(self):
return super().result()
model.compile(
#optimizer=tf.keras.optimizers.SGD(momentum=.5,nesterov=True, clipnorm=1.),
#optimizer=tf.keras.optimizers.RMSprop(),
optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
loss= {
"root":catCeFromLogitsDoubled,
"vowel":catCeFromLogits,
"consonant":catCeFromLogits
},
metrics=[RecallForLogits()]
)
print("model compiled")
print(model.summary())
csv_logger = tf.keras.callbacks.CSVLogger(os.path.join(experiment_output_dir,'training_log.csv'),append=False)
callbacks = [
# Interrupt training if `val_loss` stops improving for over 2 epochs
tf.keras.callbacks.EarlyStopping(patience=int(5), monitor='root_loss',mode='min'),
# Write TensorBoard logs to `./logs` directory
# tf.keras.callbacks.TensorBoard(log_dir=experiment_output_dir, histogram_freq = 0, profile_batch=0),
tf.keras.callbacks.ModelCheckpoint(
filepath=os.path.join(experiment_output_dir,"weights.hdf5"),
save_best_only=True,
verbose=True,
mode='min',
save_weights_only=True,
monitor='root_loss'),
tf.keras.callbacks.TerminateOnNaN(),
csv_logger,
#reduce_lr
]
spe = (N-len(valIds))//batchSize
#spe = N//batchSize
print("Steps per epoch {0}".format(spe))
    fitHistory = model.fit(x = trDs, \
validation_data = valDs,
verbose = 2,
callbacks=callbacks,
                           shuffle=False, # dataset is shuffled explicitly
steps_per_epoch= spe,
#steps_per_epoch= N//batchSize,
#steps_per_epoch= 4096,
#epochs=int(10000)
epochs = 10
)
print("Done")
| 39.359649
| 130
| 0.642077
|
6dc69db7e078fd8ca04b88890e35ac34dc3faf47
| 30,304
|
py
|
Python
|
adafruit_clue.py
|
FoamyGuy/Adafruit_CircuitPython_CLUE
|
89fec08cd82f6f6243f0f448e24c029e78063756
|
[
"MIT"
] | 1
|
2020-03-02T20:51:47.000Z
|
2020-03-02T20:51:47.000Z
|
adafruit_clue.py
|
FoamyGuy/Adafruit_CircuitPython_CLUE
|
89fec08cd82f6f6243f0f448e24c029e78063756
|
[
"MIT"
] | null | null | null |
adafruit_clue.py
|
FoamyGuy/Adafruit_CircuitPython_CLUE
|
89fec08cd82f6f6243f0f448e24c029e78063756
|
[
"MIT"
] | 2
|
2020-03-05T19:43:45.000Z
|
2020-03-08T19:29:57.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2020 Kattni Rembor for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_clue`
================================================================================
A high level library representing all the features of the Adafruit CLUE.
* Author(s): Kattni Rembor
Implementation Notes
--------------------
**Hardware:**
.. "* `Adafruit CLUE - nRF52840 Express with Bluetooth LE <https://www.adafruit.com/product/4500>`_"
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
* Adafruit's Register library: https://github.com/adafruit/Adafruit_CircuitPython_Register
* Adafruit's LSM6DS CircuitPython Library:
https://github.com/adafruit/Adafruit_CircuitPython_LSM6DS
* Adafruit's LIS3MDL CircuitPython Library:
https://github.com/adafruit/Adafruit_CircuitPython_LIS3MDL
* Adafruit's APDS9960 CircuitPython Library:
https://github.com/adafruit/Adafruit_CircuitPython_APDS9960
* Adafruit's BMP280 CircuitPython Library:
https://github.com/adafruit/Adafruit_CircuitPython_BMP280
* Adafruit's SHT31D CircuitPython Library:
https://github.com/adafruit/Adafruit_CircuitPython_SHT31D
* Adafruit's NeoPixel CircuitPython Library:
https://github.com/adafruit/Adafruit_CircuitPython_NeoPixel
"""
import time
import array
import math
import board
import digitalio
import neopixel
import adafruit_apds9960.apds9960
import adafruit_bmp280
import adafruit_lis3mdl
import adafruit_lsm6ds
import adafruit_sht31d
import audiobusio
import audiopwmio
import audiocore
import gamepad
import touchio
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_CLUE.git"
class _ClueSimpleTextDisplay:
"""Easily display lines of text on CLUE display."""
def __init__(self, title=None, title_color=0xFFFFFF, title_scale=1, # pylint: disable=too-many-arguments
text_scale=1, font=None, colors=None):
import displayio
import terminalio
from adafruit_display_text import label
if not colors:
colors = (Clue.VIOLET, Clue.GREEN, Clue.RED, Clue.CYAN, Clue.ORANGE,
Clue.BLUE, Clue.MAGENTA, Clue.SKY, Clue.YELLOW, Clue.PURPLE)
self._colors = colors
self._label = label
self._display = board.DISPLAY
self._font = terminalio.FONT
if font:
self._font = font
self.text_group = displayio.Group(max_size=20, scale=text_scale)
if title:
# Fail gracefully if title is longer than 60 characters.
if len(title) > 60:
raise ValueError("Title must be 60 characters or less.")
title = label.Label(self._font, text=title, max_glyphs=60, color=title_color,
scale=title_scale)
title.x = 0
title.y = 8
self._y = title.y + 18
self.text_group.append(title)
else:
self._y = 3
self._lines = []
for num in range(1):
self._lines.append(self.add_text_line(color=colors[num % len(colors)]))
def __getitem__(self, item):
"""Fetch the Nth text line Group"""
if len(self._lines) - 1 < item:
for _ in range(item - (len(self._lines) - 1)):
self._lines.append(self.add_text_line(color=self._colors[item % len(self._colors)]))
return self._lines[item]
def add_text_line(self, color=0xFFFFFF):
"""Adds a line on the display of the specified color and returns the label object."""
text_label = self._label.Label(self._font, text="", max_glyphs=45, color=color)
text_label.x = 0
text_label.y = self._y
self._y = text_label.y + 13
self.text_group.append(text_label)
return text_label
def show(self):
"""Call show() to display the data list."""
self._display.show(self.text_group)
def show_terminal(self):
"""Revert to terminalio screen."""
self._display.show(None)
class Clue: # pylint: disable=too-many-instance-attributes, too-many-public-methods
"""Represents a single CLUE."""
# Color variables available for import.
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
ORANGE = (255, 150, 0)
GREEN = (0, 255, 0)
TEAL = (0, 255, 120)
CYAN = (0, 255, 255)
BLUE = (0, 0, 255)
PURPLE = (180, 0, 255)
MAGENTA = (255, 0, 150)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
GOLD = (255, 222, 30)
PINK = (242, 90, 255)
AQUA = (50, 255, 255)
JADE = (0, 255, 40)
AMBER = (255, 100, 0)
VIOLET = (255, 0, 255)
SKY = (0, 180, 255)
RAINBOW = (RED, ORANGE, YELLOW, GREEN, BLUE, PURPLE)
def __init__(self):
# Define I2C:
self._i2c = board.I2C()
# Define touch:
# Initially, self._touches stores the pin used for a particular touch. When that touch is
# used for the first time, the pin is replaced with the corresponding TouchIn object.
# This saves a little RAM over using a separate read-only pin tuple.
# For example, after `clue.touch_2`, self._touches is equivalent to:
# [board.D0, board.D1, touchio.TouchIn(board.D2)]
self._touches = [board.D0, board.D1, board.D2]
self._touch_threshold_adjustment = 0
# Define buttons:
self._a = digitalio.DigitalInOut(board.BUTTON_A)
self._a.switch_to_input(pull=digitalio.Pull.UP)
self._b = digitalio.DigitalInOut(board.BUTTON_B)
self._b.switch_to_input(pull=digitalio.Pull.UP)
self._gamepad = gamepad.GamePad(self._a, self._b)
# Define LEDs:
self._white_leds = digitalio.DigitalInOut(board.WHITE_LEDS)
self._white_leds.switch_to_output()
self._pixel = neopixel.NeoPixel(board.NEOPIXEL, 1)
self._red_led = digitalio.DigitalInOut(board.L)
self._red_led.switch_to_output()
# Define audio:
self._mic = audiobusio.PDMIn(board.MICROPHONE_CLOCK, board.MICROPHONE_DATA,
sample_rate=16000, bit_depth=16)
self._sample = None
self._samples = None
self._sine_wave = None
self._sine_wave_sample = None
# Define sensors:
# Accelerometer/gyroscope:
self._accelerometer = adafruit_lsm6ds.LSM6DS33(self._i2c)
# Magnetometer:
self._magnetometer = adafruit_lis3mdl.LIS3MDL(self._i2c)
# DGesture/proximity/color/light sensor:
self._sensor = adafruit_apds9960.apds9960.APDS9960(self._i2c)
# Humidity sensor:
self._humidity = adafruit_sht31d.SHT31D(self._i2c)
# Barometric pressure sensor:
self._pressure = adafruit_bmp280.Adafruit_BMP280_I2C(self._i2c)
# Create displayio object for passing.
self.display = board.DISPLAY
def _touch(self, i):
if not isinstance(self._touches[i], touchio.TouchIn):
# First time referenced. Get the pin from the slot for this touch
# and replace it with a TouchIn object for the pin.
self._touches[i] = touchio.TouchIn(self._touches[i])
self._touches[i].threshold += self._touch_threshold_adjustment
return self._touches[i].value
@property
def touch_0(self):
"""Detect touch on capacitive touch pad 0.
.. image :: ../docs/_static/pad_0.jpg
:alt: Pad 0
This example prints when pad 0 is touched.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
while True:
if clue.touch_0:
print("Touched pad 0")
"""
return self._touch(0)
@property
def touch_1(self):
"""Detect touch on capacitive touch pad 1.
.. image :: ../docs/_static/pad_1.jpg
:alt: Pad 1
This example prints when pad 1 is touched.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
while True:
if clue.touch_1:
print("Touched pad 1")
"""
return self._touch(1)
@property
def touch_2(self):
"""Detect touch on capacitive touch pad 2.
.. image :: ../docs/_static/pad_2.jpg
:alt: Pad 2
This example prints when pad 2 is touched.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
while True:
if clue.touch_2:
print("Touched pad 2")
"""
return self._touch(2)
@property
def button_a(self):
"""``True`` when Button A is pressed. ``False`` if not.
.. image :: ../docs/_static/button_a.jpg
:alt: Button A
This example prints when button A is pressed.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
while True:
if clue.button_a:
print("Button A pressed")
"""
return not self._a.value
@property
def button_b(self):
"""``True`` when Button B is pressed. ``False`` if not.
.. image :: ../docs/_static/button_b.jpg
:alt: Button B
This example prints when button B is pressed.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
while True:
if clue.button_b:
print("Button B pressed")
"""
return not self._b.value
@property
def were_pressed(self):
"""Returns a set of the buttons that have been pressed.
.. image :: ../docs/_static/button_b.jpg
:alt: Button B
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
while True:
print(clue.were_pressed)
"""
ret = set()
pressed = self._gamepad.get_pressed()
for button, mask in (('A', 0x01), ('B', 0x02)):
if mask & pressed:
ret.add(button)
return ret
def shake(self, shake_threshold=30, avg_count=10, total_delay=0.1):
"""
Detect when the accelerometer is shaken. Optional parameters:
:param shake_threshold: Increase or decrease to change shake sensitivity. This
requires a minimum value of 10. 10 is the total
acceleration if the board is not moving, therefore
anything less than 10 will erroneously report a constant
shake detected. (Default 30)
:param avg_count: The number of readings taken and used for the average
acceleration. (Default 10)
:param total_delay: The total time in seconds it takes to obtain avg_count
readings from acceleration. (Default 0.1)
"""
shake_accel = (0, 0, 0)
for _ in range(avg_count):
# shake_accel creates a list of tuples from acceleration data.
# zip takes multiple tuples and zips them together, as in:
# In : zip([-0.2, 0.0, 9.5], [37.9, 13.5, -72.8])
# Out: [(-0.2, 37.9), (0.0, 13.5), (9.5, -72.8)]
# map applies sum to each member of this tuple, resulting in a
# 3-member list. tuple converts this list into a tuple which is
# used as shake_accel.
shake_accel = tuple(map(sum, zip(shake_accel, self.acceleration)))
time.sleep(total_delay / avg_count)
avg = tuple(value / avg_count for value in shake_accel)
total_accel = math.sqrt(sum(map(lambda x: x * x, avg)))
return total_accel > shake_threshold
@property
def acceleration(self):
"""Obtain acceleration data from the x, y and z axes.
.. image :: ../docs/_static/accelerometer.jpg
:alt: Accelerometer
This example prints the values. Try moving the board to see how the printed values change.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
while True:
print("Accel: {:.2f} {:.2f} {:.2f}".format(*clue.acceleration))
"""
return self._accelerometer.acceleration
@property
def gyro(self):
"""Obtain x, y, z angular velocity values in degrees/second.
.. image :: ../docs/_static/accelerometer.jpg
:alt: Gyro
This example prints the values. Try moving the board to see how the printed values change.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
while True:
print("Gyro: {:.2f} {:.2f} {:.2f}".format(*clue.gyro))
"""
return self._accelerometer.gyro
@property
def magnetic(self):
"""Obtain x, y, z magnetic values in microteslas.
.. image :: ../docs/_static/magnetometer.jpg
:alt: Magnetometer
This example prints the values. Try moving the board to see how the printed values change.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
while True:
print("Magnetic: {:.3f} {:.3f} {:.3f}".format(*clue.magnetic))
"""
return self._magnetometer.magnetic
@property
def proximity(self):
"""A relative proximity to the sensor in values from 0 - 255.
.. image :: ../docs/_static/proximity.jpg
:alt: Proximity sensor
This example prints the value. Try moving your hand towards and away from the front of the
board to see how the printed values change.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
while True:
print("Proximity: {}".format(clue.proximity))
"""
self._sensor.enable_proximity = True
return self._sensor.proximity
@property
def color(self):
"""The red, green, blue, and clear light values. (r, g, b, c)
.. image :: ../docs/_static/proximity.jpg
:alt: Color sensor
This example prints the values. Try holding something up to the sensor to see the values
change. Works best with white LEDs enabled.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
while True:
print("Color: R: {} G: {} B: {} C: {}".format(*clue.color))
"""
self._sensor.enable_color = True
return self._sensor.color_data
@property
def gesture(self):
"""A gesture code if gesture is detected. Shows ``0`` if no gesture detected.
``1`` if an UP gesture is detected, ``2`` if DOWN, ``3`` if LEFT, and ``4`` if RIGHT.
.. image :: ../docs/_static/proximity.jpg
:alt: Gesture sensor
This example prints the gesture values. Try moving your hand up, down, left or right over
the sensor to see the value change.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
while True:
print("Gesture: {}".format(clue.gesture))
"""
self._sensor.enable_gesture = True
return self._sensor.gesture()
@property
def humidity(self):
"""The measured relative humidity in percent.
.. image :: ../docs/_static/humidity.jpg
:alt: Humidity sensor
This example prints the value. Try breathing on the sensor to see the values change.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
while True:
print("Humidity: {:.1f}%".format(clue.humidity))
"""
return self._humidity.relative_humidity
@property
def pressure(self):
"""The barometric pressure in hectoPascals.
.. image :: ../docs/_static/pressure.jpg
:alt: Barometric pressure sensor
This example prints the value.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
print("Pressure: {:.3f}hPa".format(clue.pressure))
"""
return self._pressure.pressure
@property
def temperature(self):
"""The temperature in degrees Celsius.
.. image :: ../docs/_static/pressure.jpg
:alt: Temperature sensor
This example prints the value. Try touching the sensor to see the value change.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
print("Temperature: {:.1f}C".format(clue.temperature))
"""
return self._pressure.temperature
@property
def altitude(self):
"""The altitude in meters based on the sea level pressure at your location. You must set
``sea_level_pressure`` to receive an accurate reading.
.. image :: ../docs/_static/pressure.jpg
:alt: Altitude sensor
This example prints the value. Try moving the board vertically to see the value change.
.. code-block:: python
from adafruit_clue import clue
clue.sea_level_pressure = 1015
print("Altitude: {:.1f}m".format(clue.altitude))
"""
return self._pressure.altitude
@property
def sea_level_pressure(self):
"""Set to the pressure at sea level at your location, before reading altitude for
the most accurate altitude measurement.
.. image :: ../docs/_static/pressure.jpg
:alt: Barometric pressure sensor
This example prints the value.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
clue.sea_level_pressure = 1015
print("Pressure: {:.3f}hPa".format(clue.pressure))
"""
return self._pressure.sea_level_pressure
@sea_level_pressure.setter
def sea_level_pressure(self, value):
self._pressure.sea_level_pressure = value
@property
def white_leds(self):
"""The red led next to the USB plug labeled LED.
.. image :: ../docs/_static/white_leds.jpg
:alt: White LEDs
This example turns on the white LEDs.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
clue.white_leds = True
"""
return self._white_leds.value
@white_leds.setter
def white_leds(self, value):
self._white_leds.value = value
@property
def red_led(self):
"""The red led next to the USB plug labeled LED.
.. image :: ../docs/_static/red_led.jpg
:alt: Red LED
This example turns on the red LED.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
clue.red_led = True
"""
return self._red_led.value
@red_led.setter
def red_led(self, value):
self._red_led.value = value
@property
def pixel(self):
"""The NeoPixel RGB LED.
.. image :: ../docs/_static/neopixel.jpg
:alt: NeoPixel
This example turns the NeoPixel purple.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
while True:
clue.pixel.fill((255, 0, 255))
"""
return self._pixel
@staticmethod
def _sine_sample(length):
tone_volume = (2 ** 15) - 1
shift = 2 ** 15
for i in range(length):
yield int(tone_volume * math.sin(2*math.pi*(i / length)) + shift)
def _generate_sample(self, length=100):
if self._sample is not None:
return
self._sine_wave = array.array("H", self._sine_sample(length))
self._sample = audiopwmio.PWMAudioOut(board.SPEAKER)
self._sine_wave_sample = audiocore.RawSample(self._sine_wave)
def play_tone(self, frequency, duration):
""" Produce a tone using the speaker. Try changing frequency to change
the pitch of the tone.
:param int frequency: The frequency of the tone in Hz
:param float duration: The duration of the tone in seconds
.. image :: ../docs/_static/speaker.jpg
:alt: Speaker
This example plays a 880 Hz tone for a duration of 1 second.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
clue.play_tone(880, 1)
"""
# Play a tone of the specified frequency (hz).
self.start_tone(frequency)
time.sleep(duration)
self.stop_tone()
def start_tone(self, frequency):
""" Produce a tone using the speaker. Try changing frequency to change
the pitch of the tone.
:param int frequency: The frequency of the tone in Hz
.. image :: ../docs/_static/speaker.jpg
:alt: Speaker
This example plays a 523Hz tone when button A is pressed and a 587Hz tone when button B is
pressed, only while the buttons are being pressed.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
while True:
if clue.button_a:
clue.start_tone(523)
elif clue.button_b:
clue.start_tone(587)
else:
clue.stop_tone()
"""
length = 100
if length * frequency > 350000:
length = 350000 // frequency
self._generate_sample(length)
# Start playing a tone of the specified frequency (hz).
self._sine_wave_sample.sample_rate = int(len(self._sine_wave) * frequency)
if not self._sample.playing:
self._sample.play(self._sine_wave_sample, loop=True)
def stop_tone(self):
""" Use with start_tone to stop the tone produced.
.. image :: ../docs/_static/speaker.jpg
:alt: Speaker
This example plays a 523Hz tone when button A is pressed and a 587Hz tone when button B is
pressed, only while the buttons are being pressed.
To use with the CLUE:
.. code-block:: python
from adafruit_clue import clue
while True:
if clue.button_a:
clue.start_tone(523)
elif clue.button_b:
clue.start_tone(587)
else:
clue.stop_tone()
"""
# Stop playing any tones.
if self._sample is not None and self._sample.playing:
self._sample.stop()
self._sample.deinit()
self._sample = None
@staticmethod
def _normalized_rms(values):
mean_values = int(sum(values) / len(values))
return math.sqrt(sum(float(sample - mean_values) * (sample - mean_values)
for sample in values) / len(values))
@property
def sound_level(self):
"""Obtain the sound level from the microphone (sound sensor).
.. image :: ../docs/_static/microphone.jpg
:alt: Microphone (sound sensor)
This example prints the sound levels. Try clapping or blowing on
the microphone to see the levels change.
.. code-block:: python
from adafruit_clue import clue
while True:
print(clue.sound_level)
"""
if self._sample is None:
self._samples = array.array('H', [0] * 160)
self._mic.record(self._samples, len(self._samples))
return self._normalized_rms(self._samples)
def loud_sound(self, sound_threshold=200):
"""Utilise a loud sound as an input.
:param int sound_threshold: Threshold sound level must exceed to return true (Default: 200)
.. image :: ../docs/_static/microphone.jpg
:alt: Microphone (sound sensor)
        This example turns the NeoPixel LED green each time you make a loud sound.
Try clapping or blowing onto the microphone to trigger it.
.. code-block:: python
from adafruit_clue import clue
while True:
if clue.loud_sound():
clue.pixel.fill((0, 50, 0))
else:
clue.pixel.fill(0)
You may find that the code is not responding how you would like.
If this is the case, you can change the loud sound threshold to
make it more or less responsive. Setting it to a higher number
means it will take a louder sound to trigger. Setting it to a
lower number will take a quieter sound to trigger. The following
example shows the threshold being set to a higher number than
the default.
.. code-block:: python
from adafruit_clue import clue
while True:
if clue.loud_sound(sound_threshold=300):
clue.pixel.fill((0, 50, 0))
else:
clue.pixel.fill(0)
"""
return self.sound_level > sound_threshold
@staticmethod
def simple_text_display(title=None, title_color=(255, 255, 255), title_scale=1, # pylint: disable=too-many-arguments
text_scale=1, font=None, colors=None):
"""Display lines of text on the CLUE display. Lines of text are created in order as shown
in the example below. If you skip a number, the line will be shown blank on the display,
e.g. if you include ``[0]`` and ``[2]``, the second line on the display will be empty, and
the text specified for lines 0 and 2 will be displayed on the first and third line.
Remember, Python begins counting at 0, so the first line on the display is 0 in the code.
Setup occurs before the loop. For data to be dynamically updated on the display, you must
include the data call in the loop by using ``.text =``. For example, if setup is saved as
``clue_data = simple_text_display()`` then ``clue_data[0].text = clue.proximity`` must be
inside the ``while True:`` loop for the proximity data displayed to update as the
values change. You must call ``show()`` at the end of the list for anything to display.
See example below for usage.
:param str title: The title displayed above the data. Set ``title="Title text"`` to provide
a title. Defaults to None.
:param title_color: The color of the title. Not necessary if no title is provided. Defaults
to white (255, 255, 255).
:param int title_scale: Scale the size of the title. Not necessary if no title is provided.
Defaults to 1.
:param int text_scale: Scale the size of the data lines. Scales the title as well.
Defaults to 1.
:param str font: The font to use to display the title and data. Defaults to built in
``terminalio.FONT``.
:param colors: A list of colors for the lines of data on the display. If you provide a
single color, all lines will be that color. Otherwise it will cycle through
the list you provide if the list is less than the number of lines displayed.
Default colors are used if ``colors`` is not set. For example, if creating
two lines of data, ``colors=((255, 255, 255), (255, 0, 0))`` would set the
first line white and the second line red, and if you created four lines of
data with the same setup, it would alternate white and red.
.. image :: ../docs/_static/display_clue_data.jpg
:alt: Display Clue Data demo
This example displays three lines with acceleration, gyro and magnetic data on the display.
Remember to call ``show()`` after the list to update the display.
.. code-block:: python
from adafruit_clue import clue
clue_data = clue.simple_text_display(title="CLUE Sensor Data!", title_scale=2)
while True:
clue_data[0].text = "Acceleration: {:.2f} {:.2f} {:.2f}".format(*clue.acceleration)
clue_data[1].text = "Gyro: {:.2f} {:.2f} {:.2f}".format(*clue.gyro)
clue_data[2].text = "Magnetic: {:.3f} {:.3f} {:.3f}".format(*clue.magnetic)
clue_data.show()
"""
return _ClueSimpleTextDisplay(title=title, title_color=title_color, title_scale=title_scale,
text_scale=text_scale, font=font, colors=colors)
clue = Clue() # pylint: disable=invalid-name
"""Object that is automatically created on import.
To use, simply import it from the module:
.. code-block:: python
from adafruit_clue import clue
"""
| 32.725702
| 121
| 0.604937
|
cc82f34f1f10d07b400ac0e00cfa3e586ad65dee
| 19,263
|
py
|
Python
|
src/python/bot/minimizer/minimizer.py
|
kcwu/clusterfuzz
|
acf85a83e4bbbf3da8d9e377ddf3e7754f74224e
|
[
"Apache-2.0"
] | 4
|
2019-11-26T01:50:51.000Z
|
2021-08-14T20:32:43.000Z
|
src/python/bot/minimizer/minimizer.py
|
kcwu/clusterfuzz
|
acf85a83e4bbbf3da8d9e377ddf3e7754f74224e
|
[
"Apache-2.0"
] | 22
|
2019-12-26T17:02:34.000Z
|
2022-03-21T22:16:52.000Z
|
src/python/bot/minimizer/minimizer.py
|
kcwu/clusterfuzz
|
acf85a83e4bbbf3da8d9e377ddf3e7754f74224e
|
[
"Apache-2.0"
] | 2
|
2019-02-09T09:09:20.000Z
|
2019-02-15T05:25:13.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for other minimizers."""
import copy
import functools
import os
import tempfile
import time
import threading
import errors
DEFAULT_CLEANUP_INTERVAL = 20
DEFAULT_THREAD_COUNT = 8
DEFAULT_TESTS_PER_THREAD = 4
MAX_MERGE_BATCH_SIZE = 32
PROGRESS_REPORT_INTERVAL = 300
class DummyLock(object):
"""Dummy to replace threading.Lock for single-threaded tests."""
def __enter__(self):
pass
def __exit__(self, exec_type, value, traceback):
pass
def __nonzero__(self):
return False
class TestQueue(object):
"""Queue to store commands that should be executed to test hypotheses."""
def __init__(self,
thread_count,
deadline_check=None,
progress_report_function=None,
per_thread_cleanup_function=None):
self.thread_count = thread_count
self.deadline_check = deadline_check
self.progress_report_function = progress_report_function
self.per_thread_cleanup_function = per_thread_cleanup_function
self.lock = threading.Lock()
self.queue = []
def _pop(self):
"""Pull a single hypothesis to process from the queue."""
with self.lock:
if not self.queue:
return None
return self.queue.pop(0)
def _work(self):
"""Process items from the queue until it is empty."""
while not self.deadline_check or not self.deadline_check(soft_check=True):
current_item = self._pop()
if not current_item:
break
test, test_function, completion_callback, should_run = current_item # pylint: disable=unpacking-non-sequence
if not should_run():
continue
result = test_function(test)
completion_callback(result)
if self.per_thread_cleanup_function:
self.per_thread_cleanup_function()
# Abort if we have exceeded the deadline for this operation.
if self.deadline_check and self.deadline_check(soft_check=True):
break
def _cleanup(self):
"""Clean up the queue to be sure that no more tasks will be executed."""
with self.lock:
self.queue = []
def push(self,
test,
test_function,
completion_callback,
should_run=lambda: True):
"""Add a test to the queue and a callback to run on completion."""
with self.lock:
self.queue.append((test, test_function, completion_callback, should_run))
def force(self,
test,
test_function,
completion_callback,
should_run=lambda: True):
"""Force a test to the front of the queue."""
entry = (test, test_function, completion_callback, should_run)
with self.lock:
self.queue.insert(0, entry)
def size(self):
"""Return the number of unprocessed tasks in the queue."""
return len(self.queue)
def process(self):
"""Process all tests in the queue and block until completion."""
while self.queue:
threads = [
threading.Thread(target=self._work) for _ in xrange(self.thread_count)
]
for thread in threads:
thread.start()
while any([thread.is_alive() for thread in threads]):
if self.deadline_check:
self.deadline_check(cleanup_function=self._cleanup)
if self.progress_report_function:
self.progress_report_function()
time.sleep(1)
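# Illustrative sketch (hypothetical usage): TestQueue can be exercised directly
# with any callable test function and completion callback.
#
#   results = []
#   queue = TestQueue(thread_count=4)
#   for case in ['a', 'bb', 'ccc']:
#     queue.push(case, lambda case: len(case), results.append)
#   queue.process()  # blocks until every queued test has run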
class Testcase(object):
"""Single test case to be minimized."""
def __init__(self, data, minimizer):
self.minimizer = minimizer
if minimizer.tokenize:
self.tokens = minimizer.tokenizer(data)
else:
self.tokens = data
self.required_tokens = [True] * len(self.tokens)
self.tested_hypotheses = set()
self.unmerged_failing_hypotheses = []
self.tests_to_queue = []
self.currently_processing = False
self.last_progress_report_time = 0
self.runs_since_last_cleanup = 0
if minimizer.max_threads > 1:
self.test_queue = TestQueue(
minimizer.max_threads,
deadline_check=self._deadline_exceeded,
progress_report_function=self._report_progress)
self.merge_preparation_lock = threading.Lock()
self.merge_lock = threading.Lock()
self.cache_lock = threading.Lock()
self.tests_to_queue_lock = threading.Lock()
else:
self.test_queue = None
self.merge_preparation_lock = DummyLock()
self.merge_lock = DummyLock()
self.cache_lock = DummyLock()
self.tests_to_queue_lock = DummyLock()
def __str__(self):
"""Return the string form of the minimized test case (at this point)."""
return self.minimizer.token_combiner(self.get_required_tokens())
# Helper functions based on minimizer configuration.
def _deadline_exceeded(self, cleanup_function=None, soft_check=False):
"""Check to see if we have exceeded the deadline for execution."""
if self.minimizer.deadline and time.time() > self.minimizer.deadline:
if soft_check:
return True
# If we are here, we have exceeded the deadline on a hard check. Clean up.
if cleanup_function:
cleanup_function()
if self.minimizer.cleanup_function:
self.minimizer.cleanup_function()
# Raise an exception if this is not a soft deadline check.
raise errors.MinimizationDeadlineExceededError(self)
return False
def _delete_file_if_needed(self, input_file):
"""Deletes a temporary file if necessary."""
# If we are not running in a mode where we need to delete files, do nothing.
if not self.minimizer.tokenize or not self.minimizer.delete_temp_files:
return
try:
os.remove(input_file)
except OSError:
pass
def _report_progress(self):
"""Call a function to report progress if the minimizer uses one."""
if not self.minimizer.progress_report_function:
return
if time.time() - self.last_progress_report_time < PROGRESS_REPORT_INTERVAL:
return
self.last_progress_report_time = time.time()
message = '%d/%d tokens remaining.' % (len(self.get_required_tokens()),
len(self.required_tokens))
self.minimizer.progress_report_function(message)
# Functions used when preparing tests.
def _range_complement(self, current_range):
"""Return required tokens in the complement of the specified range."""
result = xrange(len(self.tokens))
to_remove = set(current_range)
return [i for i in result if i not in to_remove and self.required_tokens[i]]
def _prepare_test_input(self, tokens, tested_tokens):
"""Write the tokens currently being tested to a temporary file."""
tested_tokens = set(tested_tokens)
current_tokens = [t for i, t in enumerate(tokens) if i in tested_tokens]
if not self.minimizer.tokenize:
return current_tokens
data = self.minimizer.token_combiner(current_tokens)
handle = self.minimizer.get_temp_file()
destination = handle.name
try:
handle.write(data)
except IOError:
# We may have filled the disk. Try processing tests and writing again.
self._do_single_pass_process()
handle.write(data)
handle.close()
return destination
def _get_test_file(self, hypothesis):
"""Return a test file for a hypothesis."""
complement = self._range_complement(hypothesis)
return self._prepare_test_input(self.tokens, complement)
def _push_test_to_queue(self, hypothesis):
"""Add a test for a hypothesis to a queue for processing."""
test_file = self._get_test_file(hypothesis)
callback = functools.partial(
self._handle_completed_test,
hypothesis=hypothesis,
input_file=test_file)
should_run = functools.partial(self._contains_required_tokens, hypothesis,
test_file)
self.test_queue.push(
test_file,
self.minimizer.test_function,
callback,
should_run=should_run)
# Make sure that we do not let too many unprocessed tests build up.
if self.test_queue.size() >= self.minimizer.batch_size:
self._do_single_pass_process()
def prepare_test(self, hypothesis):
"""Prepare the test based on the mode we are running in."""
# Check the cache to make sure we have not tested this before.
if self._has_tested(hypothesis):
return
# If we are single-threaded, just run and process results immediately.
if not self.test_queue:
# In the threaded case, we call the cleanup function before each pass
# over the queue. It needs to be tracked here for the single-thread case.
self.runs_since_last_cleanup += 1
if (self.runs_since_last_cleanup >=
self.minimizer.single_thread_cleanup_interval and
self.minimizer.cleanup_function):
self.minimizer.cleanup_function()
test_file = self._get_test_file(hypothesis)
if self._contains_required_tokens(hypothesis, test_file):
self._handle_completed_test(
self.minimizer.test_function(test_file), hypothesis, test_file)
# Check to see if we have exceeded the deadline and report progress.
self._report_progress()
self._deadline_exceeded()
return
if self.currently_processing:
# If we are processing, we cannot write more tests or add to the queue.
with self.tests_to_queue_lock:
self.tests_to_queue.append(hypothesis)
else:
self._push_test_to_queue(hypothesis)
# Functions used when processing test results.
def _handle_completed_test(self, test_passed, hypothesis, input_file):
"""Update state based on the test result and hypothesis."""
# If the test failed, handle the result.
if not test_passed:
self._handle_failing_hypothesis(hypothesis)
# Delete leftover files if necessary.
self._delete_file_if_needed(input_file)
# Minimizers may need to do something with the test result.
self._process_test_result(test_passed, hypothesis)
def _process_test_result(self, test_passed, hypothesis):
"""Additional processing of the result. Minimizers may override this."""
pass
def _handle_failing_hypothesis(self, hypothesis):
"""Update the token list for a failing hypothesis."""
if not self.test_queue:
# We aren't multithreaded, so just update the list directly.
for token in hypothesis:
self.required_tokens[token] = False
return
with self.merge_preparation_lock:
self.unmerged_failing_hypotheses.append(hypothesis)
if len(self.unmerged_failing_hypotheses) < MAX_MERGE_BATCH_SIZE:
return
hypotheses_to_merge = self.unmerged_failing_hypotheses
self.unmerged_failing_hypotheses = []
# We may need to block while the previous batch is merging. If not, the
# results from this batch could conflict with the results from the previous.
with self.merge_lock:
self._attempt_merge(hypotheses_to_merge)
def _attempt_merge(self, hypotheses, sibling_merge_succeeded=False):
"""Update the required token list if the queued changes don't conflict."""
# If there's nothing to merge, we're done.
if not hypotheses:
return
aggregate_tokens = set()
for hypothesis in hypotheses:
for token in hypothesis:
aggregate_tokens.add(token)
aggregate_hypothesis = list(aggregate_tokens)
if sibling_merge_succeeded:
# We were able to remove all tokens from the other half of this
# hypothesis, so we can assume that this would fail without running the
# test. If this would also pass, there would not have been a conflict
# while testing this set. Well, this could be a flaky test, but then we
# have bigger problems.
test_passed = True
else:
complement = self._range_complement(aggregate_hypothesis)
test_file = self._prepare_test_input(self.tokens, complement)
test_passed = self.minimizer.test_function(test_file)
self._delete_file_if_needed(test_file)
# Failed (crashed), so there was no conflict here.
if not test_passed:
for token in aggregate_hypothesis:
self.required_tokens[token] = False
return True
# Passed (no crash). We need to try a bit harder to resolve this conflict.
if len(hypotheses) == 1:
# We really cannot remove this token. No additional work to be done.
return False
front = hypotheses[:len(hypotheses) / 2]
back = hypotheses[len(hypotheses) / 2:]
# If we could remove either one of two hypotheses, favor removing the first.
front_merged_successfully = self._attempt_merge(front)
self._attempt_merge(back, sibling_merge_succeeded=front_merged_successfully)
return False
def _do_single_pass_process(self):
"""Process through a single pass of our test queue."""
self.currently_processing = True
self.test_queue.process()
# If a cleanup function is provided, call it. This is usually used to
# ensure that all processes are terminated or perform additional cleanup.
if self.minimizer.cleanup_function:
self.minimizer.cleanup_function()
# Push any results generated while this test was running to the queue.
self.currently_processing = False
while self.tests_to_queue:
with self.tests_to_queue_lock:
hypothesis = self.tests_to_queue.pop(0)
# This may trigger another round of processing, so don't hold the lock.
self._push_test_to_queue(hypothesis)
def process(self):
"""Start a test."""
if not self.test_queue:
return
while self.test_queue.size():
self._do_single_pass_process()
with self.merge_preparation_lock:
hypotheses_to_merge = self.unmerged_failing_hypotheses
self.unmerged_failing_hypotheses = []
with self.merge_lock:
self._attempt_merge(hypotheses_to_merge)
# Cache functions.
def _contains_required_tokens(self, hypothesis, test_file):
"""Check to see if this hypothesis contains untested tokens."""
# It is possible that we could copy this while it is being updated. We do
# not block in this case because the worst case scenario is that we run an
# irrelevant test, and blocking is potentially expensive.
working_required_tokens = copy.copy(self.required_tokens)
with self.merge_preparation_lock:
# A deep copy is not required. Hypotheses are not modified after being
# added to the list for processing.
unprocessed_hypotheses = copy.copy(self.unmerged_failing_hypotheses)
for unprocessed_hypothesis in unprocessed_hypotheses:
for token in unprocessed_hypothesis:
# For this check, we do not care if the merge would succeed or not since
# the best case is that we would add the token to the queue as well.
working_required_tokens[token] = False
for token in hypothesis:
if working_required_tokens[token]:
return True
# If we aren't going to run this test, this will not have a completion
# callback. If that happens, we need to clean up now.
self._delete_file_if_needed(test_file)
return False
def _has_tested(self, hypothesis):
"""Check to see if this hypothesis has been tested before."""
hypothesis_tuple = tuple(hypothesis)
with self.cache_lock:
if hypothesis_tuple in self.tested_hypotheses:
return True
self.tested_hypotheses.add(hypothesis_tuple)
return False
# Result checking functions.
def get_result(self):
"""Get the result of minimization."""
if not self.minimizer.tokenize:
return self.get_required_tokens()
return str(self)
def get_required_tokens(self):
"""Return all required tokens for this test case."""
return [t for i, t in enumerate(self.tokens) if self.required_tokens[i]]
def get_required_token_indices(self):
"""Get the indices of all remaining required tokens."""
return [i for i, v in enumerate(self.required_tokens) if v]
def _default_tokenizer(s):
"""Default string tokenizer which splits on newlines."""
return s.split('\n')
def _default_combiner(tokens):
"""Default token combiner which assumes each token is a line."""
return '\n'.join(tokens)
class Minimizer(object):
"""Base class for minimizers."""
def __init__(self,
test_function,
max_threads=1,
tokenizer=_default_tokenizer,
token_combiner=_default_combiner,
tokenize=True,
cleanup_function=None,
single_thread_cleanup_interval=DEFAULT_CLEANUP_INTERVAL,
deadline=None,
get_temp_file=None,
delete_temp_files=True,
batch_size=None,
progress_report_function=None,
file_extension=''):
"""Initialize a minimizer. A minimizer object can be used multiple times."""
self.test_function = test_function
self.max_threads = max_threads
self.tokenizer = tokenizer
self.token_combiner = token_combiner
self.tokenize = tokenize
self.cleanup_function = cleanup_function
self.single_thread_cleanup_interval = single_thread_cleanup_interval
self.deadline = deadline
self.get_temp_file = get_temp_file
self.delete_temp_files = delete_temp_files
self.progress_report_function = progress_report_function
if batch_size:
self.batch_size = batch_size
else:
self.batch_size = DEFAULT_TESTS_PER_THREAD * max_threads
if not get_temp_file:
self.get_temp_file = functools.partial(
tempfile.NamedTemporaryFile,
mode='wb',
delete=False,
prefix='min_',
suffix=file_extension)
else:
self.get_temp_file = get_temp_file
@staticmethod
def _handle_constructor_argument(key, kwargs, default=None):
"""Cleanup a keyword argument specific to a subclass and get the value."""
result = default
try:
result = kwargs[key]
del kwargs[key]
except KeyError:
pass
return result
def _execute(self, data):
"""Perform minimization on a test case."""
raise NotImplementedError
def minimize(self, data):
"""Wrapper to perform common tasks and call |_execute|."""
try:
testcase = self._execute(data)
except errors.MinimizationDeadlineExceededError, error:
# When a MinimizationDeadlineExceededError is raised, the partially
# minimized test case is stored with it so that we can recover the work
# that had been done up to that point.
testcase = error.testcase
return testcase.get_result()
@staticmethod
def run(data, thread_count=DEFAULT_THREAD_COUNT, file_extension=''):
"""Minimize |data| using this minimizer's default configuration."""
raise NotImplementedError
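# Illustrative sketch (hypothetical subclass and names): a concrete minimizer
# built on the classes above. check_no_crash is a callable that runs the
# candidate file and returns True when the crash no longer reproduces (the test
# "passes"); hypotheses whose removal still crashes are dropped from the result.
#
#   class LineMinimizer(Minimizer):
#     def _execute(self, data):
#       testcase = Testcase(data, self)
#       for index in range(len(testcase.tokens)):
#         testcase.prepare_test([index])  # hypothesis: this line is removable
#       testcase.process()
#       return testcase
#
#   minimized = LineMinimizer(check_no_crash, max_threads=1).minimize(crashing_input)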
| 34.336898
| 115
| 0.700722
|
6403a9f6a5dbf7e60d210d5e695a587a0880b804
| 6,085
|
py
|
Python
|
cloudify_azure/workflows/discover.py
|
cloudify-incubator/cloudify-azure-plugin
|
49ecc485b70099d6d23dff81f50b17ab31f7fc18
|
[
"Apache-2.0"
] | 2
|
2018-08-16T01:50:35.000Z
|
2018-11-17T20:31:37.000Z
|
cloudify_azure/workflows/discover.py
|
cloudify-incubator/cloudify-azure-plugin
|
49ecc485b70099d6d23dff81f50b17ab31f7fc18
|
[
"Apache-2.0"
] | 43
|
2017-05-18T12:31:42.000Z
|
2019-01-08T09:20:42.000Z
|
cloudify_azure/workflows/discover.py
|
cloudify-cosmo/cloudify-azure-plugin
|
b9ac49f603d6588685e00d16f19bcfe3f84d5c98
|
[
"Apache-2.0"
] | 13
|
2015-07-09T10:49:55.000Z
|
2021-05-06T09:24:30.000Z
|
# #######
# Copyright (c) 2021 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify.decorators import workflow
from cloudify.workflows import ctx as wtx
from cloudify.exceptions import NonRecoverableError
from .resources import (
get_resources,
get_locations
)
from cloudify_common_sdk.utils import (
create_deployments,
install_deployments
)
AZURE_TYPE = 'cloudify.azure.nodes.resources.Azure'
def discover_resources(node_id=None,
resource_types=None,
locations=None,
ctx=None,
**_):
discovered_resources = {}
ctx = ctx or wtx
node_id = node_id or get_azure_account_node_id(ctx.nodes)
node = ctx.get_node(node_id)
for node_instance in node.instances:
if not isinstance(locations, list) and not locations:
locations = get_locations(node, ctx.deployment.id)
resources = get_resources(node, locations, resource_types, ctx.logger)
discovered_resources.update(resources)
node_instance._node_instance.runtime_properties['resources'] = \
resources
return discovered_resources
raise NonRecoverableError(
'No node instances of the provided node ID {n} exist. '
'Please install the account blueprint.'.format(n=node_id))
def deploy_resources(group_id,
blueprint_id,
deployment_ids,
inputs,
labels,
ctx,
**_):
"""Create new deployments and execute install.
:param group_id: The new Group ID.
:param blueprint_id: The child blueprint ID.
:param deployment_ids: A list of deployment IDs.
:param inputs: A list of inputs in order of the deployment IDs.
:param ctx:
:param _:
:return:
"""
if not deployment_ids:
return
ctx.logger.info(
'Creating deployments {dep} with blueprint {blu} '
'with these inputs: {inp} and with these labels: {lab}'.format(
dep=deployment_ids, blu=blueprint_id, inp=inputs, lab=labels))
create_deployments(group_id, blueprint_id, deployment_ids, inputs, labels)
@workflow
def discover_and_deploy(node_id=None,
resource_types=None,
regions=None,
blueprint_id=None,
ctx=None,
**_):
"""This workflow will check against the parent "Account" node for
resources of the types found in resource_types in regions.
Then we deploy child deployments of those resources.
:param node_id: An AZURE_TYPE node template name.
    :param resource_types: List of crawlable types,
        e.g. Microsoft.ContainerService/managedClusters.
:param regions: List of regions.
:param blueprint_id: The blueprint ID to create child deployments with.
:param ctx:
:param _:
:return:
"""
ctx = ctx or wtx
blueprint_id = blueprint_id or ctx.blueprint.id
label_list = [{'csys-env-type': 'environment'},
{'csys-obj-parent': ctx.deployment.id}]
# Refresh the AZURE_TYPE nodes list..
resources = discover_resources(node_id=node_id,
resource_types=resource_types,
                                   locations=regions,
ctx=ctx)
# Loop over the resources to create new deployments from them.
resource_type = None
for _, resource_types in resources.items():
deployment_ids_list = []
inputs_list = []
for resource_type, resources in resource_types.items():
for resource_id, _ in resources.items():
# We are now at the resource level.
# Create the inputs and deployment ID for the new deployment.
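                # For illustration, assuming AKS-style resource IDs of the form
                # /subscriptions/<sub>/resourceGroups/<rg>/providers/
                #     Microsoft.ContainerService/managedClusters/<name>,
                # split('/') makes resource_name[-5] the resource group name
                # and resource_name[-1] the managed cluster name used below.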
resource_name = resource_id.split('/')
inputs_list.append(
{
'resource_group_name': resource_name[-5],
'managed_cluster_name': resource_name[-1]
}
)
deployment_ids_list.append(
generate_deployment_ids(
ctx.deployment.id, resource_name[-1])
)
if deployment_ids_list:
label_list.append({'csys-env-type': resource_type})
deploy_resources(ctx.deployment.id,
blueprint_id,
deployment_ids_list,
inputs_list,
label_list,
ctx)
del label_list[-1]
deployment_ids_list = []
inputs_list = []
install_deployments(ctx.deployment.id)
def get_azure_account_node_id(nodes):
""" Check and see if the Workflow Context Node is a supported account type.
:param nodes: list of nodes from CTX.
:return:
"""
for node in nodes:
if AZURE_TYPE in node.type_hierarchy:
return node.id
raise NonRecoverableError(
'The deployment has no nodes of type {t}.'.format(t=AZURE_TYPE))
def generate_deployment_ids(deployment_id, resource_name):
""" Create a simple child deployment name. I use a method because it
makes the above code slightly less messy.
:param deployment_id:
:param resource_name:
:return:
"""
return '{parent}-{child}'.format(
parent=deployment_id,
child=resource_name)
| 36.005917
| 79
| 0.606081
|
75114658f969ab90b006fa27c76b5757329723fb
| 2,324
|
py
|
Python
|
config.py
|
adamzwasserman/VirusTotal-Batcher
|
1a0cb64198b5b1f75d97ee5b8718e89f225f1395
|
[
"MIT"
] | 2
|
2018-06-10T13:45:09.000Z
|
2018-11-13T17:14:40.000Z
|
config.py
|
adamzwasserman/VirusTotal-Batcher
|
1a0cb64198b5b1f75d97ee5b8718e89f225f1395
|
[
"MIT"
] | null | null | null |
config.py
|
adamzwasserman/VirusTotal-Batcher
|
1a0cb64198b5b1f75d97ee5b8718e89f225f1395
|
[
"MIT"
] | null | null | null |
import os
import sys
from pathlib import Path
# VirusTotal Batch Machine Gun
version_info = (2, 0)
version = '.'.join(str(c) for c in version_info)
# ***General Params used by all (or any) modules****
debug = 0
# Server friendly. If 'yes', the program will look for VTlookup.txt in this subdirectory of the user's home directory,
# and will save results both to that subdirectory AND to the same directory as the program file (with the
# user's username appended). MUST include the trailing forward slash. To store directly in home, put "/".
server_friendly = 'no'
# If server_friendly = 'no', home_path is ignored
if server_friendly == 'yes':
home_path = "/"
else:
this_file = os.path.dirname(sys.argv[0])
program_path = os.path.abspath(this_file)
home_path = '/'.join(str(program_path).split('/')[:-1])
############################################################
# ***VirusTotal Specific Params****
# VirusTotal API Key
api_key = 'putkeyhere'
# Time between requests (deprecated parameter, leave at 0)
request_wait = 0
# maximum time (in seconds) to wait for VT to respond before giving up (in case of the VT rate limiter kicking in)
max_wait = 1
# This sets the number of parallel VT lookups. Range is between 50-200.
process_pool_size = 50
# Set by the program DO NOT CHANGE
live_resolve = 'yes'
vt_auto_submit = 'yes'
############################################################
# ***Reno (live resolution) Specific Params****
reno_error_file_name = home_path+'/logs/Reno DNS Errors.txt'
# This sets the number of concurrent live resolutions. Range is between 30-100.
async_pool = 50
# What fraction of timeouts is acceptable in results (10% is written as 0.1)
max_timeout_ratio = .1
# Resolution time out. Set to above 30 to get high accuracy. Less than 10 for speed. Do not go below 1.
reno_max_timeout_in_seconds = 4
reno_ns_timeout_in_seconds = 4
reno_overall_timeout = 60
run_reno_poolsize = 1
# reno_max_timeout_in_seconds = .15
# reno_ns_timeout_in_seconds = .15
# reno_overall_timeout = 1
# run_reno_poolsize = 100
# Hard limit on how many times to loop and retry batch of timed-out resolutions
max_resolution_retries = 5
riprova_max_retries = 1
riprova_retry_interval = 0.05
############################################################
| 30.578947
| 120
| 0.684165
|
2c67b2bbcca54f4a42d3803fda6cde4d9682d9e0
| 362
|
py
|
Python
|
Company_Based_Questions/Goldman Sachs/Second Most Repeated string.py
|
Satyam-Bhalla/Competitive-Coding
|
5814f5f60572f1e76495efe751b94bf4d2845198
|
[
"MIT"
] | 1
|
2021-12-09T10:36:48.000Z
|
2021-12-09T10:36:48.000Z
|
Company_Based_Questions/Goldman Sachs/Second Most Repeated string.py
|
Satyam-Bhalla/Competitive-Coding
|
5814f5f60572f1e76495efe751b94bf4d2845198
|
[
"MIT"
] | null | null | null |
Company_Based_Questions/Goldman Sachs/Second Most Repeated string.py
|
Satyam-Bhalla/Competitive-Coding
|
5814f5f60572f1e76495efe751b94bf4d2845198
|
[
"MIT"
] | null | null | null |
def sorted_second(val) :
return val[1]
t= int(input())
for i in range(t):
n = int(input())
arr = list(map(str,input().split()))
str_dict = {}
for ar in arr :
if ar not in str_dict :
str_dict[ar] = 1
else :
str_dict[ar] += 1
print(sorted(str_dict.items(),key=sorted_second,reverse=True)[1][0])
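# Illustrative run: for the input
#   1
#   6
#   aaa bbb ccc bbb aaa aaa
# the counts are {'aaa': 3, 'bbb': 2, 'ccc': 1}, so the script prints 'bbb',
# the second most repeated string.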
| 24.133333
| 72
| 0.541436
|
3ce1ceaecbe5e92c9e57136d886a599c26f3e815
| 1,879
|
py
|
Python
|
segmentation_models_pytorch/transinunet/model.py
|
PhilippMarquardt/segmentation_models.pytorch
|
8a884bdf7a0c92a2eb4f5d85120a83cd13b08a06
|
[
"MIT"
] | null | null | null |
segmentation_models_pytorch/transinunet/model.py
|
PhilippMarquardt/segmentation_models.pytorch
|
8a884bdf7a0c92a2eb4f5d85120a83cd13b08a06
|
[
"MIT"
] | null | null | null |
segmentation_models_pytorch/transinunet/model.py
|
PhilippMarquardt/segmentation_models.pytorch
|
8a884bdf7a0c92a2eb4f5d85120a83cd13b08a06
|
[
"MIT"
] | null | null | null |
from typing import Optional, Union, List
from .decoder import TransInUnetDecoder
from ..encoders import get_encoder
from ..base import SegmentationModel
from ..base import SegmentationHead, ClassificationHead
class TransInUnet(SegmentationModel):
def __init__(
self,
encoder_name: str = "resnet34",
encoder_depth: int = 5,
encoder_weights: Optional[str] = "imagenet",
decoder_use_batchnorm: bool = True,
decoder_channels: List[int] = (256, 128, 64, 32, 16),
decoder_attention_type: Optional[str] = None,
in_channels: int = 3,
classes: int = 1,
activation: Optional[Union[str, callable]] = None,
aux_params: Optional[dict] = None,
image_size = 128
):
super().__init__()
self.encoder = get_encoder(
encoder_name,
in_channels=in_channels,
depth=encoder_depth,
weights=encoder_weights,
)
self.decoder = TransInUnetDecoder(
encoder_channels=self.encoder.out_channels,
decoder_channels=decoder_channels,
n_blocks=encoder_depth,
use_batchnorm=decoder_use_batchnorm,
center=True if encoder_name.startswith("vgg") else False,
attention_type=decoder_attention_type,
image_size = image_size
)
self.segmentation_head = SegmentationHead(
in_channels=decoder_channels[-1],
out_channels=classes,
activation=activation,
kernel_size=3,
)
if aux_params is not None:
self.classification_head = ClassificationHead(
in_channels=self.encoder.out_channels[-1], **aux_params
)
else:
self.classification_head = None
self.name = "u-{}".format(encoder_name)
self.initialize()
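# Illustrative usage sketch (hypothetical values), assuming the transformer
# decoder accepts 128x128 inputs:
#
#   import torch
#   model = TransInUnet(encoder_name="resnet34", classes=2, image_size=128)
#   mask = model(torch.randn(1, 3, 128, 128))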
| 32.964912
| 71
| 0.618946
|
ad583ac0d6d45028af08aabd37e81207cd9ff247
| 4,290
|
py
|
Python
|
scorched/dates.py
|
syslabcom/scorched
|
90e8c04f71657d62f8837b90fad5287004b25d85
|
[
"MIT"
] | 24
|
2015-02-16T20:08:40.000Z
|
2022-01-15T18:24:04.000Z
|
scorched/dates.py
|
syslabcom/scorched
|
90e8c04f71657d62f8837b90fad5287004b25d85
|
[
"MIT"
] | 50
|
2015-03-27T19:17:04.000Z
|
2021-06-29T06:48:05.000Z
|
scorched/dates.py
|
syslabcom/scorched
|
90e8c04f71657d62f8837b90fad5287004b25d85
|
[
"MIT"
] | 22
|
2015-03-24T20:54:58.000Z
|
2021-12-25T22:09:29.000Z
|
from __future__ import unicode_literals
import datetime
import fnmatch
import math
import pytz
import re
import scorched.exc
from scorched.compat import basestring
from scorched.compat import python_2_unicode_compatible
year = r'[+/-]?\d+'
tzd = r'Z|((?P<tzd_sign>[-+])(?P<tzd_hour>\d\d):(?P<tzd_minute>\d\d))'
extended_iso_template = r'(?P<year>' + year + r""")
(-(?P<month>\d\d)
(-(?P<day>\d\d)
([T%s](?P<hour>\d\d)
:(?P<minute>\d\d)
(:(?P<second>\d\d)
(.(?P<fraction>\d+))?)?
(""" + tzd + """)?)?
)?)?"""
extended_iso = extended_iso_template % " "
extended_iso_re = re.compile('^' + extended_iso + '$', re.X)
def datetime_from_w3_datestring(s):
""" We need to extend ISO syntax (as permitted by the standard) to allow
for dates before 0AD and after 9999AD. This is how to parse such a string
"""
m = extended_iso_re.match(s)
if not m:
raise ValueError
d = m.groupdict()
d['year'] = int(d['year'])
d['month'] = int(d['month'] or 1)
d['day'] = int(d['day'] or 1)
d['hour'] = int(d['hour'] or 0)
d['minute'] = int(d['minute'] or 0)
d['fraction'] = d['fraction'] or '0'
d['second'] = float("%s.%s" % ((d['second'] or '0'), d['fraction']))
del d['fraction']
if d['tzd_sign']:
if d['tzd_sign'] == '+':
tzd_sign = 1
elif d['tzd_sign'] == '-':
tzd_sign = -1
tz_delta = datetime_delta_factory(tzd_sign * int(d['tzd_hour']),
tzd_sign * int(d['tzd_minute']))
else:
tz_delta = datetime_delta_factory(0, 0)
del d['tzd_sign']
del d['tzd_hour']
del d['tzd_minute']
d['tzinfo'] = pytz.utc
dt = datetime_factory(**d) + tz_delta
return dt
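# Illustrative example of the extended parser above:
#
#   >>> datetime_from_w3_datestring('2015-03-27T19:17:04Z')
#   datetime.datetime(2015, 3, 27, 19, 17, 4, tzinfo=<UTC>)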
class DateTimeRangeError(ValueError):
pass
def datetime_factory(**kwargs):
second = kwargs.get('second')
if second is not None:
f, i = math.modf(second)
kwargs['second'] = int(i)
kwargs['microsecond'] = int(f * 1000000)
try:
return datetime.datetime(**kwargs)
except ValueError as e:
raise DateTimeRangeError(e.args[0])
def datetime_delta_factory(hours, minutes):
return datetime.timedelta(hours=hours, minutes=minutes)
class solr_date(object):
"""
This class can be initialized from native python datetime
objects and will serialize to a format appropriate for Solr
"""
def __init__(self, v):
if isinstance(v, solr_date):
self._dt_obj = v._dt_obj
elif isinstance(v, basestring):
self._dt_obj = datetime_from_w3_datestring(v)
elif hasattr(v, "strftime"):
self._dt_obj = self.from_date(v)
else:
raise scorched.exc.SolrError(
"Cannot initialize solr_date from %s object" % type(v))
def __hash__(self):
return self._dt_obj.__hash__()
@staticmethod
def from_date(dt_obj):
# Python datetime objects may include timezone information
if hasattr(dt_obj, 'tzinfo') and dt_obj.tzinfo:
# but Solr requires UTC times.
return dt_obj.astimezone(pytz.utc).replace(tzinfo=None)
else:
return dt_obj
@property
def microsecond(self):
return self._dt_obj.microsecond
def __repr__(self):
return repr(self._dt_obj)
@python_2_unicode_compatible
def __str__(self):
""" Serialize a datetime object in the format required
by Solr. See http://wiki.apache.org/solr/IndexingDates
"""
return "%sZ" % (self._dt_obj.isoformat(), )
def __lt__(self, other):
try:
other = other._dt_obj
except AttributeError:
pass
return self._dt_obj < other
def __eq__(self, other):
try:
other = other._dt_obj
except AttributeError:
pass
return self._dt_obj == other
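# Illustrative example: a naive datetime serializes with the trailing 'Z'
# required by Solr.
#
#   >>> str(solr_date(datetime.datetime(2015, 3, 27, 19, 17, 4)))
#   '2015-03-27T19:17:04Z'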
def is_datetime_field(name, datefields):
if name in datefields:
return True
for fieldpattern in [d for d in datefields if '*' in d]:
# XXX: there is better than fnmatch ?
if fnmatch.fnmatch(name, fieldpattern):
return True
return False
| 29.383562
| 77
| 0.582751
|
38ac1b7fda3674af072337fbf4402026b807f57c
| 2,061
|
py
|
Python
|
fastgc/layers/conv.py
|
ppmlguy/fastgradclip
|
0d8bff42ab13fa3471c520a2823050ccf0ff4a21
|
[
"MIT"
] | 2
|
2020-10-16T10:14:25.000Z
|
2021-03-25T17:19:34.000Z
|
fastgc/layers/conv.py
|
ppmlguy/fastgradclip
|
0d8bff42ab13fa3471c520a2823050ccf0ff4a21
|
[
"MIT"
] | null | null | null |
fastgc/layers/conv.py
|
ppmlguy/fastgradclip
|
0d8bff42ab13fa3471c520a2823050ccf0ff4a21
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from .base_layer import BasePGradLayer
from fastgc.common.im2col import im2col_indices
from fastgc.util import conv_outsize
class Conv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1,
bias=True, padding_mode='zeros'):
super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias, padding_mode)
self.pre_activation = None
self.layer_input = None
def forward(self, input):
self.layer_input = input
out = F.conv2d(input, self.weight, self.bias, self.stride, self.padding)
self.pre_activation = out
return out
def per_example_gradient(self, deriv_pre_activ):
dLdZ = deriv_pre_activ
H = self.layer_input
batch_size, n_filter = dLdZ.shape[0], dLdZ.shape[1]
per_grad_bias = None
if self.bias is not None:
per_grad_bias = dLdZ.view(batch_size, n_filter, -1).sum(2) # bias
k1, k2 = self.kernel_size
C_in = H.shape[1]
dLdZ_reshaped = dLdZ.view(batch_size, n_filter, -1)
padding = self.padding[0]
stride = self.stride[0]
h_col = im2col_indices(H, k1, k2, padding=padding, stride=stride)
per_grad_weight = torch.bmm(dLdZ_reshaped, h_col.transpose(1, 2))
return per_grad_weight, per_grad_bias
def pe_grad_sqnorm(self, deriv_pre_activ):
batch_size = deriv_pre_activ.shape[0]
pe_grad_weight, pe_grad_bias = self.per_example_gradient(deriv_pre_activ)
sq_norm_weight = pe_grad_weight.pow(2).view(batch_size, -1).sum(1)
if self.bias is not None:
sq_norm_bias = pe_grad_bias.pow(2).view(batch_size, -1).sum(1)
return sq_norm_weight + sq_norm_bias
else:
return sq_norm_weight
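# Illustrative sketch (hypothetical values): per-example gradients for a batch
# of four inputs. Following per_example_gradient above, the per-example weight
# gradient has shape (N, out_channels, in_channels * k1 * k2).
#
#   layer = Conv2d(3, 8, kernel_size=3, padding=1)
#   out = layer(torch.randn(4, 3, 16, 16))
#   dLdZ, = torch.autograd.grad(out.sum(), layer.pre_activation)
#   sq_norms = layer.pe_grad_sqnorm(dLdZ)  # one squared grad norm per example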
| 34.932203
| 84
| 0.639981
|
f42827e5decf63c69b30914dd34d704f01dca20e
| 5,830
|
py
|
Python
|
tests/test_adapter.py
|
coldnight/parameter
|
70a9f5e21ebb78d526d074eea64a16242b129848
|
[
"Apache-2.0"
] | 2
|
2017-08-08T03:30:25.000Z
|
2017-12-02T19:10:38.000Z
|
tests/test_adapter.py
|
coldnight/parameter
|
70a9f5e21ebb78d526d074eea64a16242b129848
|
[
"Apache-2.0"
] | null | null | null |
tests/test_adapter.py
|
coldnight/parameter
|
70a9f5e21ebb78d526d074eea64a16242b129848
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""This module tests adapters."""
from __future__ import print_function, division, unicode_literals
import json
import unittest
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from tornado import testing
from tornado import web
from parameter import Model, Argument, types
from parameter import ArgumentMissError, ArgumentInvalidError
from parameter.adapter import TornadoAdapter, JSONAdapter
class UserEntity(Model):
username = Argument(types.String(max_len=100))
password = Argument(types.String(max_len=64))
name = Argument(types.Unicode(max_len=50))
age = Argument(types.Integer, default=18)
badges = Argument(types.Unicode, multiple=True, alias="badge")
class SingleArgEntity(Model):
username = Argument(types.String(max_len=100),
miss_message="Require username")
class DemoHandler(web.RequestHandler):
def get(self):
try:
if self.request.path == "/":
entity = UserEntity(TornadoAdapter(self))
else:
entity = SingleArgEntity(TornadoAdapter(self))
except ArgumentMissError as e:
self.write({
"missing": True,
"name": e.name,
})
return
except ArgumentInvalidError as e:
self.write({
"invalid": True,
"name": e.name,
            })
            return
self.write({
"username": entity.username.decode("utf8"),
"password": entity.password.decode('utf8'),
"name": entity.name,
"age": entity.age,
"badges": entity.badges,
})
class TornadoAdapterTestCase(testing.AsyncHTTPTestCase):
def get_app(self):
return web.Application([
(r'/', DemoHandler),
(r'/1', DemoHandler),
])
def _fetch_json(self, path, params=None):
resp = self.fetch(path + "?" + urlencode(params or {}))
self.assertEqual(resp.code, 200)
return json.loads(resp.body.decode('utf8'))
def test_miss(self):
data = self._fetch_json("/1")
self.assertTrue(data["missing"])
self.assertEqual(data["name"], "username")
def test_ok(self):
data = self._fetch_json("/", {
"username": "un",
"password": "pw",
"age": 10,
"name": "Gray",
})
self.assertDictEqual(data, {
"username": "un",
"password": "pw",
"age": 10,
"name": "Gray",
"badges": [],
})
def test_multiple(self):
data = self._fetch_json("/", [
("username", "uw"),
("password", "pw"),
("age", 10),
("name", "Gray"),
("badge", "1"),
("badge", "2"),
])
self.assertDictEqual(data, {
"username": "uw",
"password": "pw",
"age": 10,
"name": "Gray",
"badges": ["1", "2"]
})
class DemoEntity(Model):
a = Argument(types.Integer)
b = Argument(types.Integer)
class JSONAdapterTestCase(unittest.TestCase):
def test_binary_string(self):
adapter = JSONAdapter(b'{"a": 1, "b": 2}')
entity = DemoEntity(adapter)
self.assertEqual(entity.a, 1)
self.assertEqual(entity.b, 2)
def test_text_string(self):
adapter = JSONAdapter('{"a": 1, "b": 2}')
entity = DemoEntity(adapter)
self.assertEqual(entity.a, 1)
self.assertEqual(entity.b, 2)
def test_dict(self):
adapter = JSONAdapter({"a": 1, "b": 2})
entity = DemoEntity(adapter)
self.assertEqual(entity.a, 1)
self.assertEqual(entity.b, 2)
def test_not_dict(self):
with self.assertRaises(TypeError):
JSONAdapter([])
def test_nested(self):
class NestedEntity(Model):
demo = Argument(types.Nested(DemoEntity))
c = Argument(types.Integer)
adapter = JSONAdapter({"demo": {"a": 1, "b": 2}, "c": 3})
entity = NestedEntity(adapter)
self.assertEqual(entity.c, 3)
self.assertEqual(entity.demo.a, 1)
self.assertEqual(entity.demo.b, 2)
def test_multiple_nested(self):
class NestedEntity(Model):
demos = Argument(types.Nested(DemoEntity), multiple=True,
alias="demo")
c = Argument(types.Integer)
adapter = JSONAdapter({"demo": [
{"a": 1, "b": 2},
{"a": 4, "b": 5}
], "c": 3})
entity = NestedEntity(adapter)
self.assertEqual(entity.c, 3)
self.assertEqual(entity.demos[0].a, 1)
self.assertEqual(entity.demos[0].b, 2)
self.assertEqual(entity.demos[1].a, 4)
self.assertEqual(entity.demos[1].b, 5)
def test_multiple_nested_type_error(self):
class NestedEntity(Model):
demos = Argument(types.Nested(DemoEntity), multiple=True)
c = Argument(types.Integer)
adapter = JSONAdapter({"demo": {"a": 1, "b": 2}, "c": 3})
with self.assertRaises(ArgumentInvalidError):
NestedEntity(adapter)
def test_nested_model_cls_type_error(self):
with self.assertRaises(TypeError):
class NestedEntity(Model):
demos = Argument(types.Nested([]), alias="demo", multiple=True)
c = Argument(types.Integer)
def test_nested_model_cls_value_error(self):
class T(object):
pass
with self.assertRaises(ValueError):
class NestedEntity(Model):
demos = Argument(types.Nested(T), alias="demo", multiple=True)
c = Argument(types.Integer)
| 28.719212
| 79
| 0.558662
|
10912837c0beba991784d30a10752f387643eb91
| 2,303
|
py
|
Python
|
conjure-python-core/src/test/resources/types/expected/package_name/product/__init__.py
|
gabeboning/conjure-python
|
f5747b62197703bc898432435407eca419983f17
|
[
"Apache-2.0"
] | null | null | null |
conjure-python-core/src/test/resources/types/expected/package_name/product/__init__.py
|
gabeboning/conjure-python
|
f5747b62197703bc898432435407eca419983f17
|
[
"Apache-2.0"
] | null | null | null |
conjure-python-core/src/test/resources/types/expected/package_name/product/__init__.py
|
gabeboning/conjure-python
|
f5747b62197703bc898432435407eca419983f17
|
[
"Apache-2.0"
] | null | null | null |
from .._impl import (
product_AliasAsMapKeyExample as AliasAsMapKeyExample,
product_AnyExample as AnyExample,
product_AnyMapExample as AnyMapExample,
product_BearerTokenAliasExample as BearerTokenAliasExample,
product_BearerTokenExample as BearerTokenExample,
product_BinaryAliasExample as BinaryAliasExample,
product_BinaryExample as BinaryExample,
product_BooleanAliasExample as BooleanAliasExample,
product_BooleanExample as BooleanExample,
product_CollectionAliasExample as CollectionAliasExample,
product_CreateDatasetRequest as CreateDatasetRequest,
product_DateTimeAliasExample as DateTimeAliasExample,
product_DateTimeExample as DateTimeExample,
product_DoubleAliasExample as DoubleAliasExample,
product_DoubleExample as DoubleExample,
product_EmptyObjectExample as EmptyObjectExample,
product_EnumExample as EnumExample,
product_EnumFieldExample as EnumFieldExample,
product_FieldObject as FieldObject,
product_IntegerAliasExample as IntegerAliasExample,
product_IntegerExample as IntegerExample,
product_ListExample as ListExample,
product_ManyFieldExample as ManyFieldExample,
product_MapAliasExample as MapAliasExample,
product_MapExample as MapExample,
product_NestedAliasExample as NestedAliasExample,
product_OptionalExample as OptionalExample,
product_OptionsUnion as OptionsUnion,
product_OptionsUnionVisitor as OptionsUnionVisitor,
product_PrimitiveOptionalsExample as PrimitiveOptionalsExample,
product_RecursiveObjectAlias as RecursiveObjectAlias,
product_RecursiveObjectExample as RecursiveObjectExample,
product_ReferenceAliasExample as ReferenceAliasExample,
product_ReservedKeyExample as ReservedKeyExample,
product_RidAliasExample as RidAliasExample,
product_RidExample as RidExample,
product_SafeLongAliasExample as SafeLongAliasExample,
product_SafeLongExample as SafeLongExample,
product_SetExample as SetExample,
product_StringAliasExample as StringAliasExample,
product_StringExample as StringExample,
product_UnionTypeExample as UnionTypeExample,
product_UnionTypeExampleVisitor as UnionTypeExampleVisitor,
product_UuidAliasExample as UuidAliasExample,
product_UuidExample as UuidExample,
)
| 47
| 67
| 0.83934
|
8e09bc2674b6e9cd4af1681cc851942f63ba78ca
| 2,734
|
py
|
Python
|
hazma/pbh.py
|
LoganAMorrison/Hazma
|
e9612729767ff48d5ce50633393f81ee021242d2
|
[
"MIT"
] | 6
|
2019-07-30T18:14:43.000Z
|
2020-10-25T04:58:44.000Z
|
hazma/pbh.py
|
LoganAMorrison/Hazma
|
e9612729767ff48d5ce50633393f81ee021242d2
|
[
"MIT"
] | 8
|
2017-12-19T08:06:59.000Z
|
2021-04-22T02:15:26.000Z
|
hazma/pbh.py
|
LoganAMorrison/Hazma
|
e9612729767ff48d5ce50633393f81ee021242d2
|
[
"MIT"
] | 1
|
2020-04-01T11:08:49.000Z
|
2020-04-01T11:08:49.000Z
|
import numpy as np
import pandas as pd
from pkg_resources import resource_filename
from scipy.interpolate import interp1d
from hazma.parameters import g_to_MeV, MeV_to_g
from hazma.theory import TheoryDec
class PBH(TheoryDec):
"""
A creative implementation of PBH dark matter as a `TheoryDec`.
"""
def __init__(
self, mx, f_pbh_dummy=1, spectrum_kind="secondary", bh_secondary=False
):
"""
:param mx: PBH mass in MeV
"""
self.f_pbh_dummy = f_pbh_dummy
# Must call in this order
self._spectrum_kind = spectrum_kind
self._bh_secondary = bh_secondary
self._load_spec_data()
self.mx = mx
def __repr__(self):
return f"PBH(m={self.mx * MeV_to_g} g, f={self.f_pbh_dummy})"
def _load_spec_data(self):
"""
Load spectrum data tables
"""
if self.spectrum_kind == "primary":
fname = resource_filename(__name__, "pbh_data/pbh_primary_spectra_bh.csv")
elif self.spectrum_kind == "secondary" and self.bh_secondary:
fname = resource_filename(__name__, "pbh_data/pbh_secondary_spectra_bh.csv")
elif self.spectrum_kind == "secondary":
fname = resource_filename(__name__, "pbh_data/pbh_secondary_spectra.csv")
else:
raise ValueError("invalid spectrum_kind")
def to_float(s):
try:
return float(s)
except ValueError:
sig, exponent = s.split("e")
return float(sig) * 10 ** float(exponent)
df = pd.read_csv(fname)
self._mxs = df.columns[2:].map(to_float).values * g_to_MeV
self._e_gams = df.iloc[:, 1].values * 1e3 # GeV -> MeV
self._d2n_dedt = df.iloc[:, 2:].values * 1e-3 # 1/GeV -> 1/MeV
@property
def bh_secondary(self):
return self._bh_secondary
@bh_secondary.setter
def bh_secondary(self, bhs):
raise RuntimeError("cannot set bh_secondary")
@property
def spectrum_kind(self):
return self._spectrum_kind
@spectrum_kind.setter
def spectrum_kind(self, sk):
raise RuntimeError("cannot set spectrum_kind")
@property
def mx(self):
return self._mx
@mx.setter
def mx(self, mx):
self._mx = mx
idx = np.where(self._mxs == mx)[0][0]
fn = interp1d(
self._e_gams, self._d2n_dedt[:, idx], bounds_error=False, fill_value=0
)
self._spectrum_funcs = lambda: {"all": fn}
@staticmethod
def list_final_decay_states():
return ["all"]
def _decay_widths(self):
return {"all": self.f_pbh_dummy}
def _gamma_ray_line_energies(self):
return {}
| 28.185567
| 88
| 0.614484
|
843f98fcc6f99ee68b4a570f40023be50b8c1de5
| 2,961
|
py
|
Python
|
build.py
|
skydark/matools
|
c658dd36bf154482dbe344d5cae0686135a59607
|
[
"MIT"
] | 1
|
2018-07-12T16:15:50.000Z
|
2018-07-12T16:15:50.000Z
|
build.py
|
skydark/matools
|
c658dd36bf154482dbe344d5cae0686135a59607
|
[
"MIT"
] | null | null | null |
build.py
|
skydark/matools
|
c658dd36bf154482dbe344d5cae0686135a59607
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
from card import MACardList, MACardToDB
from generator import MAGenerator
from utils import copytree, copyfiles
from mapngdecoder import decrypt
def build_viewer_face(root_dir, out_dir):
copyfiles(os.path.join(root_dir, 'download', 'image', 'face'), out_dir,
decrypt=decrypt, tranformer=lambda n: n+'.png')
def build_viewer_card(root_dir, out_dir):
copyfiles(os.path.join(root_dir, 'download', 'image', 'card'), out_dir,
decrypt=decrypt, tranformer=lambda n: n+'.png')
def build_viewer_data(root_dir, path, var_name):
cardlist = MACardList(root_dir)
dumper = MACardToDB(cardlist.cards)
# dumper.dump('ma.db')
json = dumper.dump_json()
with open(path, 'w') as f:
f.write('%s=%s;' % (var_name, json))
def build_ons_script(root_dir, out_dir):
MAGenerator(root_dir, out_dir).generate()
def build_que_adv(decrypted, path):
from io import BytesIO
try:
from PIL import Image
except ImportError as e:
print("PIL未安装,无法处理对话框图像,忽略")
return
im = Image.open(BytesIO(decrypted)).convert('RGBA')
im = im.crop((0, 0, 960, 250))
im.save(path)
def build_ons_data(root_dir, out_dir):
build_ons_script(root_dir, out_dir)
se_dir = os.path.join(root_dir, 'download', 'sound')
print("复制bgm中...")
copyfiles(se_dir, os.path.join(out_dir, "bgm"), lambda n: n.startswith('bgm'))
print("复制音效中...")
copyfiles(se_dir, os.path.join(out_dir, "se"), lambda n: n.startswith('se'))
adv_dir = os.path.join(root_dir, 'download', 'image', 'adv')
print("解密背景图片中...")
copyfiles(adv_dir, os.path.join(out_dir, 'bgimage'),
lambda n: n.startswith('adv_bg'),
decrypt=decrypt, tranformer=lambda n: n+'.png')
copyfiles(os.path.join(root_dir, 'download', 'rest'),
os.path.join(out_dir, 'bgimage'),
lambda n: n == 'exp_map_bg',
decrypt=decrypt, tranformer=lambda n: 'map.png')
print("解密角色图片中...")
copyfiles(adv_dir, os.path.join(out_dir, 'chara'),
lambda n: n.startswith('adv_chara'),
decrypt=decrypt, tranformer=lambda n: n+'.png')
print("生成对话框中...")
image_dir = os.path.join(out_dir, 'image')
os.makedirs(image_dir, exist_ok=True)
with open(os.path.join(root_dir, 'download', 'rest', 'que_adv'), 'rb') as f:
img = decrypt(f.read())
build_que_adv(img, os.path.join(image_dir, 'que_adv.png'))
voice_dir = os.path.join(root_dir, 'download', 'voice')
if os.path.isdir(voice_dir):
print("复制语音目录中...")
copytree(voice_dir, os.path.join(out_dir, 'voice'))
else:
print("语音目录不存在,已忽略")
if __name__ == '__main__':
root_dir = 'save'
build_viewer_data(root_dir, 'viewer/js/madb.js', 'MA_DB')
build_viewer_face(root_dir, 'viewer/image/face')
build_viewer_card(root_dir, 'viewer/image/card')
build_ons_data(root_dir, 'ons')
| 34.430233
| 82
| 0.647754
|
90f249ec88d23bed7f700e224eb2b7cc94701850
| 160
|
py
|
Python
|
capcom/census/census.py
|
rcthomas/capcom
|
e72c41da046e05d8450dfa7297cb5dee5a206daa
|
[
"BSD-3-Clause"
] | 1
|
2015-07-24T21:32:22.000Z
|
2015-07-24T21:32:22.000Z
|
capcom/census/census.py
|
rcthomas/capcom
|
e72c41da046e05d8450dfa7297cb5dee5a206daa
|
[
"BSD-3-Clause"
] | null | null | null |
capcom/census/census.py
|
rcthomas/capcom
|
e72c41da046e05d8450dfa7297cb5dee5a206daa
|
[
"BSD-3-Clause"
] | null | null | null |
class Census ( object ) :
def __init__( self, selector ) :
self.selector = selector
def __call__( self ) :
raise NotImplementedError
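# Illustrative sketch (hypothetical subclass): concrete censuses implement
# __call__ using the selector supplied at construction time.
#
#   class CountCensus(Census):
#       def __call__(self):
#           return len(self.selector())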
| 17.777778
| 36
| 0.61875
|
d154c10b0a3dfdaf66a10fc874a61a11a7581a8a
| 6,683
|
py
|
Python
|
tests/test_ans_devices/driver_controller.py
|
Miki-Jin/python-openimu
|
9aa2aab967e7328320053ab335d285415fe49e31
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ans_devices/driver_controller.py
|
Miki-Jin/python-openimu
|
9aa2aab967e7328320053ab335d285415fe49e31
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ans_devices/driver_controller.py
|
Miki-Jin/python-openimu
|
9aa2aab967e7328320053ab335d285415fe49e31
|
[
"Apache-2.0"
] | null | null | null |
"""This module defines the `ProcessController` class
which runs an app as a subprocess and can write to it and read from it to get
structured output.
"""
import logging
import subprocess
from distutils.spawn import find_executable
from typing import Union, List, Optional
from io_manager import IoManager
from constants import (
DEFAULT_PROCESS_TIMEOUT_SEC,
DEFAULT_TIME_TO_CHECK_FOR_ADDITIONAL_OUTPUT_SEC,
)
import time
import process_parser
import sys
DEFAULT_PROCESS_LAUNCH_COMMAND = ["./ans-devices.exe", "--cli"]
logger = logging.getLogger(__name__)
class ProcessController:
def __init__(
self,
command,
time_to_check_for_additional_output_sec=DEFAULT_TIME_TO_CHECK_FOR_ADDITIONAL_OUTPUT_SEC,
):
"""
Run a subprocess. Send commands and receive structured output.
Create new object, along with subprocess
Args:
command: Command to run in shell to spawn new subprocess
            time_to_check_for_additional_output_sec: When parsing responses, wait this amount of time before exiting (exits before timeout is reached to save time). If <= 0, full timeout time is used.
Returns:
New ProcessController object
"""
if command is None:
command = DEFAULT_PROCESS_LAUNCH_COMMAND
# if not any([("--interpreter=mi" in c) for c in command]):
# logger.warning(
# "warning. "
# )
self.abs_app_path = None # abs path to executable
self.command = command # type: List[str]
self.time_to_check_for_additional_output_sec = (
time_to_check_for_additional_output_sec
)
self.app_process = None
self._allow_overwrite_timeout_times = (
self.time_to_check_for_additional_output_sec > 0
)
app_path = command[0] if isinstance(command, list) else command.split(' ')[0]
if not app_path:
raise ValueError("a valid path to app must be specified")
else:
abs_app_path = find_executable(app_path)
if abs_app_path is None:
raise ValueError(
'executable could not be resolved from "%s"' % app_path
)
else:
self.abs_app_path = abs_app_path
self.spawn_new_subprocess()
def spawn_new_subprocess(self):
"""Spawn a new subprocess with the arguments supplied to the object
during initialization. If subprocess already exists, terminate it before
spawning a new one.
Return int: process id
"""
if self.app_process:
logger.debug(
"Killing current subprocess (pid %d)" % self.app_process.pid
)
self.exit()
logger.debug(f'Launching app: {" ".join(self.command)}')
# print('xxxxxxxxxxxxxxxxxxxxx', self.command)
# Use pipes to the standard streams
self.app_process = subprocess.Popen(
self.command,
shell=False,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=0,
)
self.io_manager = IoManager(
self.app_process.stdin,
self.app_process.stdout,
self.app_process.stderr,
self.time_to_check_for_additional_output_sec,
)
return self.app_process.pid
def get_process_response(
self, timeout_sec: float = DEFAULT_PROCESS_TIMEOUT_SEC, raise_error_on_timeout=True
):
"""Get process response. See IoManager.get_process_response() for details"""
return self.io_manager.get_process_response(timeout_sec, raise_error_on_timeout)
def write(
self,
mi_cmd_to_write: Union[str, List[str]],
timeout_sec=DEFAULT_PROCESS_TIMEOUT_SEC,
raise_error_on_timeout: bool = True,
read_response: bool = True,
):
"""Write command to process. See IoManager.write() for details"""
print('cmd: ', mi_cmd_to_write)
return self.io_manager.write(
mi_cmd_to_write, timeout_sec, raise_error_on_timeout, read_response
)
def exit(self) -> None:
"""Terminate process"""
if self.app_process:
self.app_process.terminate()
self.app_process.communicate()
self.app_process = None
return None
if __name__ == '__main__':
if len(sys.argv) < 2:
print('input upgrade file')
exit(-1)
upgrade_file = sys.argv[1]
fs_log = open('./log.txt', 'w')
driver_cmd = './ans-devices.exe --cli'
app_handle = ProcessController(driver_cmd)
suc_count = 0
fail_count = 0
data = ''
process_ret = ''
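# Wait until the driver CLI reports a connection before starting the upgrade cycles.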
while True:
while True:
try:
response = app_handle.get_process_response()
try:
process_ret += process_parser.get_gdb_response_str(response)
except Exception as e:
print(e)
if 'Connected' in process_ret:
print('python driver connected...')
fs_log.write(process_ret)
process_ret = ''
break
except Exception as e:
time.sleep(1)
print('wait to connect')
time.sleep(2)
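# Repeatedly issue the upgrade command and poll the CLI output until success, failure, or timeout.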
while True:
response = app_handle.write('upgrade {0}'.format(upgrade_file), 1, read_response = False)
time_used = 0
while True:
try:
response = app_handle.get_process_response()
try:
process_ret += process_parser.get_gdb_response_str(response)
except Exception as e:
print(e)
if 'RTK_INS App' in process_ret:
print('upgrade suc...')
suc_count+= 1
break
elif 'failed' in process_ret:
print('upgrade fail...')
fail_count+= 1
break
except Exception as e:
time.sleep(1)
time_used+= 2
print("\rtime used: %ds" %(time_used), end="")
if time_used > 600:
print('time out')
time_used = 0
fail_count+= 1
break
print('suc_count = {0}, fail_count = {1}'.format(suc_count, fail_count))
fs_log.write(process_ret)
time.sleep(5)
process_ret = ''
| 34.807292
| 199
| 0.569056
|
ebe7efadcec8ee63fe3cf9c85c3cac90bf45cd94
| 1,248
|
py
|
Python
|
compress_images.py
|
jmball/compress_images
|
ca2524e4f28d111cafd1d28207c6eaca0d41d3b3
|
[
"MIT"
] | null | null | null |
compress_images.py
|
jmball/compress_images
|
ca2524e4f28d111cafd1d28207c6eaca0d41d3b3
|
[
"MIT"
] | null | null | null |
compress_images.py
|
jmball/compress_images
|
ca2524e4f28d111cafd1d28207c6eaca0d41d3b3
|
[
"MIT"
] | null | null | null |
"""Compress image files."""
import argparse
import pathlib
import PIL.Image
def get_args():
"""Get CLI arguments."""
parser = argparse.ArgumentParser()
parser.add_argument("--path", default="", help="File or folder path.")
parser.add_argument(
"--quality", type=int, default=30, help="Compressed image quality percentage."
)
return parser.parse_args()
def compress_images(path, quality):
"""Compress an image file or all image files in a folder.
Parameters
----------
path : str
    File or folder path.
quality : int
    Compressed image quality percentage.
"""
path = pathlib.Path(path)
if path.exists() is False:
raise ValueError(f"Invalid path: {path}")
if path.is_dir():
folder = path
files = [f for f in path.iterdir()]
elif path.is_file():
folder = path.parent
files = [path]
for f in files:
try:
im = PIL.Image.open(f)
new_filename = f"compressed-{f.parts[-1]}"
im.save(folder.joinpath(new_filename), optimize=True, quality=quality)
except PIL.Image.UnidentifiedImageError:
print(f"Invalid image file: {f}")
if __name__ == "__main__":
args = get_args()
compress_images(args.path, args.quality)
| 24
| 86
| 0.608974
|
f12bf5a95e36bfa3144121510669839521c8100f
| 15,647
|
py
|
Python
|
scripts/classical/small_verify/script_t1.py
|
nadiahpk/inferring-undiscovered-species-extinctions
|
162710eac548a9c869233e5a6a64ed10d60733f5
|
[
"Unlicense"
] | 2
|
2019-04-22T16:02:15.000Z
|
2020-12-04T02:39:17.000Z
|
scripts/classical/small_verify/script_t1.py
|
nadiahpk/inferring-undiscovered-species-extinctions
|
162710eac548a9c869233e5a6a64ed10d60733f5
|
[
"Unlicense"
] | null | null | null |
scripts/classical/small_verify/script_t1.py
|
nadiahpk/inferring-undiscovered-species-extinctions
|
162710eac548a9c869233e5a6a64ed10d60733f5
|
[
"Unlicense"
] | null | null | null |
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import pickle
from small_verify_fncs import get_coverage_estimates, get_example
import sys
sys.path.insert(0,'../../../undetected_extinctions') # so I can import the undetected extinctions package
from undetected_extinctions import find_U0_bnd
# parameters we'll keep constant across all variants
# ---
dName = '../../../results/classical/small_verify/' # directory to write results
pcileV = np.array([90, 80, 70, 60, 50, 40, 30, 20, 10]) # list of percentiles to check coverage for
negs = 100 # number of example runs to plot
# switches to control behaviour
plotPmu = True
plotegs = True
getCIs = True
plotCIs = True
# create the different runs to perform
# ---
runs = {
'const': { # completed
'U0': 50,
'S0': 10,
'nsims': 1000,
'nsamples': 1000,
'T': 5,
'mu_fnc_type': 'discrete',
'mu_fnc': lambda t: 0.1,
'mu_pdf_fnc': lambda mu: 1 if mu == 0.1 else 0,
'muV': [0.1],
'nu_fnc_type': 'discrete',
'nu_fnc': lambda t: 0.1,
'nu_pdf_fnc': lambda nu: 1 if nu == 0.1 else 0,
'nuV': [0.1],
},
'const2': { # NOTE -- a bit longer, but not a priority
'U0': 120,
'S0': 30,
'nsims': 500,
'nsamples': 100,
'T': 5,
'mu_fnc_type': 'discrete',
'mu_fnc': lambda t: 0.1,
'mu_pdf_fnc': lambda mu: 1 if mu == 0.1 else 0,
'muV': [0.1],
'nu_fnc_type': 'discrete',
'nu_fnc': lambda t: 0.1,
'nu_pdf_fnc': lambda nu: 1 if nu == 0.1 else 0,
'nuV': [0.1],
},
'const3': { # completed
'U0': 1200,
'S0': 300,
'nsims': 1000,
'nsamples': 1000,
'T': 5,
'mu_fnc_type': 'discrete',
'mu_fnc': lambda t: 0.1,
'mu_pdf_fnc': lambda mu: 1 if mu == 0.1 else 0,
'muV': [0.1],
'nu_fnc_type': 'discrete',
'nu_fnc': lambda t: 0.1,
'nu_pdf_fnc': lambda nu: 1 if nu == 0.1 else 0,
'nuV': [0.1],
},
'const4': { # completed
'U0': 100,
'S0': 600,
'nsims': 1000,
'nsamples': 1000,
'T': 5,
'mu_fnc_type': 'discrete',
'mu_fnc': lambda t: 0.15,
'mu_pdf_fnc': lambda mu: 1 if mu == 0.15 else 0,
'muV': [0.15],
'nu_fnc_type': 'discrete',
'nu_fnc': lambda t: 0.07,
'nu_pdf_fnc': lambda nu: 1 if nu == 0.07 else 0,
'nuV': [0.07],
},
'beta1': { # completed
'U0': 50,
'S0': 10,
'nsims': 1000,
'nsamples': 1000,
'T': 5,
'mu_fnc_type': 'continuous',
'mu_fnc': lambda t: stats.beta(4,36).rvs(),
'mu_pdf_fnc': lambda mu: stats.beta(4,36).pdf(mu),
'muV': np.linspace(0,1,100),
'nu_fnc_type': 'continuous',
'nu_fnc': lambda t: stats.beta(4,36).rvs(),
'nu_pdf_fnc': lambda nu: stats.beta(4,36).pdf(nu),
'nuV': np.linspace(0,1,100),
},
'beta2': { # completed
'U0': 120,
'S0': 30,
'nsims': 1000,
'nsamples': 1000,
'T': 5,
'mu_fnc_type': 'continuous',
'mu_fnc': lambda t: stats.beta(4,36).rvs(),
'mu_pdf_fnc': lambda mu: stats.beta(4,36).pdf(mu),
'muV': np.linspace(0,1,100),
'nu_fnc_type': 'continuous',
'nu_fnc': lambda t: stats.beta(4,36).rvs(),
'nu_pdf_fnc': lambda nu: stats.beta(4,36).pdf(nu),
'nuV': np.linspace(0,1,100),
},
'beta3': { # completed
'U0': 800,
'S0': 300,
'nsims': 1000,
'nsamples': 1000,
'T': 5,
'mu_fnc_type': 'continuous',
'mu_fnc': lambda t: stats.beta(50,450).rvs() if stats.uniform.rvs() < 0.5 else stats.beta(100,400).rvs(),
'mu_pdf_fnc': lambda mu: 0.5*stats.beta(50,450).pdf(mu) + 0.5*stats.beta(100,400).pdf(mu),
'muV': np.linspace(0,1,100),
'nu_fnc_type': 'continuous',
'nu_fnc': lambda t: stats.beta(50,450).rvs() if stats.uniform.rvs() < 0.5 else stats.beta(100,400).rvs(),
'nu_pdf_fnc': lambda nu: 0.5*stats.beta(50,450).pdf(nu) + 0.5*stats.beta(100,400).pdf(nu),
'nuV': np.linspace(0,1,100),
},
'fnct': { # completed
'U0': 120,
'S0': 30,
'nsims': 1000,
'nsamples': 1000,
'T': 5,
'mu_fnc_type': 'of_t',
'mu_fnc': lambda t: 0.3 if t == 3 else 0.05,
'nu_fnc_type': 'of_t',
'nu_fnc': lambda t: 0.1,
'nuV': [0.1],
},
'fnct1': { # completed
'U0': 1200,
'S0': 300,
'nsims': 500,
'nsamples': 100,
'T': 5,
'mu_fnc_type': 'of_t',
'mu_fnc': lambda t: 0.3 if t == 3 else 0.05,
'nu_fnc_type': 'of_t',
'nu_fnc': lambda t: 0.1,
'nuV': [0.1],
},
'fnct2': { # completed -- varying nu in time
'U0': 800,
'S0': 300,
'nsims': 1000,
'nsamples': 1000,
'T': 5,
'mu_fnc_type': 'of_t',
'mu_fnc': lambda t: 0.1,
'nu_fnc_type': 'of_t',
'nu_fnc': lambda t: 0.3 if t == 3 else 0.05,
'nuV': [0.1],
},
'fnct3': { # completed -- redo of fnct1 for longer
'U0': 1200,
'S0': 300,
'nsims': 1000,
'nsamples': 1000,
'T': 5,
'mu_fnc_type': 'of_t',
'mu_fnc': lambda t: 0.3 if t == 3 else 0.05,
'nu_fnc_type': 'of_t',
'nu_fnc': lambda t: 0.1,
'nuV': [0.1],
},
'fnct4': { # completed
'U0': 800,
'S0': 300,
'nsims': 1000,
'nsamples': 1000,
'T': 5,
'mu_fnc_type': 'of_t',
'mu_fnc': lambda t: 0.1,
'nu_fnc_type': 'of_t',
'nu_fnc': lambda t: [0.05, 0.1, 0.2, 0.4, 1][t-1],
'nuV': [0.1],
},
'fnct5': { # completed
'U0': 800,
'S0': 300,
'nsims': 500,
'nsamples': 100,
'T': 5,
'mu_fnc_type': 'of_t',
'mu_fnc': lambda t: [0.1, 0.1, 0.3, 0.2, .1][t-1],
'nu_fnc_type': 'of_t',
'nu_fnc': lambda t: [0.05, 0.4, 0.1, 0.1, .1][t-1],
'nuV': [0.1],
},
'fnct5l': { # completed
'U0': 800,
'S0': 300,
'nsims': 1000,
'nsamples': 1000,
'T': 5,
'mu_fnc_type': 'of_t',
'mu_fnc': lambda t: [0.1, 0.1, 0.3, 0.2, .1][t-1],
'nu_fnc_type': 'of_t',
'nu_fnc': lambda t: [0.05, 0.4, 0.1, 0.1, .1][t-1],
'nuV': [0.1],
},
'fnct6': { # completed
'U0': 500,
'S0': 500,
'nsims': 500,
'nsamples': 100,
'T': 5,
'mu_fnc_type': 'of_t',
'mu_fnc': lambda t: [0.3, 0.2, 0.1, 0.1, .1][t-1],
'nu_fnc_type': 'of_t',
'nu_fnc': lambda t: [0.05, 0.05, 0.2, 0.4, .8][t-1],
},
'fnct6l': { # completed
'U0': 500,
'S0': 500,
'nsims': 1000,
'nsamples': 1000,
'T': 5,
'mu_fnc_type': 'of_t',
'mu_fnc': lambda t: [0.3, 0.2, 0.1, 0.1, .1][t-1],
'nu_fnc_type': 'of_t',
'nu_fnc': lambda t: [0.05, 0.05, 0.2, 0.4, .8][t-1],
},
'fnct7': { # completed
'U0': 500,
'S0': 600,
'nsims': 1000,
'nsamples': 1000,
'T': 5,
'mu_fnc_type': 'of_t',
'mu_fnc': lambda t: [0.4, 0.1, 0.4, 0.1, .1][t-1],
'nu_fnc_type': 'of_t',
'nu_fnc': lambda t: [0, 0.5, 0.1, 0.1, 1][t-1],
},
'fnct8': { # NOTE -- running now, a bit crazy idea
'U0': 500,
'S0': 600,
'nsims': 1000,
'nsamples': 1000,
'T': 5,
'mu_fnc_type': 'of_t',
'mu_fnc': lambda t: [0, 0.5, 0.1, 0.1, 0.7][t-1],
'nu_fnc_type': 'of_t',
'nu_fnc': lambda t: [0.4, 0.1, 0.4, 0.1, .1][t-1],
},
'fnct9': { # NOTE - running now, wobble fnct7 differently
'U0': 500,
'S0': 600,
'nsims': 1000,
'nsamples': 1000,
'T': 5,
'mu_fnc_type': 'of_t',
'mu_fnc': lambda t: [0.1, 0.4, 0.1, 0.4, .1][t-1],
'nu_fnc_type': 'of_t',
'nu_fnc': lambda t: [0.5, 0, 0.2, 0, 1][t-1],
},
'fnct10': { # NOTE - running now - lots of early extinctions, only detect later
'U0': 500,
'S0': 600,
'nsims': 1000,
'nsamples': 1000,
'T': 5,
'mu_fnc_type': 'of_t',
'mu_fnc': lambda t: [0.6, 0.1, 0.05, 0.05, 0][t-1],
'nu_fnc_type': 'of_t',
'nu_fnc': lambda t: [0, 0, 0, 0.3, 1][t-1],
},
}
todo_list = ['fnct9']
# for runName, run in runs.items():
for runName in todo_list:
run = runs[runName]
U0 = run['U0']
S0 = run['S0']
# plot P(mu) and P(nu)
# ---
if plotPmu:
# mu's pdf
mu_fnc_type = run['mu_fnc_type']
if mu_fnc_type == 'of_t':
# should make a plot of t vs mu_t
T = run['T']
mu_fnc = run['mu_fnc']
tV = np.arange(1,T+1)
muV = [ mu_fnc(t) for t in tV ]
plt.figure(figsize=(8*0.3,6*0.3))
plt.scatter(tV, muV, color='black')
plt.xlabel(r'$t$', fontsize='x-large')
plt.ylabel(r'$\mu_t$', fontsize='x-large')
plt.xticks( tV )
plt.yticks( [0,1] )
plt.tight_layout()
plt.savefig(dName + runName + '_tmu.pdf')
plt.close()
else:
# should make a plot of mu_t vs P(mu_t)
mu_pdf_fnc = run['mu_pdf_fnc']
muV = run['muV']
p_mu = [ mu_pdf_fnc(mu) for mu in muV ]
plt.figure(figsize=(8*0.3,6*0.3))
if mu_fnc_type == 'continuous':
plt.plot(muV, p_mu, 'black', lw=3)
else:
plt.stem(muV, p_mu, 'black', markerfmt = 'ko', basefmt=" ")
plt.xlabel(r'$\mu_t$', fontsize='x-large')
plt.ylabel(r'$P(\mu_t)$', fontsize='x-large')
plt.xticks( [0,1] )
plt.yticks( [0,int(np.ceil(max(p_mu)))] )
plt.tight_layout()
plt.savefig(dName + runName + '_Pmu.pdf')
plt.close()
# nu's pdf
nu_fnc_type = run['nu_fnc_type']
if nu_fnc_type == 'of_t':
# should make a plot of t vs nu_t
T = run['T']
nu_fnc = run['nu_fnc']
tV = np.arange(1,T+1)
nuV = [ nu_fnc(t) for t in tV ]
plt.figure(figsize=(8*0.3,6*0.3))
plt.scatter(tV, nuV, color='black')
plt.xlabel(r'$t$', fontsize='x-large')
plt.ylabel(r'$\nu_t$', fontsize='x-large')
plt.xticks( tV )
plt.yticks( [0,1] )
plt.tight_layout()
plt.savefig(dName + runName + '_tnu.pdf')
plt.close()
else:
nu_pdf_fnc = run['nu_pdf_fnc']
nuV = run['nuV']
p_nu = [ nu_pdf_fnc(nu) for nu in nuV ]
plt.figure(figsize=(8*0.3,6*0.3))
if nu_fnc_type == 'continuous':
plt.plot(nuV, p_nu, 'black', lw=3)
else:
plt.stem(nuV, p_nu, 'black', markerfmt = 'ko', basefmt=" ")
plt.xlabel(r'$\nu_t$', fontsize='x-large')
plt.ylabel(r'$P(\nu_t)$', fontsize='x-large')
plt.xticks( [0,1] )
plt.yticks( [0,int(np.ceil(max(p_nu)))] )
plt.tight_layout()
plt.savefig(dName + runName + '_Pnu.pdf')
plt.close()
if plotegs:
T = run['T']
mu_fnc = run['mu_fnc']
nu_fnc = run['nu_fnc']
fName = dName + runName + '_eg.pdf' # where to save the plot
S_orig, E_orig, U_orig, X_orig, UV = get_example(U0, S0, mu_fnc, nu_fnc, T, negs)
plt.figure(figsize=(8*0.5,6*0.5))
# plot the simulation as "true" values
plt.plot(S_orig, 'green', lw=1, label = r'$S_t$')
plt.plot(E_orig, 'red', lw=1, label = r'$E_t$')
plt.plot(X_orig, 'blue', lw=1, label = r'$X_t$')
# plot each of the examples
for i, U in enumerate(UV):
if i == 0:
plt.plot(U, 'orange', lw=0.5, alpha=0.5, label = r'$U_t^{[i]}$')
else:
plt.plot(U, 'orange', lw=0.5, alpha=0.5)
# this one last so it's on top
plt.plot(U_orig, 'black', lw=2, label = r'$U_t$')
plt.xlabel('year')
plt.ylabel('number of species')
plt.legend(loc='upper right', fontsize='small')
plt.tight_layout()
plt.savefig(fName)
plt.close()
if getCIs:
nsims = run['nsims']
nsamples = run['nsamples']
T = run['T']
mu_fnc = run['mu_fnc']
nu_fnc = run['nu_fnc']
fName = dName + runName + '_coverage.pkl' # where to save the outcome
cnt_withinV, U0_meanV = get_coverage_estimates(nsims, nsamples, pcileV, U0, S0, T, mu_fnc, nu_fnc)
# save result to pickle file
f = open(fName, 'wb')
pickle.dump( pcileV, f )
pickle.dump( nsims, f )
pickle.dump( cnt_withinV, f )
pickle.dump( U0_meanV, f )
f.close()
if plotCIs:
fName = dName + runName + '_coverage.pkl' # where to save the outcome
f = open(fName, 'rb')
pcileV = pickle.load( f )
nsims = pickle.load( f )
cnt_withinV = pickle.load( f )
U0_meanV = pickle.load( f )
f.close()
coverage = 100*cnt_withinV/nsims
plt.figure(figsize=(8*0.5,6*0.5))
plt.plot( pcileV, pcileV, ls='dotted', color='black')
plt.scatter( pcileV, coverage, color='black')
plt.xlabel('nominal coverage desired')
plt.ylabel('actual coverage obtained')
fName = dName + runName + '_coverage.pdf' # where to save the outcome
plt.xlim( (0,100) )
plt.ylim( (0,100) )
plt.tight_layout()
plt.savefig(fName)
plt.close()
'''
If I have time
'const2': {
'nsims': 1000,
'nsamples': 1000,
'T': 5,
'mu_fnc_type': 'discrete',
'mu_fnc': lambda: 0.1 if stats.uniform.rvs() < 0.5 else 0.05,
'mu_pdf_fnc': lambda mu: 0.5 if mu == 0.1 else ( 0.5 if mu == 0.05 else 0 ),
'muV': [0.05, 0.1],
'nu_fnc_type': 'discrete',
'nu_fnc': lambda: 0.1 if stats.uniform.rvs() < 0.5 else 0.05,
'nu_pdf_fnc': lambda nu: 0.5 if nu == 0.1 else ( 0.5 if nu == 0.05 else 0 ),
'nuV': [0.05, 0.1],
},
'''
| 31.294
| 117
| 0.440979
|
438f45499636c44889d35d48998e29037a16cd13
| 1,153
|
py
|
Python
|
src/logger.py
|
hunterbly/TalkingBot
|
683a043af91909728c39eb949d90af55be7c6475
|
[
"Apache-2.0"
] | null | null | null |
src/logger.py
|
hunterbly/TalkingBot
|
683a043af91909728c39eb949d90af55be7c6475
|
[
"Apache-2.0"
] | null | null | null |
src/logger.py
|
hunterbly/TalkingBot
|
683a043af91909728c39eb949d90af55be7c6475
|
[
"Apache-2.0"
] | null | null | null |
import sys
import logging
def setup_logger(name = __name__,
file_level = "debug",
stream_level = "info"):
level_dict = {"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL}
file_level = level_dict[file_level]
stream_level = level_dict[stream_level]
base_level = min(file_level, stream_level)
# Initialize logger
# Set Logger level
logger = logging.getLogger(name)
logger.setLevel(base_level)
# Create file handler
handler = logging.FileHandler("./log/backtest.log")
handler.setLevel(file_level)
# Create stream handler
console = logging.StreamHandler()
console.setLevel(stream_level)
# Set log format
formatter = logging.Formatter(
"%(asctime)s - %(levelname)s - %(name)s - %(message)s"
)
handler.setFormatter(formatter)
console.setFormatter(formatter)
# Add handler to logger
logger.addHandler(handler)
logger.addHandler(console)
return logger
| 27.452381
| 62
| 0.620989
|
7f07f389bb6e0ba04f7ff1f499f5436bcece1817
| 1,983
|
py
|
Python
|
scripts/test2.py
|
ryujaehun/tenset
|
aaa91e48f997e4659ca318661b7be2cb5237b29d
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
scripts/test2.py
|
ryujaehun/tenset
|
aaa91e48f997e4659ca318661b7be2cb5237b29d
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
scripts/test2.py
|
ryujaehun/tenset
|
aaa91e48f997e4659ca318661b7be2cb5237b29d
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
#!/usr/bin/env python
import subprocess
import os
from datetime import datetime
import torch
import threading
today = datetime.today().strftime('%Y-%m-%d')
sem = threading.Semaphore(torch.cuda.device_count())
idx = 0
os.makedirs(f"log/{today}", exist_ok=True)
maml = [False,True]
loss = ['rmse','rankNetLoss','lambdaRankLoss','listNetLoss']
model = ['mlp','transformer','lstm']
lr = [1e-2,1e-3,1e-4,1e-5,1e-6]
wd = [1e-2,1e-3,1e-4,1e-5,1e-6]
MAML = True
class Worker(threading.Thread):
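# Each Worker acquires a GPU slot via the semaphore and launches one dockerized training run.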
def __init__(self, loss, model,lr,wd):
super().__init__()
self.loss = loss
self.model = model
self.lr = lr
self.wd = wd
def run(self):
global MAML
global idx
sem.acquire()
idx += 1
if MAML:
text = f"docker run --ipc=host -it --gpus '\"device={idx%8}\"' --cpus 8 --rm -v /home/jaehun/tenset:/root/tvm -v /home/jaehun/tenset:/root test:latest python3 /root/scripts/train_model.py \
--maml --use-gpu --loss {self.loss} --models {self.model} --meta_outer_lr {self.lr} --meta_inner_lr {self.wd} >& log/{today}/MAML_{self.model}_{self.loss}_{self.lr}_{self.wd}.log"
else:
text = f"docker run --ipc=host -it --gpus '\"device={idx%8}\"' --cpus 8 --rm -v /home/jaehun/tenset:/root/tvm -v /home/jaehun/tenset:/root test:latest python3 /root/scripts/train_model.py \
--use-gpu --loss {self.loss} --models {self.model} --lr {self.lr} --wd {self.wd} >& log/{today}/{self.model}_{self.loss}_{self.lr}_{self.wd}.log"
proc = subprocess.Popen(text, shell=True, executable='/bin/bash')
_ = proc.communicate()
sem.release()
threads = []
for _loss in loss:
for _model in model:
for _lr in lr:
for _wd in wd:
thread = Worker(_loss,_model,_lr,_wd)
thread.start()  # call the sub thread's run() method
threads.append(thread)
for thread in threads:
thread.join()
| 35.410714
| 202
| 0.602118
|
ac97de89a81c6ea76d190ffb491d72c81b8eebef
| 2,301
|
py
|
Python
|
apps/accounts/migrations/0001_initial.py
|
ramseylove/project_management_api
|
9c76c4464baf7f9af6c977a42ccd7eb3ce205c7b
|
[
"MIT"
] | null | null | null |
apps/accounts/migrations/0001_initial.py
|
ramseylove/project_management_api
|
9c76c4464baf7f9af6c977a42ccd7eb3ce205c7b
|
[
"MIT"
] | null | null | null |
apps/accounts/migrations/0001_initial.py
|
ramseylove/project_management_api
|
9c76c4464baf7f9af6c977a42ccd7eb3ce205c7b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-08-24 18:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True, verbose_name='email address')),
('is_active', models.BooleanField(default=True)),
('is_admin', models.BooleanField(default=False)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='UserProfile',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='accounts.customuser')),
('name', models.CharField(blank=True, max_length=100, null=True)),
('phone', models.CharField(blank=True, max_length=50, null=True)),
('timezone', models.CharField(blank=True, max_length=50, null=True)),
],
),
]
| 52.295455
| 266
| 0.633638
|
4da587a71c7439096cc90e9f67f7f5cfa4f2cabd
| 88
|
py
|
Python
|
carto_renderer/__init__.py
|
EdsonGermano/socrata-platform-carto-renderer
|
675b4abb00317b7bc8fdcb57ff86376e5f044378
|
[
"Apache-2.0"
] | null | null | null |
carto_renderer/__init__.py
|
EdsonGermano/socrata-platform-carto-renderer
|
675b4abb00317b7bc8fdcb57ff86376e5f044378
|
[
"Apache-2.0"
] | 7
|
2015-08-03T21:14:06.000Z
|
2020-03-31T17:36:03.000Z
|
carto_renderer/__init__.py
|
EdsonGermano/socrata-platform-carto-renderer
|
675b4abb00317b7bc8fdcb57ff86376e5f044378
|
[
"Apache-2.0"
] | 1
|
2017-09-19T18:40:22.000Z
|
2017-09-19T18:40:22.000Z
|
"""
Flask service that will take JSON blobs containing vector tiles and style info.
"""
| 22
| 79
| 0.75
|
f5f860a5a03fa6a3930810d57e2b491b7f467f07
| 1,211
|
py
|
Python
|
tests/extmod/vfs_fat_oldproto.py
|
rodgergr/pycom-micropython-sigfox
|
50a31befc40a39b1e4c3513f20da968792227b0e
|
[
"MIT"
] | null | null | null |
tests/extmod/vfs_fat_oldproto.py
|
rodgergr/pycom-micropython-sigfox
|
50a31befc40a39b1e4c3513f20da968792227b0e
|
[
"MIT"
] | null | null | null |
tests/extmod/vfs_fat_oldproto.py
|
rodgergr/pycom-micropython-sigfox
|
50a31befc40a39b1e4c3513f20da968792227b0e
|
[
"MIT"
] | 1
|
2019-09-22T01:28:52.000Z
|
2019-09-22T01:28:52.000Z
|
try:
import uerrno
try:
import uos_vfs as uos
except ImportError:
import uos
except ImportError:
print("SKIP")
raise SystemExit
try:
uos.mkfat
except AttributeError:
print("SKIP")
raise SystemExit
class RAMFS_OLD:
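# Minimal in-RAM block device exposing the old block-protocol methods expected by the FAT driver.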
SEC_SIZE = 512
def __init__(self, blocks):
self.data = bytearray(blocks * self.SEC_SIZE)
def readblocks(self, n, buf):
#print("readblocks(%s, %x(%d))" % (n, id(buf), len(buf)))
for i in range(len(buf)):
buf[i] = self.data[n * self.SEC_SIZE + i]
def writeblocks(self, n, buf):
#print("writeblocks(%s, %x)" % (n, id(buf)))
for i in range(len(buf)):
self.data[n * self.SEC_SIZE + i] = buf[i]
def sync(self):
pass
def count(self):
return len(self.data) // self.SEC_SIZE
try:
bdev = RAMFS_OLD(50)
except MemoryError:
print("SKIP")
raise SystemExit
uos.mkfat.mkfs(bdev)
vfs = uos.mkfat(bdev)
uos.mount(vfs, "/ramdisk")
# file io
with vfs.open("file.txt", "w") as f:
f.write("hello!")
print(list(vfs.ilistdir()))
with vfs.open("file.txt", "r") as f:
print(f.read())
vfs.remove("file.txt")
print(list(vfs.ilistdir()))
| 19.532258
| 65
| 0.58877
|
d98dc7d08ce94494570839e222ec7e9f497d0034
| 67
|
py
|
Python
|
ArcFace/__init__.py
|
liuwuliuyun/CFDS
|
9e86ace020b1191365e0ee5aca4c8614ebc4911b
|
[
"MIT"
] | 1
|
2018-12-22T02:19:19.000Z
|
2018-12-22T02:19:19.000Z
|
ArcFace/__init__.py
|
liuwuliuyun/CFDS
|
9e86ace020b1191365e0ee5aca4c8614ebc4911b
|
[
"MIT"
] | null | null | null |
ArcFace/__init__.py
|
liuwuliuyun/CFDS
|
9e86ace020b1191365e0ee5aca4c8614ebc4911b
|
[
"MIT"
] | null | null | null |
from .extractor import extractor
from .yliu_aligner import aligner
| 22.333333
| 33
| 0.850746
|
5dcbdfa65d35818a4dce870eac410d317ed3e93d
| 11,119
|
py
|
Python
|
tests/python/unittest/test_gluon_rnn.py
|
IIMarch/mxnet
|
64c35f2d41f5bad3f9cbf4d4fda9cf3bf3dadb4b
|
[
"Apache-2.0"
] | 4
|
2017-11-17T07:28:09.000Z
|
2019-07-23T06:24:16.000Z
|
tests/python/unittest/test_gluon_rnn.py
|
IIMarch/mxnet
|
64c35f2d41f5bad3f9cbf4d4fda9cf3bf3dadb4b
|
[
"Apache-2.0"
] | null | null | null |
tests/python/unittest/test_gluon_rnn.py
|
IIMarch/mxnet
|
64c35f2d41f5bad3f9cbf4d4fda9cf3bf3dadb4b
|
[
"Apache-2.0"
] | 2
|
2019-06-12T12:40:20.000Z
|
2020-11-03T14:33:14.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from mxnet import gluon
import numpy as np
from numpy.testing import assert_allclose
def test_rnn():
cell = gluon.rnn.RNNCell(100, prefix='rnn_')
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs)
outputs = mx.sym.Group(outputs)
assert sorted(cell.collect_params().keys()) == ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']
assert outputs.list_outputs() == ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))
assert outs == [(10, 100), (10, 100), (10, 100)]
def test_lstm():
cell = gluon.rnn.LSTMCell(100, prefix='rnn_')
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs)
outputs = mx.sym.Group(outputs)
assert sorted(cell.collect_params().keys()) == ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']
assert outputs.list_outputs() == ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))
assert outs == [(10, 100), (10, 100), (10, 100)]
def test_lstm_forget_bias():
forget_bias = 2.0
stack = gluon.rnn.SequentialRNNCell()
stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l0_'))
stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l1_'))
dshape = (32, 1, 200)
data = mx.sym.Variable('data')
sym, _ = stack.unroll(1, data, merge_outputs=True)
mod = mx.mod.Module(sym, label_names=None, context=mx.cpu(0))
mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod.init_params()
bias_argument = next(x for x in sym.list_arguments() if x.endswith('i2h_bias'))
expected_bias = np.hstack([np.zeros((100,)),
forget_bias * np.ones(100, ), np.zeros((2 * 100,))])
assert_allclose(mod.get_params()[0][bias_argument].asnumpy(), expected_bias)
def test_gru():
cell = gluon.rnn.GRUCell(100, prefix='rnn_')
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs)
outputs = mx.sym.Group(outputs)
assert sorted(cell.collect_params().keys()) == ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']
assert outputs.list_outputs() == ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))
assert outs == [(10, 100), (10, 100), (10, 100)]
def test_residual():
cell = gluon.rnn.ResidualCell(gluon.rnn.GRUCell(50, prefix='rnn_'))
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
outputs, _ = cell.unroll(2, inputs)
outputs = mx.sym.Group(outputs)
assert sorted(cell.collect_params().keys()) == \
['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']
# assert outputs.list_outputs() == \
# ['rnn_t0_out_plus_residual_output', 'rnn_t1_out_plus_residual_output']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
assert outs == [(10, 50), (10, 50)]
outputs = outputs.eval(rnn_t0_data=mx.nd.ones((10, 50)),
rnn_t1_data=mx.nd.ones((10, 50)),
rnn_i2h_weight=mx.nd.zeros((150, 50)),
rnn_i2h_bias=mx.nd.zeros((150,)),
rnn_h2h_weight=mx.nd.zeros((150, 50)),
rnn_h2h_bias=mx.nd.zeros((150,)))
expected_outputs = np.ones((10, 50))
assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
assert np.array_equal(outputs[1].asnumpy(), expected_outputs)
def test_residual_bidirectional():
cell = gluon.rnn.ResidualCell(
gluon.rnn.BidirectionalCell(
gluon.rnn.GRUCell(25, prefix='rnn_l_'),
gluon.rnn.GRUCell(25, prefix='rnn_r_')))
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
outputs, _ = cell.unroll(2, inputs, merge_outputs=False)
outputs = mx.sym.Group(outputs)
assert sorted(cell.collect_params().keys()) == \
['rnn_l_h2h_bias', 'rnn_l_h2h_weight', 'rnn_l_i2h_bias', 'rnn_l_i2h_weight',
'rnn_r_h2h_bias', 'rnn_r_h2h_weight', 'rnn_r_i2h_bias', 'rnn_r_i2h_weight']
# assert outputs.list_outputs() == \
# ['bi_t0_plus_residual_output', 'bi_t1_plus_residual_output']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
assert outs == [(10, 50), (10, 50)]
outputs = outputs.eval(rnn_t0_data=mx.nd.ones((10, 50))+5,
rnn_t1_data=mx.nd.ones((10, 50))+5,
rnn_l_i2h_weight=mx.nd.zeros((75, 50)),
rnn_l_i2h_bias=mx.nd.zeros((75,)),
rnn_l_h2h_weight=mx.nd.zeros((75, 25)),
rnn_l_h2h_bias=mx.nd.zeros((75,)),
rnn_r_i2h_weight=mx.nd.zeros((75, 50)),
rnn_r_i2h_bias=mx.nd.zeros((75,)),
rnn_r_h2h_weight=mx.nd.zeros((75, 25)),
rnn_r_h2h_bias=mx.nd.zeros((75,)))
expected_outputs = np.ones((10, 50))+5
assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
assert np.array_equal(outputs[1].asnumpy(), expected_outputs)
def test_stack():
cell = gluon.rnn.SequentialRNNCell()
for i in range(5):
if i == 1:
cell.add(gluon.rnn.ResidualCell(gluon.rnn.LSTMCell(100, prefix='rnn_stack%d_' % i)))
else:
cell.add(gluon.rnn.LSTMCell(100, prefix='rnn_stack%d_'%i))
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs)
outputs = mx.sym.Group(outputs)
keys = sorted(cell.collect_params().keys())
for i in range(5):
assert 'rnn_stack%d_h2h_weight'%i in keys
assert 'rnn_stack%d_h2h_bias'%i in keys
assert 'rnn_stack%d_i2h_weight'%i in keys
assert 'rnn_stack%d_i2h_bias'%i in keys
assert outputs.list_outputs() == ['rnn_stack4_t0_out_output', 'rnn_stack4_t1_out_output', 'rnn_stack4_t2_out_output']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))
assert outs == [(10, 100), (10, 100), (10, 100)]
def test_bidirectional():
cell = gluon.rnn.BidirectionalCell(
gluon.rnn.LSTMCell(100, prefix='rnn_l0_'),
gluon.rnn.LSTMCell(100, prefix='rnn_r0_'),
output_prefix='rnn_bi_')
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs)
outputs = mx.sym.Group(outputs)
assert outputs.list_outputs() == ['rnn_bi_t0_output', 'rnn_bi_t1_output', 'rnn_bi_t2_output']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))
assert outs == [(10, 200), (10, 200), (10, 200)]
def test_zoneout():
cell = gluon.rnn.ZoneoutCell(gluon.rnn.RNNCell(100, prefix='rnn_'), zoneout_outputs=0.5,
zoneout_states=0.5)
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs)
outputs = mx.sym.Group(outputs)
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))
assert outs == [(10, 100), (10, 100), (10, 100)]
def check_rnn_forward(layer, inputs):
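# Unroll with merged and unmerged outputs and backprop through each to exercise both code paths.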
inputs.attach_grad()
layer.collect_params().initialize()
with mx.autograd.record():
layer.unroll(3, inputs, merge_outputs=True)[0].backward()
mx.autograd.backward(layer.unroll(3, inputs, merge_outputs=False)[0])
mx.nd.waitall()
def test_rnn_cells():
check_rnn_forward(gluon.rnn.LSTMCell(100, input_size=200), mx.nd.ones((8, 3, 200)))
check_rnn_forward(gluon.rnn.RNNCell(100, input_size=200), mx.nd.ones((8, 3, 200)))
check_rnn_forward(gluon.rnn.GRUCell(100, input_size=200), mx.nd.ones((8, 3, 200)))
bilayer = gluon.rnn.BidirectionalCell(gluon.rnn.LSTMCell(100, input_size=200),
gluon.rnn.LSTMCell(100, input_size=200))
check_rnn_forward(bilayer, mx.nd.ones((8, 3, 200)))
check_rnn_forward(gluon.rnn.DropoutCell(0.5), mx.nd.ones((8, 3, 200)))
check_rnn_forward(gluon.rnn.ZoneoutCell(gluon.rnn.LSTMCell(100, input_size=200),
0.5, 0.2),
mx.nd.ones((8, 3, 200)))
net = gluon.rnn.SequentialRNNCell()
net.add(gluon.rnn.LSTMCell(100, input_size=200))
net.add(gluon.rnn.RNNCell(100, input_size=100))
net.add(gluon.rnn.GRUCell(100, input_size=100))
check_rnn_forward(net, mx.nd.ones((8, 3, 200)))
def check_rnn_layer_forward(layer, inputs, states=None):
layer.collect_params().initialize()
with mx.autograd.record():
out = layer(inputs, states)
if states is not None:
assert isinstance(out, tuple) and len(out) == 2
out = out[0]
else:
assert isinstance(out, mx.nd.NDArray)
out.backward()
mx.nd.waitall()
def test_rnn_layers():
check_rnn_layer_forward(gluon.rnn.RNN(10, 2), mx.nd.ones((8, 3, 20)))
check_rnn_layer_forward(gluon.rnn.RNN(10, 2), mx.nd.ones((8, 3, 20)), mx.nd.ones((2, 3, 10)))
check_rnn_layer_forward(gluon.rnn.LSTM(10, 2), mx.nd.ones((8, 3, 20)))
check_rnn_layer_forward(gluon.rnn.LSTM(10, 2), mx.nd.ones((8, 3, 20)), [mx.nd.ones((2, 3, 10)), mx.nd.ones((2, 3, 10))])
check_rnn_layer_forward(gluon.rnn.GRU(10, 2), mx.nd.ones((8, 3, 20)))
check_rnn_layer_forward(gluon.rnn.GRU(10, 2), mx.nd.ones((8, 3, 20)), mx.nd.ones((2, 3, 10)))
net = gluon.nn.Sequential()
net.add(gluon.rnn.LSTM(10, 2, bidirectional=True))
net.add(gluon.nn.BatchNorm(axis=2))
net.add(gluon.nn.Flatten())
net.add(gluon.nn.Dense(3, activation='relu'))
net.collect_params().initialize()
with mx.autograd.record():
net(mx.nd.ones((2, 3, 10))).backward()
if __name__ == '__main__':
import nose
nose.runmodule()
| 45.199187
| 124
| 0.643763
|
895c580de251b978ea0bd673d3e79abcc37a9497
| 1,947
|
py
|
Python
|
utils/train.py
|
yueatsprograms/UDA_Self_Supervision
|
b256316283e74b5d1f16777f029c384ee9b6e2e7
|
[
"Unlicense"
] | 77
|
2019-09-27T02:21:48.000Z
|
2022-01-22T18:19:47.000Z
|
utils/train.py
|
yueatsprograms/UDA_Self_Supervision
|
b256316283e74b5d1f16777f029c384ee9b6e2e7
|
[
"Unlicense"
] | 7
|
2019-12-25T06:27:54.000Z
|
2021-09-28T11:19:28.000Z
|
utils/train.py
|
yueatsprograms/UDA_Self_Supervision
|
b256316283e74b5d1f16777f029c384ee9b6e2e7
|
[
"Unlicense"
] | 18
|
2019-10-07T02:26:20.000Z
|
2021-09-19T05:30:32.000Z
|
import torch
from utils.get_mmd import get_mmd
def test(dataloader, model):
model.eval()
correct = 0.0
total = 0.0
for batch_idx, (inputs, labels) in enumerate(dataloader):
inputs, labels = inputs.cuda(), labels.cuda()
with torch.no_grad():
outputs = model(inputs)
_, predicted = outputs.max(1)
total += labels.size(0)
correct += predicted.eq(labels).sum().item()
model.train()
return 1-correct/total
def train(args, net, ext, sstasks, criterion_cls, optimizer_cls, scheduler_cls,
sc_tr_loader, sc_te_loader, tg_te_loader):
net.train()
for sstask in sstasks:
sstask.head.train()
sstask.scheduler.step()
epoch_stats = []
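# For each source-domain batch: update the self-supervised heads, then the classifier; periodically log errors and MMD.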
for batch_idx, (sc_tr_inputs, sc_tr_labels) in enumerate(sc_tr_loader):
for sstask in sstasks:
sstask.train_batch()
sc_tr_inputs, sc_tr_labels = sc_tr_inputs.cuda(), sc_tr_labels.cuda()
optimizer_cls.zero_grad()
outputs_cls = net(sc_tr_inputs)
loss_cls = criterion_cls(outputs_cls, sc_tr_labels)
loss_cls.backward()
optimizer_cls.step()
if batch_idx % args.num_batches_per_test == 0:
sc_te_err = test(sc_te_loader, net)
tg_te_err = test(tg_te_loader, net)
mmd = get_mmd(sc_te_loader, tg_te_loader, ext)
us_te_err_av = []
for sstask in sstasks:
err_av, err_sc, err_tg = sstask.test()
us_te_err_av.append(err_av)
epoch_stats.append((batch_idx, len(sc_tr_loader), mmd, tg_te_err, sc_te_err, us_te_err_av))
display = ('Iteration %d/%d:' %(batch_idx, len(sc_tr_loader))).ljust(24)
display += '%.2f\t%.2f\t\t%.2f\t\t' %(mmd, tg_te_err*100, sc_te_err*100)
for err in us_te_err_av:
display += '%.2f\t' %(err*100)
print(display)
return epoch_stats
| 35.4
| 103
| 0.608629
|
90d8ed3b4df760391fa56e3be8df20fb500ed3be
| 1,569
|
py
|
Python
|
tests/test_registry.py
|
rafaelrds/python-statemachine
|
636961d7f23faa66ada62e139cff44dff08f4e8d
|
[
"MIT"
] | 1
|
2019-12-04T04:39:45.000Z
|
2019-12-04T04:39:45.000Z
|
tests/test_registry.py
|
FelkvnSinnvtel/python-statemachine
|
636961d7f23faa66ada62e139cff44dff08f4e8d
|
[
"MIT"
] | null | null | null |
tests/test_registry.py
|
FelkvnSinnvtel/python-statemachine
|
636961d7f23faa66ada62e139cff44dff08f4e8d
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import mock
import pytest
def test_should_register_a_state_machine():
from statemachine import StateMachine, State, registry
class CampaignMachine(StateMachine):
"A workflow machine"
draft = State('Draft', initial=True)
producing = State('Being produced')
add_job = draft.to(draft) | producing.to(producing)
produce = draft.to(producing)
assert 'CampaignMachine' in registry._REGISTRY
assert registry.get_machine_cls('CampaignMachine') == CampaignMachine
@pytest.fixture()
def django_autodiscover_modules():
import sys
real_django = sys.modules.get('django')
django = mock.MagicMock()
module_loading = mock.MagicMock()
auto_discover_modules = module_loading.autodiscover_modules
sys.modules['django'] = django
sys.modules['django.utils.module_loading'] = module_loading
with mock.patch('statemachine.registry._autodiscover_modules', new=auto_discover_modules):
yield auto_discover_modules
del sys.modules['django']
del sys.modules['django.utils.module_loading']
if real_django:
sys.modules['django'] = real_django
def test_load_modules_should_call_autodiscover_modules(django_autodiscover_modules):
from statemachine.registry import load_modules
# given
modules = ['a', 'c', 'statemachine', 'statemachines']
# when
load_modules(modules)
# then
django_autodiscover_modules.assert_has_calls(
mock.call(m) for m in modules
)
| 27.051724
| 94
| 0.72594
|
4b483af26b27d5c60b10cc101f5caa6d0b4f4af5
| 485
|
py
|
Python
|
voxel_globe/ingest/tasks.py
|
ngageoint/voxel-globe
|
91f386de652b704942165889c10468b2c4cf4eec
|
[
"MIT"
] | 28
|
2015-07-27T23:57:24.000Z
|
2020-04-05T15:10:52.000Z
|
voxel_globe/ingest/tasks.py
|
VisionSystemsInc/voxel_globe
|
6eb3fca5586726428e9d914f7b730ca164c64a52
|
[
"MIT"
] | 50
|
2016-02-11T15:50:22.000Z
|
2016-10-27T22:38:27.000Z
|
voxel_globe/ingest/tasks.py
|
ngageoint/voxel-globe
|
91f386de652b704942165889c10468b2c4cf4eec
|
[
"MIT"
] | 8
|
2015-07-27T19:22:03.000Z
|
2021-01-04T09:44:48.000Z
|
from voxel_globe.common_tasks import shared_task, VipTask
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@shared_task(base=VipTask, bind=True)
def cleanup(self, upload_session_id):
''' Clean up after successful ingest
Currently this only entails removing the upload session information '''
from voxel_globe.ingest.models import UploadSession
upload_session = UploadSession.objects.get(id=upload_session_id)
upload_session.delete()
| 28.529412
| 77
| 0.802062
|
f84869c212024707514ed152b3d4cfb91037324d
| 1,460
|
py
|
Python
|
examples/Old Examples/use_parameter_class.py
|
ongjj323/DNPLab
|
09fd9f21c3c48a3f122d0b0295cc982f689a9842
|
[
"MIT"
] | 4
|
2020-09-23T08:09:33.000Z
|
2022-02-10T22:02:11.000Z
|
examples/Old Examples/use_parameter_class.py
|
ongjj323/DNPLab
|
09fd9f21c3c48a3f122d0b0295cc982f689a9842
|
[
"MIT"
] | 126
|
2020-09-16T22:25:59.000Z
|
2022-03-29T17:15:27.000Z
|
examples/Old Examples/use_parameter_class.py
|
ongjj323/DNPLab
|
09fd9f21c3c48a3f122d0b0295cc982f689a9842
|
[
"MIT"
] | 5
|
2020-09-24T20:57:31.000Z
|
2021-08-19T01:52:16.000Z
|
"""This example shows ways to use Parameter class"""
import sys
sys.path.append("..")
import dnplab
from dnplab.dnpHydration import Parameter
"""1. Create a Child Parameter Class"""
class MyParam(Parameter):
pass
"""2. Getting and Setting Parameters"""
param = MyParam()
# Setting parameter: Class way
param.egg = 1
# Setting parameter: Dictionary way
param["egg"] = 2
# Getting parameter: Class or Dictionary way
print(param.egg, param["egg"], "should be 2 2")
# Both ways called the identical function.
"""3. Building Parameter from existing parameter"""
param1 = MyParam()
param1.egg = 1
param2 = MyParam(param1)
print(param1.egg, param2.egg, "should be 1 1")
# Or from existing dictionary
param3 = MyParam({"egg": 1})
print(param1.egg, param2.egg, param3.egg, "should be 1 1 1")
# Or from casual keywords
param4 = MyParam(egg=1, ham=2)
print(param1.egg, param2.egg, param3.egg, param4.egg, "should be 1 1 1 1")
"""4. Change existing parameter"""
param1 = MyParam(egg=1, ham=2)
# Class way
param1.egg += 1
# Dictionary way
param1["egg"] += 1
print(param1.egg, "should be 3")
# Batch update
param1.update(egg=4)
param1.update({"ham": 10})
param1.update(lettus=32, tomato=64)
print(param1.egg, param1.ham, param1.lettus, param1.tomato, "should be 4 10 32 64")
"""5. Equality"""
basic1 = MyParam(egg=1)
basic2 = MyParam(egg=1)
delux = MyParam(egg=10)
print(basic1 == basic2, "should be True")
print(basic1 == delux, "should be False")
| 20.56338
| 83
| 0.703425
|
b21783c1a6759d84d4b0b917ce0478ab609ba77b
| 1,251
|
py
|
Python
|
bottle_beaker.py
|
Infogroep/bottle-beaker
|
cc1be70ffc9e83046ae4d59892e92ed5a78d08f6
|
[
"MIT"
] | 18
|
2015-01-13T09:13:27.000Z
|
2020-09-07T05:15:21.000Z
|
bottle_beaker.py
|
Infogroep/bottle-beaker
|
cc1be70ffc9e83046ae4d59892e92ed5a78d08f6
|
[
"MIT"
] | 4
|
2015-11-04T17:45:05.000Z
|
2019-08-05T17:00:39.000Z
|
bottle_beaker.py
|
Infogroep/bottle-beaker
|
cc1be70ffc9e83046ae4d59892e92ed5a78d08f6
|
[
"MIT"
] | 5
|
2015-03-20T20:58:20.000Z
|
2020-01-14T20:58:26.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import bottle
import inspect
import beaker
from beaker import middleware
class BeakerPlugin(object):
name = 'beaker'
def __init__(self, keyword='beaker'):
"""
:param keyword: Keyword used to inject beaker in a route
"""
self.keyword = keyword
def setup(self, app):
""" Make sure that other installed plugins don't affect the same
keyword argument and check if metadata is available."""
for other in app.plugins:
if not isinstance(other, BeakerPlugin):
continue
if other.keyword == self.keyword:
raise bottle.PluginError("Found another beaker plugin "
"with conflicting settings ("
"non-unique keyword).")
def apply(self, callback, context):
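# Only wrap callbacks whose signature requests the plugin keyword.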
args = inspect.getargspec(context['callback'])[0]
if self.keyword not in args:
return callback
def wrapper(*args, **kwargs):
kwargs[self.keyword] = beaker
kwargs["{0}_middleware".format(self.keyword)] = middleware
return callback(*args, **kwargs)
return wrapper
| 30.512195
| 72
| 0.572342
|
7e0637d41c809bece8a98c86cb86714e75d8272d
| 5,920
|
py
|
Python
|
vivisect/analysis/generic/switchcase.py
|
cmaruti/vivisect
|
828a9af1b662d6417cf4c78f20a4ba37a1b3b670
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-12-23T19:23:17.000Z
|
2020-12-23T19:23:17.000Z
|
vivisect/analysis/generic/switchcase.py
|
cmaruti/vivisect
|
828a9af1b662d6417cf4c78f20a4ba37a1b3b670
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
vivisect/analysis/generic/switchcase.py
|
cmaruti/vivisect
|
828a9af1b662d6417cf4c78f20a4ba37a1b3b670
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-12-23T19:23:58.000Z
|
2020-12-23T19:23:58.000Z
|
'''
Analysis plugin for supporting WorkspaceEmulators during analysis pass.
Finds and connects Switch Cases, most specifically from Microsoft.
'''
import envi
import envi.archs.i386 as e_i386
import vivisect
import vivisect.analysis.generic.codeblocks as vagc
def analyzeJmp(amod, emu, op, starteip):
'''
Top level logic
'''
test, ctx = testSwitch(emu.vw, op, starteip, emu)
if test:
output = makeSwitch(emu.vw, starteip, ctx['offarraybase'], ctx['indiroffbase'])
def testSwitch(vw, op, vajmp, emu=None):
'''
identifies and enumerates microsoft's switch-case methods.
'''
if not (op.iflags & envi.IF_BRANCH):
# vw.verbprint( "indirect branch is not correct type")
return False,None
backone = vw.getLocation(vajmp-1)
if backone == None:
#vw.verbprint( "previous instruction isn't defined")
return False,None
backtwo = vw.getLocation(backone[0]-1)
if backtwo == None:
#vw.verbprint( "two previous instruction isn't defined")
return False,None
filename = vw.getMemoryMap(vajmp)[3]
imagebase = vw.getFileMeta(filename, 'imagebase')
op1 = vw.parseOpcode(backone[0])
if op1.mnem != 'add':
#vw.verbprint( "previous instruction isn't an 'add'")
return False,None
baseoper = op1.opers[1]
if not isinstance(baseoper, e_i386.i386RegOper):
#vw.verbprint( "baseoper is not an i386RegOper: %s" % repr(baseoper))
return False,None
# this is a weak analysis failure, but a powerful confirmation.
if emu != None:
regbase = op1.getOperValue(1, emu)
if regbase != imagebase:
vw.verbprint( "reg != imagebase")
return False,None
# now check the instruction before that
op2 = vw.parseOpcode(backtwo[0])
if op2.mnem != 'mov':
vw.verbprint( "2nd previous instruction isn't an 'mov'")
return False,None
arrayoper = op2.opers[1]
if not (isinstance(arrayoper, e_i386.i386SibOper) and arrayoper.scale == 4):
vw.verbprint( "arrayoper is not an i386SibOper of size 4: %s" % repr(baseoper))
return False,None
ao_reg = arrayoper.reg & e_i386.RMETA_NMASK
if ao_reg != baseoper.reg:
vw.verbprint( "arrayoper.reg != baseoper.reg: %s != %s" % (ao_reg, baseoper.reg))
return False,None
offarraybase = arrayoper.disp
#initial check of the array. should point to the next va. we'll scrape it up later
offarrayfirst = vw.readMemValue(offarraybase+imagebase, 4)
if offarrayfirst+imagebase != vajmp+2:
vw.verbprint( "first ref is not the va after the jmp: %x != %x" % (offarrayfirst+imagebase, vajmp+2))
indiroffbase = None
# now check for the byte array before that
backthree = vw.getLocation(backtwo[0]-1) # this one is optional. first two are not.
if backthree != None:
op = vw.parseOpcode(backthree[0])
if op.mnem == 'movzx' and isinstance(op.opers[1], e_i386.i386SibOper) and \
op.opers[1].scale == 1:
vw.verbprint( "this is a double deref (hitting a byte array offset into the offset-array)")
indiroffbase = op.opers[1].disp
return True, {'indiroffbase':indiroffbase, 'offarraybase':offarraybase, }
def makeSwitch(vw, vajmp, offarraybase, indiroffbase=None):
'''
Makes the changes to the Workspace for the given jmp location. Handles
naming for all cases because naming wants to indicate larger context.
(future)If indiroffbase is not None, the indirection "database" is analyzed for naming
'''
filename = vw.getMemoryMap(vajmp)[3]
imagebase = vw.getFileMeta(filename, 'imagebase')
# we have identified this is a switch case
vw.verbprint( "FOUND MS SWITCH CASE SPRAY at 0x%x" % vajmp)
# roll through the offset array until imagebase+offset is not a valid pointer, points to non-op locations or splits instructions
count = 0
tracker = []
ptr = offarraybase
while True:
off = vw.readMemValue(ptr+imagebase, 4)
ova = imagebase + off
tgtva = makeSwitchCase(vw, vajmp, ova)
if not tgtva:
break
tracker.append((count, tgtva))
count += 1
ptr += 4
# FIXME: this doesn't take into account two-level derefs (indiroffbase)
naming = {}
for idx,va in tracker:
lst = naming.get(va)
if lst == None:
lst = []
naming[va] = lst
lst.append("%xh" % idx)
#TODO: analyze indiroffbase to determine case information
for va, opts in naming.items():
options = "_".join(opts)
name = "switch_case_%s_%.8x" % (options, va)
vw.makeName(va, name)
#TODO: analyze which paths handle which cases, name accordingly
#TODO: determine good hint for symbolik constraints
funcva = vw.getFunction(vajmp)
vw.makeName(vajmp, "jmp_switch_%.8x" % vajmp)
vagc.analyzeFunction(vw, funcva)
return tracker
def makeSwitchCase(vw, vaSwitch, vaCase):
'''
Handle minutia of each case, specifically, checking for validity and
making Xref and making code (if necessary)
'''
if not vw.isValidPointer(vaCase):
return False
loc = vw.getLocation(vaCase)
if loc != None:
if loc[0] != vaCase:
return False
if loc[vivisect.L_LTYPE] != vivisect.LOC_OP:
return False
else:
vw.makeCode(vaCase)
#if we reach here, we're going to assume the location is valid.
vw.verbprint( "0x%x MS Switch Case Spray: emu.getBranchNode( emu.curpath , 0x%x )" % (vaSwitch, vaCase))
vw.addXref(vaSwitch, vaCase, vivisect.REF_CODE)
return vaCase
if globals().get('vw'):
verbose = vw.verbose
vw.verbose = True
vw.vprint("Starting...")
findSwitchCase(vw)
vw.vprint("Done")
vw.verbose = verbose
| 33.446328
| 132
| 0.640541
|
2f2482ed9a963156174688921187259b6062325d
| 504
|
py
|
Python
|
corehq/messaging/scheduling/migrations/0004_timedschedule_start_offset.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2020-05-05T13:10:01.000Z
|
2020-05-05T13:10:01.000Z
|
corehq/messaging/scheduling/migrations/0004_timedschedule_start_offset.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2019-12-09T14:00:14.000Z
|
2019-12-09T14:00:14.000Z
|
corehq/messaging/scheduling/migrations/0004_timedschedule_start_offset.py
|
MaciejChoromanski/commcare-hq
|
fd7f65362d56d73b75a2c20d2afeabbc70876867
|
[
"BSD-3-Clause"
] | 5
|
2015-11-30T13:12:45.000Z
|
2019-07-01T19:27:07.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-26 22:19
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scheduling', '0003_add_custom_metadata'),
]
operations = [
migrations.AddField(
model_name='timedschedule',
name='start_offset',
field=models.IntegerField(default=0),
),
]
| 22.909091
| 51
| 0.64881
|
be0326f7f16dae53e83af335c6e479ea456f230a
| 6,711
|
py
|
Python
|
parts/manifest.py
|
ghdrl95/Naver-Speech-Hackathon
|
10b4526d98ce535415cb91d24338790d9c175b63
|
[
"Apache-2.0"
] | 1
|
2019-11-11T06:07:31.000Z
|
2019-11-11T06:07:31.000Z
|
parts/manifest.py
|
ghdrl95/Naver-Speech-Hackathon
|
10b4526d98ce535415cb91d24338790d9c175b63
|
[
"Apache-2.0"
] | null | null | null |
parts/manifest.py
|
ghdrl95/Naver-Speech-Hackathon
|
10b4526d98ce535415cb91d24338790d9c175b63
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
import string
import numpy as np
import os
from .text import _clean_text
def normalize_string(s, labels, table, **unused_kwargs):
"""
Normalizes string. For example:
'call me at 8:00 pm!' -> 'call me at eight zero pm'
Args:
s: string to normalize
labels: labels used during model training.
Returns:
Normalized string
"""
def good_token(token, labels):
s = set(labels)
for t in token:
if not t in s:
return False
return True
try:
text = _clean_text(s, ["english_cleaners"], table).strip()
return ''.join([t for t in text if good_token(t, labels=labels)])
except:
print("WARNING: Normalizing {} failed".format(s))
return None
class Manifest(object):
def __init__(self, data_dir, manifest_paths, labels, blank_index, max_duration=None, pad_to_max=False,
min_duration=None, sort_by_duration=False, max_utts=0,
normalize=True, speed_perturbation=False, filter_speed=1.0):
self.labels_map = dict([(labels[i], i) for i in range(len(labels))])
self.blank_index = blank_index
self.max_duration= max_duration
ids = []
duration = 0.0
filtered_duration = 0.0
# If removing punctuation, make a list of punctuation to remove
table = None
if normalize:
# Punctuation to remove
punctuation = string.punctuation
punctuation = punctuation.replace("+", "")
punctuation = punctuation.replace("&", "")
### We might also want to consider:
### @ -> at
### # -> number, pound, hashtag
### ~ -> tilde
### _ -> underscore
### % -> percent
# If a punctuation symbol is inside our vocab, we do not remove from text
for l in labels:
punctuation = punctuation.replace(l, "")
# Turn all punctuation to whitespace
table = str.maketrans(punctuation, " " * len(punctuation))
for manifest_path in manifest_paths:
with open(manifest_path, "r", encoding="utf-8") as fh:
a=json.load(fh)
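# Each manifest entry lists (possibly speed-perturbed) audio files, a duration, and a transcript.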
for data in a:
files_and_speeds = data['files']
if pad_to_max:
if not speed_perturbation:
min_speed = filter_speed
else:
min_speed = min(x['speed'] for x in files_and_speeds)
max_duration = self.max_duration * min_speed
data['duration'] = data['original_duration']
if min_duration is not None and data['duration'] < min_duration:
filtered_duration += data['duration']
continue
if max_duration is not None and data['duration'] > max_duration:
filtered_duration += data['duration']
continue
# Prune and normalize according to transcript
transcript_text = data[
'transcript'] if "transcript" in data else self.load_transcript(
data['text_filepath'])
if normalize:
transcript_text = normalize_string(transcript_text, labels=labels,
table=table)
if not isinstance(transcript_text, str):
print(
"WARNING: Got transcript: {}. It is not a string. Dropping data point".format(
transcript_text))
filtered_duration += data['duration']
continue
data["transcript"] = self.parse_transcript(transcript_text) # convert to vocab indices
if speed_perturbation:
audio_paths = [x['fname'] for x in files_and_speeds]
data['audio_duration'] = [x['duration'] for x in files_and_speeds]
else:
audio_paths = [x['fname'] for x in files_and_speeds if x['speed'] == filter_speed]
data['audio_duration'] = [x['duration'] for x in files_and_speeds if x['speed'] == filter_speed]
data['audio_filepath'] = [os.path.join(data_dir, x) for x in audio_paths]
data.pop('files')
data.pop('original_duration')
ids.append(data)
duration += data['duration']
if max_utts > 0 and len(ids) >= max_utts:
print(
'Stopping parsing %s as max_utts=%d' % (manifest_path, max_utts))
break
if sort_by_duration:
ids = sorted(ids, key=lambda x: x['duration'])
self._data = ids
self._size = len(ids)
self._duration = duration
self._filtered_duration = filtered_duration
def load_transcript(self, transcript_path):
with open(transcript_path, 'r', encoding="utf-8") as transcript_file:
transcript = transcript_file.read().replace('\n', '')
return transcript
def parse_transcript(self, transcript):
chars = [self.labels_map.get(x, self.blank_index) for x in list(transcript)]
transcript = list(filter(lambda x: x != self.blank_index, chars))
return transcript
def __getitem__(self, item):
return self._data[item]
def __len__(self):
return self._size
def __iter__(self):
return iter(self._data)
@property
def duration(self):
return self._duration
@property
def filtered_duration(self):
return self._filtered_duration
@property
def data(self):
return list(self._data)
| 39.245614
| 120
| 0.550887
|