| hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
213ded7d5d1d4daae54f8750ea95bfdda0b6f457
| 110
|
py
|
Python
|
src/utils/m_brain.py
|
Ragnamus/falco-newtoni
|
ba2eb39dd927ebbd7d054749824561a4cdbbfd2b
|
[
"MIT"
] | null | null | null |
src/utils/m_brain.py
|
Ragnamus/falco-newtoni
|
ba2eb39dd927ebbd7d054749824561a4cdbbfd2b
|
[
"MIT"
] | null | null | null |
src/utils/m_brain.py
|
Ragnamus/falco-newtoni
|
ba2eb39dd927ebbd7d054749824561a4cdbbfd2b
|
[
"MIT"
] | null | null | null |
class brain:
def __init__(self, name):
self.name = name
def get_mood(self):
return 1
| 15.714286
| 29
| 0.572727
|
308751fbc97f91a7070cdc4d029f7e3408e5be5a
| 28,532
|
py
|
Python
|
venv/Lib/site-packages/tensorflow/core/protobuf/struct_pb2.py
|
rexliu3/StockTradingBotCloud
|
46b732b9c05f73bc0e856a3c4a16854b6d12e18e
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/tensorflow/core/protobuf/struct_pb2.py
|
rexliu3/StockTradingBotCloud
|
46b732b9c05f73bc0e856a3c4a16854b6d12e18e
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/tensorflow/core/protobuf/struct_pb2.py
|
rexliu3/StockTradingBotCloud
|
46b732b9c05f73bc0e856a3c4a16854b6d12e18e
|
[
"MIT"
] | 1
|
2020-06-28T11:47:47.000Z
|
2020-06-28T11:47:47.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/protobuf/struct.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import tensor_shape_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2
from tensorflow.core.framework import types_pb2 as tensorflow_dot_core_dot_framework_dot_types__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/protobuf/struct.proto',
package='tensorflow',
syntax='proto3',
serialized_options=_b('ZHgithub.com/tensorflow/tensorflow/tensorflow/go/core/core_protos_go_proto'),
serialized_pb=_b('\n%tensorflow/core/protobuf/struct.proto\x12\ntensorflow\x1a,tensorflow/core/framework/tensor_shape.proto\x1a%tensorflow/core/framework/types.proto\"\xc7\x04\n\x0fStructuredValue\x12+\n\nnone_value\x18\x01 \x01(\x0b\x32\x15.tensorflow.NoneValueH\x00\x12\x17\n\rfloat64_value\x18\x0b \x01(\x01H\x00\x12\x15\n\x0bint64_value\x18\x0c \x01(\x12H\x00\x12\x16\n\x0cstring_value\x18\r \x01(\tH\x00\x12\x14\n\nbool_value\x18\x0e \x01(\x08H\x00\x12:\n\x12tensor_shape_value\x18\x1f \x01(\x0b\x32\x1c.tensorflow.TensorShapeProtoH\x00\x12\x32\n\x12tensor_dtype_value\x18 \x01(\x0e\x32\x14.tensorflow.DataTypeH\x00\x12\x38\n\x11tensor_spec_value\x18! \x01(\x0b\x32\x1b.tensorflow.TensorSpecProtoH\x00\x12\x34\n\x0ftype_spec_value\x18\" \x01(\x0b\x32\x19.tensorflow.TypeSpecProtoH\x00\x12+\n\nlist_value\x18\x33 \x01(\x0b\x32\x15.tensorflow.ListValueH\x00\x12-\n\x0btuple_value\x18\x34 \x01(\x0b\x32\x16.tensorflow.TupleValueH\x00\x12+\n\ndict_value\x18\x35 \x01(\x0b\x32\x15.tensorflow.DictValueH\x00\x12\x38\n\x11named_tuple_value\x18\x36 \x01(\x0b\x32\x1b.tensorflow.NamedTupleValueH\x00\x42\x06\n\x04kind\"\x0b\n\tNoneValue\"8\n\tListValue\x12+\n\x06values\x18\x01 \x03(\x0b\x32\x1b.tensorflow.StructuredValue\"9\n\nTupleValue\x12+\n\x06values\x18\x01 \x03(\x0b\x32\x1b.tensorflow.StructuredValue\"\x8a\x01\n\tDictValue\x12\x31\n\x06\x66ields\x18\x01 \x03(\x0b\x32!.tensorflow.DictValue.FieldsEntry\x1aJ\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 \x01(\x0b\x32\x1b.tensorflow.StructuredValue:\x02\x38\x01\"D\n\tPairValue\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 \x01(\x0b\x32\x1b.tensorflow.StructuredValue\"F\n\x0fNamedTupleValue\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\x06values\x18\x02 \x03(\x0b\x32\x15.tensorflow.PairValue\"q\n\x0fTensorSpecProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05shape\x18\x02 \x01(\x0b\x32\x1c.tensorflow.TensorShapeProto\x12#\n\x05\x64type\x18\x03 \x01(\x0e\x32\x14.tensorflow.DataType\"\x8a\x03\n\rTypeSpecProto\x12@\n\x0ftype_spec_class\x18\x01 \x01(\x0e\x32\'.tensorflow.TypeSpecProto.TypeSpecClass\x12/\n\ntype_state\x18\x02 \x01(\x0b\x32\x1b.tensorflow.StructuredValue\x12\x1c\n\x14type_spec_class_name\x18\x03 \x01(\t\"\xe7\x01\n\rTypeSpecClass\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x16\n\x12SPARSE_TENSOR_SPEC\x10\x01\x12\x17\n\x13INDEXED_SLICES_SPEC\x10\x02\x12\x16\n\x12RAGGED_TENSOR_SPEC\x10\x03\x12\x15\n\x11TENSOR_ARRAY_SPEC\x10\x04\x12\x15\n\x11\x44\x41TA_DATASET_SPEC\x10\x05\x12\x16\n\x12\x44\x41TA_ITERATOR_SPEC\x10\x06\x12\x11\n\rOPTIONAL_SPEC\x10\x07\x12\x14\n\x10PER_REPLICA_SPEC\x10\x08\x12\x11\n\rVARIABLE_SPEC\x10\tBJZHgithub.com/tensorflow/tensorflow/tensorflow/go/core/core_protos_go_protob\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_types__pb2.DESCRIPTOR,])
_TYPESPECPROTO_TYPESPECCLASS = _descriptor.EnumDescriptor(
name='TypeSpecClass',
full_name='tensorflow.TypeSpecProto.TypeSpecClass',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SPARSE_TENSOR_SPEC', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INDEXED_SLICES_SPEC', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RAGGED_TENSOR_SPEC', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TENSOR_ARRAY_SPEC', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA_DATASET_SPEC', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA_ITERATOR_SPEC', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OPTIONAL_SPEC', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PER_REPLICA_SPEC', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VARIABLE_SPEC', index=9, number=9,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1416,
serialized_end=1647,
)
_sym_db.RegisterEnumDescriptor(_TYPESPECPROTO_TYPESPECCLASS)
_STRUCTUREDVALUE = _descriptor.Descriptor(
name='StructuredValue',
full_name='tensorflow.StructuredValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='none_value', full_name='tensorflow.StructuredValue.none_value', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='float64_value', full_name='tensorflow.StructuredValue.float64_value', index=1,
number=11, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='int64_value', full_name='tensorflow.StructuredValue.int64_value', index=2,
number=12, type=18, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='string_value', full_name='tensorflow.StructuredValue.string_value', index=3,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bool_value', full_name='tensorflow.StructuredValue.bool_value', index=4,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tensor_shape_value', full_name='tensorflow.StructuredValue.tensor_shape_value', index=5,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tensor_dtype_value', full_name='tensorflow.StructuredValue.tensor_dtype_value', index=6,
number=32, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tensor_spec_value', full_name='tensorflow.StructuredValue.tensor_spec_value', index=7,
number=33, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type_spec_value', full_name='tensorflow.StructuredValue.type_spec_value', index=8,
number=34, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='list_value', full_name='tensorflow.StructuredValue.list_value', index=9,
number=51, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tuple_value', full_name='tensorflow.StructuredValue.tuple_value', index=10,
number=52, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dict_value', full_name='tensorflow.StructuredValue.dict_value', index=11,
number=53, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='named_tuple_value', full_name='tensorflow.StructuredValue.named_tuple_value', index=12,
number=54, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='kind', full_name='tensorflow.StructuredValue.kind',
index=0, containing_type=None, fields=[]),
],
serialized_start=139,
serialized_end=722,
)
_NONEVALUE = _descriptor.Descriptor(
name='NoneValue',
full_name='tensorflow.NoneValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=724,
serialized_end=735,
)
_LISTVALUE = _descriptor.Descriptor(
name='ListValue',
full_name='tensorflow.ListValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='values', full_name='tensorflow.ListValue.values', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=737,
serialized_end=793,
)
_TUPLEVALUE = _descriptor.Descriptor(
name='TupleValue',
full_name='tensorflow.TupleValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='values', full_name='tensorflow.TupleValue.values', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=795,
serialized_end=852,
)
_DICTVALUE_FIELDSENTRY = _descriptor.Descriptor(
name='FieldsEntry',
full_name='tensorflow.DictValue.FieldsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.DictValue.FieldsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.DictValue.FieldsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=919,
serialized_end=993,
)
_DICTVALUE = _descriptor.Descriptor(
name='DictValue',
full_name='tensorflow.DictValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='fields', full_name='tensorflow.DictValue.fields', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DICTVALUE_FIELDSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=855,
serialized_end=993,
)
_PAIRVALUE = _descriptor.Descriptor(
name='PairValue',
full_name='tensorflow.PairValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.PairValue.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.PairValue.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=995,
serialized_end=1063,
)
_NAMEDTUPLEVALUE = _descriptor.Descriptor(
name='NamedTupleValue',
full_name='tensorflow.NamedTupleValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.NamedTupleValue.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='values', full_name='tensorflow.NamedTupleValue.values', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1065,
serialized_end=1135,
)
_TENSORSPECPROTO = _descriptor.Descriptor(
name='TensorSpecProto',
full_name='tensorflow.TensorSpecProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.TensorSpecProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shape', full_name='tensorflow.TensorSpecProto.shape', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dtype', full_name='tensorflow.TensorSpecProto.dtype', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1137,
serialized_end=1250,
)
_TYPESPECPROTO = _descriptor.Descriptor(
name='TypeSpecProto',
full_name='tensorflow.TypeSpecProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type_spec_class', full_name='tensorflow.TypeSpecProto.type_spec_class', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type_state', full_name='tensorflow.TypeSpecProto.type_state', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type_spec_class_name', full_name='tensorflow.TypeSpecProto.type_spec_class_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_TYPESPECPROTO_TYPESPECCLASS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1253,
serialized_end=1647,
)
_STRUCTUREDVALUE.fields_by_name['none_value'].message_type = _NONEVALUE
_STRUCTUREDVALUE.fields_by_name['tensor_shape_value'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2._TENSORSHAPEPROTO
_STRUCTUREDVALUE.fields_by_name['tensor_dtype_value'].enum_type = tensorflow_dot_core_dot_framework_dot_types__pb2._DATATYPE
_STRUCTUREDVALUE.fields_by_name['tensor_spec_value'].message_type = _TENSORSPECPROTO
_STRUCTUREDVALUE.fields_by_name['type_spec_value'].message_type = _TYPESPECPROTO
_STRUCTUREDVALUE.fields_by_name['list_value'].message_type = _LISTVALUE
_STRUCTUREDVALUE.fields_by_name['tuple_value'].message_type = _TUPLEVALUE
_STRUCTUREDVALUE.fields_by_name['dict_value'].message_type = _DICTVALUE
_STRUCTUREDVALUE.fields_by_name['named_tuple_value'].message_type = _NAMEDTUPLEVALUE
_STRUCTUREDVALUE.oneofs_by_name['kind'].fields.append(
_STRUCTUREDVALUE.fields_by_name['none_value'])
_STRUCTUREDVALUE.fields_by_name['none_value'].containing_oneof = _STRUCTUREDVALUE.oneofs_by_name['kind']
_STRUCTUREDVALUE.oneofs_by_name['kind'].fields.append(
_STRUCTUREDVALUE.fields_by_name['float64_value'])
_STRUCTUREDVALUE.fields_by_name['float64_value'].containing_oneof = _STRUCTUREDVALUE.oneofs_by_name['kind']
_STRUCTUREDVALUE.oneofs_by_name['kind'].fields.append(
_STRUCTUREDVALUE.fields_by_name['int64_value'])
_STRUCTUREDVALUE.fields_by_name['int64_value'].containing_oneof = _STRUCTUREDVALUE.oneofs_by_name['kind']
_STRUCTUREDVALUE.oneofs_by_name['kind'].fields.append(
_STRUCTUREDVALUE.fields_by_name['string_value'])
_STRUCTUREDVALUE.fields_by_name['string_value'].containing_oneof = _STRUCTUREDVALUE.oneofs_by_name['kind']
_STRUCTUREDVALUE.oneofs_by_name['kind'].fields.append(
_STRUCTUREDVALUE.fields_by_name['bool_value'])
_STRUCTUREDVALUE.fields_by_name['bool_value'].containing_oneof = _STRUCTUREDVALUE.oneofs_by_name['kind']
_STRUCTUREDVALUE.oneofs_by_name['kind'].fields.append(
_STRUCTUREDVALUE.fields_by_name['tensor_shape_value'])
_STRUCTUREDVALUE.fields_by_name['tensor_shape_value'].containing_oneof = _STRUCTUREDVALUE.oneofs_by_name['kind']
_STRUCTUREDVALUE.oneofs_by_name['kind'].fields.append(
_STRUCTUREDVALUE.fields_by_name['tensor_dtype_value'])
_STRUCTUREDVALUE.fields_by_name['tensor_dtype_value'].containing_oneof = _STRUCTUREDVALUE.oneofs_by_name['kind']
_STRUCTUREDVALUE.oneofs_by_name['kind'].fields.append(
_STRUCTUREDVALUE.fields_by_name['tensor_spec_value'])
_STRUCTUREDVALUE.fields_by_name['tensor_spec_value'].containing_oneof = _STRUCTUREDVALUE.oneofs_by_name['kind']
_STRUCTUREDVALUE.oneofs_by_name['kind'].fields.append(
_STRUCTUREDVALUE.fields_by_name['type_spec_value'])
_STRUCTUREDVALUE.fields_by_name['type_spec_value'].containing_oneof = _STRUCTUREDVALUE.oneofs_by_name['kind']
_STRUCTUREDVALUE.oneofs_by_name['kind'].fields.append(
_STRUCTUREDVALUE.fields_by_name['list_value'])
_STRUCTUREDVALUE.fields_by_name['list_value'].containing_oneof = _STRUCTUREDVALUE.oneofs_by_name['kind']
_STRUCTUREDVALUE.oneofs_by_name['kind'].fields.append(
_STRUCTUREDVALUE.fields_by_name['tuple_value'])
_STRUCTUREDVALUE.fields_by_name['tuple_value'].containing_oneof = _STRUCTUREDVALUE.oneofs_by_name['kind']
_STRUCTUREDVALUE.oneofs_by_name['kind'].fields.append(
_STRUCTUREDVALUE.fields_by_name['dict_value'])
_STRUCTUREDVALUE.fields_by_name['dict_value'].containing_oneof = _STRUCTUREDVALUE.oneofs_by_name['kind']
_STRUCTUREDVALUE.oneofs_by_name['kind'].fields.append(
_STRUCTUREDVALUE.fields_by_name['named_tuple_value'])
_STRUCTUREDVALUE.fields_by_name['named_tuple_value'].containing_oneof = _STRUCTUREDVALUE.oneofs_by_name['kind']
_LISTVALUE.fields_by_name['values'].message_type = _STRUCTUREDVALUE
_TUPLEVALUE.fields_by_name['values'].message_type = _STRUCTUREDVALUE
_DICTVALUE_FIELDSENTRY.fields_by_name['value'].message_type = _STRUCTUREDVALUE
_DICTVALUE_FIELDSENTRY.containing_type = _DICTVALUE
_DICTVALUE.fields_by_name['fields'].message_type = _DICTVALUE_FIELDSENTRY
_PAIRVALUE.fields_by_name['value'].message_type = _STRUCTUREDVALUE
_NAMEDTUPLEVALUE.fields_by_name['values'].message_type = _PAIRVALUE
_TENSORSPECPROTO.fields_by_name['shape'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2._TENSORSHAPEPROTO
_TENSORSPECPROTO.fields_by_name['dtype'].enum_type = tensorflow_dot_core_dot_framework_dot_types__pb2._DATATYPE
_TYPESPECPROTO.fields_by_name['type_spec_class'].enum_type = _TYPESPECPROTO_TYPESPECCLASS
_TYPESPECPROTO.fields_by_name['type_state'].message_type = _STRUCTUREDVALUE
_TYPESPECPROTO_TYPESPECCLASS.containing_type = _TYPESPECPROTO
DESCRIPTOR.message_types_by_name['StructuredValue'] = _STRUCTUREDVALUE
DESCRIPTOR.message_types_by_name['NoneValue'] = _NONEVALUE
DESCRIPTOR.message_types_by_name['ListValue'] = _LISTVALUE
DESCRIPTOR.message_types_by_name['TupleValue'] = _TUPLEVALUE
DESCRIPTOR.message_types_by_name['DictValue'] = _DICTVALUE
DESCRIPTOR.message_types_by_name['PairValue'] = _PAIRVALUE
DESCRIPTOR.message_types_by_name['NamedTupleValue'] = _NAMEDTUPLEVALUE
DESCRIPTOR.message_types_by_name['TensorSpecProto'] = _TENSORSPECPROTO
DESCRIPTOR.message_types_by_name['TypeSpecProto'] = _TYPESPECPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StructuredValue = _reflection.GeneratedProtocolMessageType('StructuredValue', (_message.Message,), {
'DESCRIPTOR' : _STRUCTUREDVALUE,
'__module__' : 'tensorflow.core.protobuf.struct_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.StructuredValue)
})
_sym_db.RegisterMessage(StructuredValue)
NoneValue = _reflection.GeneratedProtocolMessageType('NoneValue', (_message.Message,), {
'DESCRIPTOR' : _NONEVALUE,
'__module__' : 'tensorflow.core.protobuf.struct_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.NoneValue)
})
_sym_db.RegisterMessage(NoneValue)
ListValue = _reflection.GeneratedProtocolMessageType('ListValue', (_message.Message,), {
'DESCRIPTOR' : _LISTVALUE,
'__module__' : 'tensorflow.core.protobuf.struct_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.ListValue)
})
_sym_db.RegisterMessage(ListValue)
TupleValue = _reflection.GeneratedProtocolMessageType('TupleValue', (_message.Message,), {
'DESCRIPTOR' : _TUPLEVALUE,
'__module__' : 'tensorflow.core.protobuf.struct_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.TupleValue)
})
_sym_db.RegisterMessage(TupleValue)
DictValue = _reflection.GeneratedProtocolMessageType('DictValue', (_message.Message,), {
'FieldsEntry' : _reflection.GeneratedProtocolMessageType('FieldsEntry', (_message.Message,), {
'DESCRIPTOR' : _DICTVALUE_FIELDSENTRY,
'__module__' : 'tensorflow.core.protobuf.struct_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.DictValue.FieldsEntry)
})
,
'DESCRIPTOR' : _DICTVALUE,
'__module__' : 'tensorflow.core.protobuf.struct_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.DictValue)
})
_sym_db.RegisterMessage(DictValue)
_sym_db.RegisterMessage(DictValue.FieldsEntry)
PairValue = _reflection.GeneratedProtocolMessageType('PairValue', (_message.Message,), {
'DESCRIPTOR' : _PAIRVALUE,
'__module__' : 'tensorflow.core.protobuf.struct_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.PairValue)
})
_sym_db.RegisterMessage(PairValue)
NamedTupleValue = _reflection.GeneratedProtocolMessageType('NamedTupleValue', (_message.Message,), {
'DESCRIPTOR' : _NAMEDTUPLEVALUE,
'__module__' : 'tensorflow.core.protobuf.struct_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.NamedTupleValue)
})
_sym_db.RegisterMessage(NamedTupleValue)
TensorSpecProto = _reflection.GeneratedProtocolMessageType('TensorSpecProto', (_message.Message,), {
'DESCRIPTOR' : _TENSORSPECPROTO,
'__module__' : 'tensorflow.core.protobuf.struct_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.TensorSpecProto)
})
_sym_db.RegisterMessage(TensorSpecProto)
TypeSpecProto = _reflection.GeneratedProtocolMessageType('TypeSpecProto', (_message.Message,), {
'DESCRIPTOR' : _TYPESPECPROTO,
'__module__' : 'tensorflow.core.protobuf.struct_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.TypeSpecProto)
})
_sym_db.RegisterMessage(TypeSpecProto)
DESCRIPTOR._options = None
_DICTVALUE_FIELDSENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 42.585075
| 2,719
| 0.766473
|
172fb74f4b36cffc74a61d487ea21fe00d9d0bf1
| 6,067
|
py
|
Python
|
spark_rdkit_run.py
|
mariolov-ric/pyspark_rdkit
|
3c4b6ffd4e1d2aa5fb6b2b9b6d5c9379f4baa252
|
[
"MIT"
] | 2
|
2020-02-19T18:45:08.000Z
|
2020-04-23T12:45:00.000Z
|
spark_rdkit_run.py
|
mariolov-ric/pyspark_rdkit
|
3c4b6ffd4e1d2aa5fb6b2b9b6d5c9379f4baa252
|
[
"MIT"
] | null | null | null |
spark_rdkit_run.py
|
mariolov-ric/pyspark_rdkit
|
3c4b6ffd4e1d2aa5fb6b2b9b6d5c9379f4baa252
|
[
"MIT"
] | 2
|
2019-08-08T08:50:04.000Z
|
2020-09-23T14:31:57.000Z
|
'''
Author: Mario Lovric, Know-Center, Graz, Austria
MIT License
Copyright (c) 2018 Mario Lovric
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
"""This file contains the code described in the journal article in
Molecular Informatics: https://doi.org/10.1002/minf.201800082
The main guard largely contains code to parse commandline arguments and
setup the spark context. The experiment is run for a various amount of
instance sizes in order to compare their run times.
Run pyspark park_rdkit_run.py -h in order to get the number of
required arguments.
"""
import time
import os
import json
from datetime import datetime
import argparse
import numpy as np
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext
from rdkit import Chem
from rdkit.Chem import Descriptors
from rdkit.ML.Descriptors import MoleculeDescriptors
from rdkit import DataStructs
from rdkit.Chem.Fingerprints import FingerprintMols
from utils import (rdd2smile, clean, comp_desc, desc_dict, _fng_mol, _sim)
def run_experiment(filepath):
"""Run experiment for comparison on runtime for different amount of compounds as SMILES strings.
The experiment is run for a various number block sizes.
Parameters:
----------
filepath : str path to parquet file
"""
# list of descriptors from the RDKit Descriptors modules
descriptors = list(np.array(Descriptors._descList)[:, 0])
# the calculator module from RDKit
calculator = MoleculeDescriptors.MolecularDescriptorCalculator(descriptors)
df = sql_context.read.parquet(filepath)
# l1 diclofenac
l1 = FingerprintMols.FingerprintMol(Chem.MolFromSmiles('C1=CC=C(C(=C1)CC(=O)O)NC2=C(C=CC=C2Cl)Cl'))
with open('logfile.json', 'w') as json_file:
for num_lines in [int(2e6)]:
smiles_rdd = df.select('can_smiles') \
.limit(num_lines).rdd.repartition(1000)
# map the previously defined function to the SMILES RDD
cleaned_rdd = smiles_rdd.map(clean)
# convert rdd to data frame, remove None values, assign to cleaned_df
cleaned_df = cleaned_rdd.map(
lambda x: (x,)).toDF(['smiles']) \
.dropna(thresh=1, subset=('smiles'))
smiles_clean_rdd = cleaned_df.rdd.repartition(1000).persist()
start_query = datetime.now()
# create RDD of MOL by using the map module and
# MolFromSmiles from RDKit, run cleaning again
mol_rdd = smiles_clean_rdd.map(
lambda x: Chem.MolFromSmiles(rdd2smile(x)))
# assign results of the query for substructure 'CO' to sub_rdd
# documentation from RDKit:
# http://www.rdkit.org/Python_Docs/rdkit.Chem.rdchem.Mol-class.html#HasSubstructMatch
sub_rdd = mol_rdd.map(
lambda x: x.HasSubstructMatch(Chem.MolFromSmiles('CO')))
count = sub_rdd.collect().count(True)
end_query = datetime.now()
mol_rdd = smiles_clean_rdd.map(lambda x: Chem.MolFromSmiles(rdd2smile(x)))
fng_rdd = mol_rdd.map(lambda x: _fng_mol(x))
sim_sol = fng_rdd.map(lambda x: _sim(l1, x)).filter(lambda x: x == 1.0).countByValue()
startDesc = datetime.now()
desc_data_rdd = smiles_clean_rdd.map(
lambda x: comp_desc(rdd2smile(x), calculator))
descriptors_df = desc_data_rdd.map(
lambda x: desc_dict(x)).toDF(descriptors)
descriptors_df.write.parquet('descriptor_data_v'
+ str(time.time())
+ '.parquet')
ex_dict = {
'test time': str(datetime.now()),
'molecules': str(num_lines),
'totalTime': str(datetime.now() - start_time),
'queryTime': str(end_query - start_query),
'descTime': str(datetime.now() - startDesc),
'simTime': str(startDesc - end_query),
'queryResultTrue': str(count)
}
json.dump(ex_dict, json_file)
json_file.write('\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('filepath', help='path to parquet file',
type=str)
parser.add_argument('interpreter', help='interpreter to be used to'
+ 'run the experiments.', type=str)
args = parser.parse_args()
path = args.filepath
interpreter = args.interpreter
os.environ['SPARK_MAJOR_VERSION'] = '1'
os.environ['PYSPARK_PYTHON'] = interpreter
conf = (SparkConf()
.setAppName('app')
.setMaster('yarn-client')
.setAll([('spark.executor.memory', '80g'),
('spark.executor.cores', '12'),
('spark.cores.max', '12'),
('spark.driver.memory', '80g')]))
sc = SparkContext(conf=conf)
sql_context = SQLContext(sc)
start_time = datetime.now()
run_experiment(path)
| 39.653595
| 103
| 0.657491
|
8482f0344f02bdd242abc4c78eb4e8258a06751f
| 1,592
|
py
|
Python
|
380-Insert-Delete-GetRandom-O(1)/solution.py
|
Tanych/CodeTracking
|
86f1cb98de801f58c39d9a48ce9de12df7303d20
|
[
"MIT"
] | null | null | null |
380-Insert-Delete-GetRandom-O(1)/solution.py
|
Tanych/CodeTracking
|
86f1cb98de801f58c39d9a48ce9de12df7303d20
|
[
"MIT"
] | null | null | null |
380-Insert-Delete-GetRandom-O(1)/solution.py
|
Tanych/CodeTracking
|
86f1cb98de801f58c39d9a48ce9de12df7303d20
|
[
"MIT"
] | null | null | null |
class RandomizedSet(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.valset=[]
# record the index to remove
self.dictional={}
self.index=0
def insert(self, val):
"""
Inserts a value to the set. Returns true if the set did not already contain the specified element.
:type val: int
:rtype: bool
"""
try:
idx=self.dictional[val]
return False
except KeyError:
self.valset.append(val)
self.dictional[val]=self.index
self.index+=1
return True
def remove(self, val):
"""
Removes a value from the set. Returns true if the set contained the specified element.
:type val: int
:rtype: bool
"""
try:
idx=self.dictional[val]
self.valset[-1],self.valset[idx]=self.valset[idx], self.valset[-1]
self.dictional[self.valset[idx]]=idx
self.valset.pop()
self.index-=1
del self.dictional[val]
return True
except KeyError:
return False
def getRandom(self):
"""
Get a random element from the set.
:rtype: int
"""
import random
return self.valset[random.randint(0, len(self.valset)-1)]
# Your RandomizedSet object will be instantiated and called as such:
# obj = RandomizedSet()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
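# A short illustrative session (hypothetical values):
# obj = RandomizedSet()
# obj.insert(1)    # True  - 1 was not present
# obj.insert(1)    # False - 1 is already in the set
# obj.insert(2)    # True
# obj.remove(1)    # True  - 1 is swapped with the last element and popped in O(1)
# obj.getRandom()  # 2     - only 2 remains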
| 26.983051
| 106
| 0.544598
|
8798eb08627fe333e699ff097afaaeb937b455a8
| 623
|
py
|
Python
|
pypolodb/test_db.py
|
josephhuxley/PoloDB
|
218e9e50daee5849ff8839487e5e1c23132569e8
|
[
"MIT"
] | null | null | null |
pypolodb/test_db.py
|
josephhuxley/PoloDB
|
218e9e50daee5849ff8839487e5e1c23132569e8
|
[
"MIT"
] | null | null | null |
pypolodb/test_db.py
|
josephhuxley/PoloDB
|
218e9e50daee5849ff8839487e5e1c23132569e8
|
[
"MIT"
] | null | null | null |
import polodb
import os.path
DB_PATH = '/tmp/test-py.db'
def test_open():
if os.path.exists(DB_PATH):
print('database exist, remove')
os.remove(DB_PATH)
db = polodb.Database(DB_PATH)
db.close()
def test_create_collection():
db = polodb.Database(DB_PATH)
try:
collection = db.createCollection('test')
collection.insert({
'name': 'Vincent Chan',
'age': 14,
})
result = collection.find({
'name': 'Vincent Chan',
'age': 14,
})
assert len(result) == 1
assert result[0]['name'] == 'Vincent Chan'
assert result[0]['age'] == 14
finally:
db.close()
| 20.766667
| 46
| 0.608347
|
5f25037491e8ea16577ec886d8c1f715b8c90bb3
| 1,144
|
py
|
Python
|
docs/examples/savorizing.py
|
sdruskat/yatiml
|
4f55c058b72388350f0af3076ac3ea9bc1c142b0
|
[
"Apache-2.0"
] | null | null | null |
docs/examples/savorizing.py
|
sdruskat/yatiml
|
4f55c058b72388350f0af3076ac3ea9bc1c142b0
|
[
"Apache-2.0"
] | null | null | null |
docs/examples/savorizing.py
|
sdruskat/yatiml
|
4f55c058b72388350f0af3076ac3ea9bc1c142b0
|
[
"Apache-2.0"
] | null | null | null |
from ruamel import yaml
from typing import Optional, Union
import yatiml
# Create document class
class Submission:
def __init__(
self,
name: str,
age: Union[int, str],
tool: Optional[str]=None
) -> None:
self.name = name
self.age = age
self.tool = tool
@classmethod
def yatiml_savorize(cls, node: yatiml.Node) -> None:
str_to_int = {
'five': 5,
'six': 6,
'seven': 7,
}
if node.has_attribute_type('age', str):
str_val = node.get_attribute('age').get_value()
if str_val in str_to_int:
node.set_attribute('age', str_to_int[str_val])
else:
raise yatiml.SeasoningError('Invalid age string')
# Create loader
class MyLoader(yatiml.Loader):
pass
yatiml.add_to_loader(MyLoader, Submission)
yatiml.set_document_type(MyLoader, Submission)
# Load YAML
yaml_text = ('name: Janice\n'
'age: six\n')
doc = yaml.load(yaml_text, Loader=MyLoader)
print(doc.name)
print(doc.age)
print(doc.tool)
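# Expected output, assuming yatiml_savorize converts 'six' to 6 as defined above:
# Janice
# 6
# None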
| 23.833333
| 65
| 0.572552
|
05e54d423c003a8b7d84c2227f4a284de1682a96
| 1,401
|
py
|
Python
|
ui/query.py
|
minminfly68/Song-recommendation-Project-CAPP-30122-
|
9f97d6accdfd33c5bac267980b6c10d6d5b93bc7
|
[
"MIT"
] | null | null | null |
ui/query.py
|
minminfly68/Song-recommendation-Project-CAPP-30122-
|
9f97d6accdfd33c5bac267980b6c10d6d5b93bc7
|
[
"MIT"
] | 6
|
2021-03-19T03:18:44.000Z
|
2021-09-22T19:00:52.000Z
|
ui/query.py
|
minminfly68/Song-recommendation-Project-CAPP-30122-
|
9f97d6accdfd33c5bac267980b6c10d6d5b93bc7
|
[
"MIT"
] | null | null | null |
'''
Query Result
Chun Hu, Yimin Li, Tianyue Niu
'''
import pandas as pd
import random
import os
cwd = os.path.dirname(__file__)
top_song_path = os.path.join(cwd, 'top_songs.csv')
def execute(dict_command):
'''
Input: dict_command(dict), a dictionary of user's input
e.g. dict_command might look something like this:
{'happy': 'neutral', 'relaxing': 'relaxing', 'yes': 'no'}
Output: a song's title randomly selected from a selected list (str)
'''
df = pd.read_csv(top_song_path).drop(columns=['Unnamed: 0', 'Unnamed: 0.1'])
df = df.dropna(axis=0)
dict_sentiment = {'sad': 'sentiment < 0',
'neutral': '0 <= sentiment <= 0.2',
'happy': 'sentiment > 0.2'}
dict_energy = {'relaxing': 'nrgy < 40',
'neutral': '40<= nrgy <= 60',
'intensive': 'nrgy > 60'}
dict_year = {'yes': 'year >= 2018', 'no': 'year < 2018'}
ls_dict = [dict_sentiment, dict_energy, dict_year]
if not dict_command:
return "Oops! You haven't told us your preferences :)"
str_command = ''
for key_, value_ in dict_command.items():
for dict_ in ls_dict:
if key_ in dict_:
str_command += dict_[value_] + ' and '
str_command = str_command[:-5]
ls_title = list(df.query(str_command)['title'])
return random.choice(ls_title)
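# Example call (hypothetical preferences; requires top_songs.csv next to this file):
# execute({'happy': 'happy', 'relaxing': 'relaxing', 'yes': 'no'})
# builds the query 'sentiment > 0.2 and nrgy < 40 and year < 2018'
# and returns a randomly chosen matching title.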
| 30.456522
| 80
| 0.584582
|
f561709252df4be7fa6821fb99905d5c916bacf0
| 14,904
|
py
|
Python
|
mergify_engine/tests/functional/actions/test_merge.py
|
oharboe/mergify-engine
|
70785b1b1d9b2360f7a41c6d7f560e39d9ec4905
|
[
"Apache-2.0"
] | null | null | null |
mergify_engine/tests/functional/actions/test_merge.py
|
oharboe/mergify-engine
|
70785b1b1d9b2360f7a41c6d7f560e39d9ec4905
|
[
"Apache-2.0"
] | null | null | null |
mergify_engine/tests/functional/actions/test_merge.py
|
oharboe/mergify-engine
|
70785b1b1d9b2360f7a41c6d7f560e39d9ec4905
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
#
# Copyright © 2018 Mehdi Abaakouk <sileht@sileht.net>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import logging
import pytest
import yaml
from mergify_engine import config
from mergify_engine import context
from mergify_engine.queue import naive
from mergify_engine.tests.functional import base
LOG = logging.getLogger(__name__)
class TestMergeAction(base.FunctionalTestBase):
SUBSCRIPTION_ACTIVE = True
async def _do_test_smart_order(self, strict):
rules = {
"pull_request_rules": [
{
"name": "Merge on master",
"conditions": [f"base={self.master_branch_name}", "label=ready"],
"actions": {"merge": {"strict": strict}},
},
]
}
await self.setup_repo(yaml.dump(rules))
p_need_rebase, _ = await self.create_pr(base_repo="main")
# To force the previous PR to need a rebase
p, _ = await self.create_pr(base_repo="main")
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.wait_for("push", {})
await self.git("fetch", "--all")
p_ready, _ = await self.create_pr(base_repo="main")
await self.add_label(p_need_rebase["number"], "ready")
await self.add_label(p_ready["number"], "ready")
await self.run_engine()
return p_need_rebase, p_ready
async def test_merge_smart_ordered(self):
p_need_rebase, p_ready = await self._do_test_smart_order("smart+ordered")
ctxt = await context.Context.create(self.repository_ctxt, p_need_rebase, [])
q = await naive.Queue.from_context(ctxt)
pulls_in_queue = await q.get_pulls()
assert pulls_in_queue == [p_ready["number"]]
p_need_rebase = await self.get_pull(p_need_rebase["number"])
assert p_need_rebase["merged"]
assert p_need_rebase["commits"] == 2
async def test_merge_smart_unordered(self):
p_need_rebase, p_ready = await self._do_test_smart_order("smart+fastpath")
ctxt = await context.Context.create(self.repository_ctxt, p_need_rebase, [])
q = await naive.Queue.from_context(ctxt)
pulls_in_queue = await q.get_pulls()
assert pulls_in_queue == [p_need_rebase["number"]]
p_ready = await self.get_pull(p_ready["number"])
assert p_ready["merged"]
async def test_merge_smart_legacy(self):
p_need_rebase, p_ready = await self._do_test_smart_order("smart")
ctxt = await context.Context.create(self.repository_ctxt, p_need_rebase, [])
q = await naive.Queue.from_context(ctxt)
pulls_in_queue = await q.get_pulls()
assert pulls_in_queue == [p_ready["number"]]
p_need_rebase = await self.get_pull(p_need_rebase["number"])
assert p_need_rebase["merged"]
assert p_need_rebase["commits"] == 2
async def test_merge_priority(self):
rules = {
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.master_branch_name}",
"label=high",
"status-success=continuous-integration/fake-ci",
],
"actions": {
"merge": {"strict": "smart+ordered", "priority": "high"}
},
},
{
"name": "Merge priority default",
"conditions": [
f"base={self.master_branch_name}",
"label=medium",
"status-success=continuous-integration/fake-ci",
],
"actions": {"merge": {"strict": "smart+ordered"}},
},
{
"name": "Merge priority low",
"conditions": [
f"base={self.master_branch_name}",
"label=low",
"status-success=continuous-integration/fake-ci",
],
"actions": {"merge": {"strict": "smart+ordered", "priority": 1}},
},
]
}
await self.setup_repo(yaml.dump(rules))
p_high, _ = await self.create_pr()
p_medium, _ = await self.create_pr()
p_low, _ = await self.create_pr()
# To force others to be rebased
p, _ = await self.create_pr()
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
# Merge them in reverse priority order to ensure they are reordered
await self.add_label(p_low["number"], "low")
await self.create_status(p_low)
await self.add_label(p_medium["number"], "medium")
await self.create_status(p_medium)
await self.add_label(p_high["number"], "high")
await self.create_status(p_high)
await self.run_engine(1) # ensure we handle the 3 refresh here.
ctxt = await context.Context.create(self.repository_ctxt, p, [])
q = await naive.Queue.from_context(ctxt)
pulls_in_queue = await q.get_pulls()
assert pulls_in_queue == [p_high["number"], p_medium["number"], p_low["number"]]
# Each PR can be rebased, because we insert them in reverse order, but they are still
# all in the queue
await self.wait_for("pull_request", {"action": "synchronize"})
await self.wait_for("pull_request", {"action": "synchronize"})
await self.wait_for("pull_request", {"action": "synchronize"})
await self.run_engine()
p_high = await self.get_pull(p_high["number"])
await self.create_status(p_high)
# Ensure these events are processed in the same batch, otherwise replay may not work
await self.run_engine() # PR merged
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine(1) # ensure we handle the 2 refresh here.
await self.wait_for("pull_request", {"action": "synchronize"})
p_medium = await self.get_pull(p_medium["number"])
await self.create_status(p_medium)
await self.run_engine() # PR merged
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine(1) # ensure we handle the last refresh here.
await self.wait_for("pull_request", {"action": "synchronize"})
p_low = await self.get_pull(p_low["number"])
await self.create_status(p_low)
await self.run_engine() # PR merged
await self.wait_for("pull_request", {"action": "closed"})
p_low = await self.get_pull(p_low["number"])
p_medium = await self.get_pull(p_medium["number"])
p_high = await self.get_pull(p_high["number"])
self.assertEqual(True, p_low["merged"])
self.assertEqual(True, p_medium["merged"])
self.assertEqual(True, p_high["merged"])
assert (
datetime.datetime.fromisoformat(p_low["merged_at"][:-1])
> datetime.datetime.fromisoformat(p_medium["merged_at"][:-1])
> datetime.datetime.fromisoformat(p_high["merged_at"][:-1])
)
async def test_merge_rule_switch(self):
rules = {
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.master_branch_name}",
"label=high",
"status-success=continuous-integration/fake-ci",
],
"actions": {
"merge": {"strict": "smart+ordered", "priority": "high"}
},
},
{
"name": "Merge priority medium",
"conditions": [
f"base={self.master_branch_name}",
"label=medium",
"status-success=continuous-integration/fake-ci",
],
"actions": {"merge": {"strict": "smart+ordered"}},
},
{
"name": "Merge priority low",
"conditions": [
f"base={self.master_branch_name}",
"label=low",
"status-success=continuous-integration/fake-ci",
],
"actions": {"merge": {"strict": "smart+ordered", "priority": 1}},
},
]
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
# To force others to be rebased
p, _ = await self.create_pr()
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
# Merge them in reverse priority order to ensure they are reordered
await self.add_label(p1["number"], "medium")
await self.add_label(p2["number"], "low")
await self.create_status(p1)
await self.create_status(p2)
await self.run_engine(1)
ctxt = await context.Context.create(self.repository_ctxt, p, [])
q = await naive.Queue.from_context(ctxt)
pulls_in_queue = await q.get_pulls()
assert pulls_in_queue == [p1["number"], p2["number"]]
await self.remove_label(p2["number"], "low")
await self.add_label(p2["number"], "high")
await self.run_engine()
pulls_in_queue = await q.get_pulls()
assert pulls_in_queue == [p2["number"], p1["number"]]
# FIXME(sileht): Provide a tool to generate oauth_token without
# the need for the dashboard
@pytest.mark.skipif(
config.GITHUB_URL != "https://github.com",
reason="We use a PAT token instead of an OAUTH_TOKEN",
)
async def test_merge_github_workflow(self):
rules = {
"pull_request_rules": [
{
"name": "Merge",
"conditions": [
f"base={self.master_branch_name}",
"label=automerge",
],
"actions": {"merge": {"strict": "smart+ordered"}},
},
]
}
await self.setup_repo(yaml.dump(rules))
p, _ = await self.create_pr(files={".github/workflows/foo.yml": "whatever"})
await self.add_label(p["number"], "automerge")
await self.run_engine()
ctxt = await context.Context.create(self.repository_ctxt, p, [])
checks = await ctxt.pull_engine_check_runs
assert len(checks) == 2
check = checks[1]
assert check["conclusion"] == "action_required"
assert check["output"]["title"] == "Pull request must be merged manually."
assert (
check["output"]["summary"]
== "GitHub App like Mergify are not allowed to merge pull request where `.github/workflows` is changed.\n<br />\nThis pull request must be merged manually."
)
await self.remove_label(p["number"], "automerge")
await self.run_engine()
ctxt = await context.Context.create(self.repository_ctxt, p, [])
checks = await ctxt.pull_engine_check_runs
assert len(checks) == 2
check = checks[1]
assert check["conclusion"] == "cancelled"
assert check["output"]["title"] == "The rule doesn't match anymore"
async def test_merge_draft(self):
rules = {
"pull_request_rules": [
{
"name": "Merge",
"conditions": [
f"base={self.master_branch_name}",
"label=automerge",
],
"actions": {"merge": {"strict": "smart+ordered"}},
},
]
}
await self.setup_repo(yaml.dump(rules))
p, _ = await self.create_pr(draft=True)
await self.add_label(p["number"], "automerge")
await self.run_engine()
ctxt = await context.Context.create(self.repository_ctxt, p, [])
checks = await ctxt.pull_engine_check_runs
assert len(checks) == 2
check = checks[1]
assert check["conclusion"] is None
assert check["output"]["title"] == "Draft flag needs to be removed"
assert check["output"]["summary"] == ""
await self.remove_label(p["number"], "automerge")
await self.run_engine()
ctxt = await context.Context.create(self.repository_ctxt, p, [])
checks = await ctxt.pull_engine_check_runs
assert len(checks) == 2
check = checks[1]
assert check["conclusion"] == "cancelled"
assert check["output"]["title"] == "The rule doesn't match anymore"
async def test_merge_with_installation_token(self):
rules = {
"pull_request_rules": [
{
"name": "merge on master",
"conditions": [f"base={self.master_branch_name}"],
"actions": {"merge": {}},
},
]
}
await self.setup_repo(yaml.dump(rules))
p, _ = await self.create_pr()
await self.run_engine()
await self.wait_for("pull_request", {"action": "closed"})
p = await self.get_pull(p["number"])
self.assertEqual(True, p["merged"])
self.assertEqual(config.BOT_USER_LOGIN, p["merged_by"]["login"])
async def test_merge_with_oauth_token(self):
rules = {
"pull_request_rules": [
{
"name": "merge on master",
"conditions": [f"base={self.master_branch_name}"],
"actions": {"merge": {"merge_bot_account": "mergify-test3"}},
},
]
}
await self.setup_repo(yaml.dump(rules))
p, _ = await self.create_pr()
await self.run_engine()
await self.wait_for("pull_request", {"action": "closed"})
p = await self.get_pull(p["number"])
self.assertEqual(True, p["merged"])
self.assertEqual("mergify-test3", p["merged_by"]["login"])
| 39.221053
| 168
| 0.560454
|
949144fac1674c027ace77216a512217cca4301b
| 326
|
py
|
Python
|
exercises/ja/test_01_02_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085
|
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/ja/test_01_02_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79
|
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/ja/test_01_02_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361
|
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
def test():
import spacy.tokens
import spacy.lang.de
assert isinstance(nlp, spacy.lang.de.German), "nlpオブジェクトはGermanクラスのインスタンスでなければなりません"
assert isinstance(doc, spacy.tokens.Doc), "テキストをnlpオブジェクトで処理してdocを作成しましたか?"
assert "print(doc.text)" in __solution__, "doc.textをプリントしましたか?"
__msg__.good("正解です!")
| 32.6
| 88
| 0.736196
|
34606e8e6168651108e5967eb7987555ed2b2910
| 634
|
py
|
Python
|
manage.py
|
lancea-development/product-showcase
|
32bdafaa5aa474f08db1a41e4d54b215d43ca6e6
|
[
"MIT"
] | 1
|
2021-08-02T12:50:04.000Z
|
2021-08-02T12:50:04.000Z
|
manage.py
|
wlansu/product-showcase
|
a0589f7e79eacfc70b3b66ced08a0890c4cc39c5
|
[
"MIT"
] | 11
|
2019-06-09T12:01:02.000Z
|
2022-01-13T01:22:52.000Z
|
manage.py
|
wlansu/product-showcase
|
a0589f7e79eacfc70b3b66ced08a0890c4cc39c5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "klanad_website.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| 28.818182
| 78
| 0.68612
|
ef9c01b9a0d5b789421a2e08fe09ed4c3af2bc0a
| 7,944
|
py
|
Python
|
olivia/immunization.py
|
dsr0018/olivia
|
8b7de3a512848c5d313bbc848ac9c7b667c2f6ce
|
[
"MIT"
] | null | null | null |
olivia/immunization.py
|
dsr0018/olivia
|
8b7de3a512848c5d313bbc848ac9c7b667c2f6ce
|
[
"MIT"
] | 18
|
2020-07-09T11:56:35.000Z
|
2021-01-15T12:02:06.000Z
|
olivia/immunization.py
|
dsr0018/repo-net
|
8b7de3a512848c5d313bbc848ac9c7b667c2f6ce
|
[
"MIT"
] | 1
|
2021-01-20T10:23:49.000Z
|
2021-01-20T10:23:49.000Z
|
"""
Olivia immunization functions.
Immunization analysis determines in which packages it is better to invest to protect the network as a whole.
"""
import random
import networkx as nx
from itertools import product
from olivia.lib.graphs import removed, strong_articulation_points
from olivia.model import OliviaNetwork
from olivia.networkmetrics import failure_vulnerability
from olivia.packagemetrics import Reach, DependentsCount, Impact, Surface
def immunization_delta(net, n, cost_metric=Reach, algorithm='network'):
"""
Compute the improvement in network vulnerability by immunizing a certain set of packages.
Parameters
----------
net: OliviaNetwork
Input network.
n: container
Container of packages to be immunized.
cost_metric: class, optional
Metric to measure cost.
algorithm: 'network' or 'analytic'
Returns
-------
result: float
Difference of network vulnerability after immunization of the elements in n.
Notes
-----
The 'network' algorithm implements the naive approach of removing the immunized nodes and rebuilding the model
from scratch, so it is slow for big networks. Some obvious improvements could be made, but whether or not there is
a much better alternative is an open question.
The 'analytic' algorithm uses only local information pertaining to the transitive relations of the elements to be
immunized. This is faster for smaller networks and/or smaller immunization sets but slower otherwise. It is only
implemented for the Reach metric.
"""
if algorithm == 'network':
return _immunization_delta_network(net, n, cost_metric=cost_metric)
elif algorithm == 'analytic' and cost_metric == Reach:
return _immunization_delta_analytic(net, n)
else:
raise ValueError("Not implemented.")
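# Hypothetical usage sketch (assumes `net` is an OliviaNetwork built elsewhere and
# 'foo' is a package in it; the names are illustrative only):
# immunization_delta(net, {'foo'}, algorithm='analytic')  # vulnerability reduction from immunizing 'foo'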
def _immunization_delta_network(net, n, cost_metric=Reach):
f1 = failure_vulnerability(net, metric=cost_metric)
size_correction = (len(net.network) - len(n)) / len(net.network)
with removed(net.network, n):
immunized_net = OliviaNetwork()
immunized_net.build_model(net.network)
f2 = failure_vulnerability(immunized_net, metric=cost_metric)
f2 = size_correction * f2
return f1 - f2
def _immunization_delta_analytic(net, n):
g = net.network
shunt = set()
a = set()
d = set()
s = set()
for node in n:
asc = nx.ancestors(g, node)
a.update(asc)
desc = nx.descendants(g, node)
d.update(desc)
s.update(set(product(asc | {node}, desc | {node})))
a = a - set(n)
d = d - set(n)
with removed(g, n):
for ancestor in a:
desc = nx.descendants(g, ancestor) | {ancestor}
shunt.update({(ancestor, f) for f in desc})
return len(s - shunt) / len(g)
def iset_naive_ranking(set_size, ms, subset=None):
"""
Compute an immunization set by selecting top elements according to a metric.
Parameters
----------
set_size: int
Number of packages in the immunization set.
ms: metricStats
Metric to measure cost.
subset: container of nodes
subset of packages to limit the ranking to
Returns
-------
immunization_set: set
Set of packages to be immunized.
"""
return {p[0] for p in ms.top(set_size, subset)}
def iset_delta_set_reach(olivia_model):
"""
Compute an immunization set using the DELTA SET algorithm with the Reach metric.
    DELTA SET computes upper and lower bounds for the vulnerability reduction associated with the immunization of
each package in the network and returns a set that is guaranteed to contain the single optimum package for
immunization.
The resulting set size is a product of the algorithm and cannot be selected.
Parameters
----------
olivia_model: OliviaNetwork
Input network
Returns
-------
immunization_set: set
Set of packages to be immunized.
"""
delta_upper = olivia_model.get_metric(Reach) * olivia_model.get_metric(Surface)
delta_lower = olivia_model.get_metric(Reach) + olivia_model.get_metric(Surface) - 1
max_lower = delta_lower.top()[0][1]
return {p for p in olivia_model if delta_upper[p] > max_lower}
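# Rationale (restating the bounds used above): for every package p,
#   Reach(p) + Surface(p) - 1  <=  delta(p)  <=  Reach(p) * Surface(p)
# so any p whose upper bound exceeds the best lower bound max_lower may still be the
# single optimum package and is therefore kept in the returned set.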
def iset_delta_set_impact(olivia_model):
"""
Compute an immunization set using the DELTA SET algorithm with the Impact metric.
    DELTA SET computes upper and lower bounds for the vulnerability reduction associated with the immunization of
each package in the network and returns a set that is guaranteed to contain the single optimum package for
immunization.
The resulting set size is a product of the algorithm and cannot be selected.
Parameters
----------
olivia_model: OliviaNetwork
Input network
Returns
-------
immunization_set: set
Set of packages to be immunized.
"""
delta_upper = olivia_model.get_metric(Impact) * olivia_model.get_metric(Surface)
delta_lower = olivia_model.get_metric(DependentsCount) * olivia_model.get_metric(Surface)
max_lower = delta_lower.top()[0][1]
return {p for p in olivia_model if delta_upper[p] > max_lower}
def iset_sap(olivia_model, clusters=None):
"""
Compute an immunization set detecting strong articulation points (SAP).
Immunization of SAP in the strongly connected components (SCC) of the network can be very effective
in networks with large SCCs.
Large SCC play a crucial role in increasing the vulnerability of networks of dependencies. Strong articulation
points are nodes whose removal would create additional strongly connected components, thus reducing the size of
the larger SCC.
    The appearance of SCCs in real package networks seems to follow a model similar to the formation of the giant
    component in Erdős-Rényi models, so the size of the largest SCC is usually much larger than that of the rest.
The resulting set size is a product of the algorithm and cannot be selected.
Parameters
----------
olivia_model: OliviaNetwork
Input network
clusters: sets of nodes
Iterable with sets of nodes forming SCCs in the network. If None the largest SCC is detected and used.
Returns
-------
immunization_set: set
Set of packages to be immunized corresponding to the SAP of the clusters.
"""
if clusters is None:
clusters = [olivia_model.sorted_clusters()[0]]
sap = set()
for c in clusters:
scc = olivia_model.network.subgraph(c)
sap.update(strong_articulation_points(scc))
return sap
def iset_random(olivia_model, set_size, indirect=False, seed=None):
"""
Compute an immunization set by randomly selecting packages.
This method is useful for understanding the nature of a network's vulnerability and/or for
establishing baseline immunization cases.
Parameters
----------
olivia_model: OliviaNetwork
Input network
set_size: int
Number of packages in the immunization set.
indirect: bool, optional
        Whether to use indirect selection or not. With indirect selection, the immunization set is constructed
by randomly choosing a dependency of a randomly selected package.
seed: int, optional
Seed for the random number generator.
Returns
-------
immunization_set: set
Set of packages to be immunized.
"""
packages = tuple(olivia_model)
if seed:
random.seed(seed)
if indirect:
result = set()
while len(result) != set_size:
dependencies = []
while len(dependencies) == 0:
current = random.choice(packages)
dependencies = olivia_model[current].direct_dependencies()
result.add(random.choice(tuple(dependencies)))
return result
else:
return set(random.sample(packages, k=set_size))
| 32.826446
| 117
| 0.691088
|
8d7be49f3795ab46f2fc0c42edbf5728487d9d6f
| 1,339
|
py
|
Python
|
extra/bsmalea-notes-1a/polynomial_regression.py
|
cookieblues/cookieblues.github.io
|
9b570d83887eb2d6f92cfaa927a1adf136124a90
|
[
"MIT"
] | null | null | null |
extra/bsmalea-notes-1a/polynomial_regression.py
|
cookieblues/cookieblues.github.io
|
9b570d83887eb2d6f92cfaa927a1adf136124a90
|
[
"MIT"
] | 2
|
2020-03-30T14:58:30.000Z
|
2020-12-10T15:15:06.000Z
|
extra/bsmalea-notes-1a/polynomial_regression.py
|
cookieblues/cookieblues.github.io
|
9b570d83887eb2d6f92cfaa927a1adf136124a90
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.constants import golden
mpl.rc("text", usetex=True)
mpl.rc("font", family="serif")
x = np.array([-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1])
t = np.array([-4.9, -3.5, -2.8, 0.8, 0.3, -1.6, -1.3, 0.5, 2.1, 2.9, 5.6])
def f(x):
return 3*np.sin((1/2)*np.pi * x) - 2*np.sin((3/2) * np.pi * x)
M = 4
N = len(x)
X = np.zeros((N, M+1))
for m in range(M+1):
X[:, m] = x**m
beta = np.linalg.inv(X.T @ X) @ X.T @ t
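# beta solves the least-squares normal equations (X^T X) beta = X^T t for the polynomial
# coefficients. A numerically safer equivalent (a hedged alternative, not part of the
# original script) would be:
#   beta, *_ = np.linalg.lstsq(X, t, rcond=None)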
h = np.poly1d(np.flip(beta, 0))
x_ = np.linspace(x.min()-0.025, x.max()+0.025, 250)
t_ = h(x_)
fig = plt.figure(figsize=(8, 8/golden))
ax = fig.add_subplot()
ax.scatter(x, t,
edgecolors = "magenta",
c = "None",
s = 12.5,
marker = "o"
)
ax.plot(x_, t_,
color="turquoise",
linewidth = 1,
label = "Predicted"
)
true = np.linspace(x.min()-0.025, x.max()+0.025, 250)
ax.plot(
true, f(true),
color="magenta",
linewidth = 1,
label = "True"
)
ax.set_xlim(x.min()-0.025, x.max()+0.025)
ax.set_xticks([-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1])
ax.set_xticklabels(["$-1.0$", "$-0.8$", "$-0.6$", "$-0.4$", "$-0.2$", "$0.0$", "$0.2$", "$0.4$", "$0.6$", "$0.8$", "$1.0$"])
ax.legend(frameon=False, fontsize=14)
plt.tight_layout()
plt.savefig("poly_reg.png")
plt.show()
| 21.253968
| 124
| 0.538462
|
44fd6cd3c295a847c26dddde0d65fee85799df07
| 17,555
|
py
|
Python
|
one_fm/hooks.py
|
ONE-F-M/One-FM
|
36fac99f6ab3f0d5a8c0b9a29b6d36b266255189
|
[
"MIT"
] | 16
|
2021-06-14T23:56:47.000Z
|
2022-03-22T12:05:06.000Z
|
one_fm/hooks.py
|
ONE-F-M/One-FM
|
36fac99f6ab3f0d5a8c0b9a29b6d36b266255189
|
[
"MIT"
] | 119
|
2020-08-17T16:27:45.000Z
|
2022-03-28T12:42:56.000Z
|
one_fm/hooks.py
|
ONE-F-M/One-FM
|
36fac99f6ab3f0d5a8c0b9a29b6d36b266255189
|
[
"MIT"
] | 12
|
2021-05-16T13:35:40.000Z
|
2022-02-21T12:41:04.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
import frappe as _frappe
from frappe import _
from erpnext.hr.doctype.shift_type.shift_type import ShiftType
from one_fm.api.doc_methods.shift_type import process_auto_attendance
app_name = "one_fm"
app_title = "One Fm"
app_publisher = "omar jaber"
app_description = "One Facility Management is a leader in the fields of commercial automation and integrated security management systems providing the latest in products and services in these fields"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "omar.ja93@gmail.com"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
app_include_css = "/assets/one_fm/css/one_fm.css"
app_include_js = [
"/assets/one_fm/js/maps.js"
]
# include js, css files in header of web template
# web_include_css = "/assets/one_fm/css/one_fm.css"
# web_include_js = "/assets/one_fm/js/one_fm.js"
# include js in page
page_js = {
"roster" : [
# "public/js/roster_js/jquery-ui.min.js",
# "public/js/roster_js/bootstrap-datepicker.min.js",
"public/js/roster_js/bootstrap-notify.min.js",
"public/js/roster_js/select2.min.js",
"public/js/roster_js/jquery.dataTables.min.js",
"public/js/roster_js/jquery.validate.min.js",
"public/js/roster_js/additional-methods.min.js",
"public/js/roster_js/rosteringmodalvalidation.js",
"public/js/roster_js/flatpickr.min.js"
],
"checkpoint-scan": [
"public/js/html5-qrcode.min.js"
]
}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
doctype_js = {
"Location" : "public/js/doctype_js/location.js",
"Shift Type" : "public/js/doctype_js/shift_type.js",
"Leave Type" : "public/js/doctype_js/leave_type.js",
"Project": "public/js/doctype_js/project.js",
"Notification Log": "public/js/doctype_js/notification_log.js",
"Sales Invoice": "public/js/doctype_js/sales_invoice.js",
"Delivery Note": "public/js/doctype_js/delivery_note.js",
"Job Applicant": "public/js/doctype_js/job_applicant.js",
"Job Offer": "public/js/doctype_js/job_offer.js",
"Price List": "public/js/doctype_js/price_list.js",
"Vehicle": "public/js/doctype_js/vehicle.js",
"Asset": "public/js/doctype_js/asset.js",
"Supplier": "public/js/doctype_js/supplier.js",
"Item": "public/js/doctype_js/item.js",
"Item Group": "public/js/doctype_js/item_group.js",
"Purchase Receipt": "public/js/doctype_js/purchase_receipt.js",
"Asset Movement": "public/js/doctype_js/asset_movement.js",
"Job Opening": "public/js/doctype_js/job_opening.js",
"Warehouse": "public/js/doctype_js/warehouse.js",
"Purchase Invoice": "public/js/doctype_js/purchase_invoice.js",
"Purchase Order": "public/js/doctype_js/purchase_order.js",
"Journal Entry": "public/js/doctype_js/journal_entry.js",
"Payment Entry": "public/js/doctype_js/payment_entry.js",
"Item Price": "public/js/doctype_js/item_price.js"
}
doctype_list_js = {
"Job Applicant" : "public/js/doctype_js/job_applicant_list.js",
"Job Offer": "public/js/doctype_js/job_offer_list.js"
}
doctype_tree_js = {
"Warehouse" : "public/js/doctype_tree_js/warehouse_tree.js",
}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
home_page = "landing_page"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "one_fm.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "one_fm.install.before_install"
# after_install = "one_fm.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "one_fm.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
permission_query_conditions = {
"Penalty": "one_fm.legal.doctype.penalty.penalty.get_permission_query_conditions",
"Penalty Issuance": "one_fm.legal.doctype.penalty_issuance.penalty_issuance.get_permission_query_conditions"
}
has_permission = {
"Penalty": "one_fm.legal.doctype.penalty.penalty.has_permission",
"Penalty Issuance": "one_fm.legal.doctype.penalty_issuance.penalty_issuance.has_permission"
}
doc_events = {
"Stock Entry": {
"on_submit": "one_fm.purchase.doctype.request_for_material.request_for_material.update_completed_and_requested_qty",
"on_cancel": "one_fm.purchase.doctype.request_for_material.request_for_material.update_completed_and_requested_qty"
},
"Purchase Order": {
"on_submit": "one_fm.purchase.doctype.request_for_material.request_for_material.update_completed_purchase_qty",
"on_cancel": "one_fm.purchase.doctype.request_for_material.request_for_material.update_completed_purchase_qty"
},
"Leave Application": {
"on_submit": "one_fm.utils.leave_appillication_on_submit",
"validate": "one_fm.utils.validate_hajj_leave",
"on_cancel": "one_fm.utils.leave_appillication_on_cancel"
},
"Leave Type": {
"validate": "one_fm.utils.validate_leave_type_for_one_fm_paid_leave"
},
"Employee": {
"validate":"one_fm.hiring.utils.set_employee_name",
"before_validate": "one_fm.api.doc_events.employee_before_validate",
"after_insert": "one_fm.hiring.utils.employee_after_insert",
"on_update":"one_fm.hiring.utils.set_mandatory_feilds_in_employee_for_Kuwaiti"
},
"Employee Grade": {
"validate": "one_fm.one_fm.utils.employee_grade_validate"
},
"Job Applicant": {
"validate": "one_fm.utils.validate_job_applicant",
"onload": "one_fm.utils.validate_pam_file_number_and_pam_designation",
"on_update": "one_fm.one_fm.utils.send_notification_to_grd_or_recruiter",
"after_insert": "one_fm.hiring.utils.after_insert_job_applicant"
},
"Job Offer": {
"validate": "one_fm.hiring.utils.validate_job_offer",
"on_update_after_submit": "one_fm.hiring.utils.job_offer_on_update_after_submit",
"onload": "one_fm.hiring.utils.job_offer_onload"
},
"Shift Type": {
"autoname": "one_fm.api.doc_events.naming_series"
},
"Warehouse": {
"autoname": "one_fm.utils.warehouse_naming_series",
"before_insert": "one_fm.utils.before_insert_warehouse",
"on_update": "one_fm.utils.set_warehouse_contact_from_project"
},
"Vehicle": {
"autoname": "one_fm.fleet_management.utils.vehicle_naming_series",
"after_insert": "one_fm.fleet_management.doctype.vehicle_leasing_contract.vehicle_leasing_contract.after_insert_vehicle"
},
"Item Group": {
"autoname": "one_fm.utils.item_group_naming_series",
"before_insert": "one_fm.utils.validate_get_item_group_parent",
"after_insert": "one_fm.utils.after_insert_item_group"
},
"Item": {
"autoname": "one_fm.utils.item_naming_series",
"before_insert": "one_fm.utils.before_insert_item",
"validate": "one_fm.utils.validate_item"
},
"Supplier Group": {
"on_update": "one_fm.utils.supplier_group_on_update",
},
"Bank Account": {
"on_update": "one_fm.utils.bank_account_on_update",
"on_trash": "one_fm.utils.bank_account_on_trash",
},
"Employee Checkin": {
"validate": "one_fm.api.doc_events.employee_checkin_validate",
"after_insert": "one_fm.api.doc_events.checkin_after_insert"
},
"Purchase Receipt": {
"before_submit": "one_fm.purchase.utils.before_submit_purchase_receipt",
"on_submit": "one_fm.one_fm.doctype.customer_asset.customer_asset.on_purchase_receipt_submit"
},
"ToDo": {
"after_insert": "one_fm.grd.utils.todo_after_insert"
},
"Contact": {
"on_update": "one_fm.accommodation.doctype.accommodation.accommodation.accommodation_contact_update"
},
"Project": {
"onload": "one_fm.one_fm.project_custom.get_depreciation_expense_amount"
# "on_update": "one_fm.api.doc_events.project_on_update"
},
"Attendance": {
"on_submit": "one_fm.api.tasks.update_shift_details_in_attendance"
},
"Asset":{
"after_insert" : "one_fm.one_fm.asset_custom.after_insert_asset",
"on_submit": "one_fm.one_fm.asset_custom.on_asset_submit"
},
"Sales Invoice":{
"before_submit": "one_fm.one_fm.sales_invoice_custom.before_submit_sales_invoice"
},
"Salary Slip": {
"before_submit": "one_fm.api.doc_methods.salary_slip.salary_slip_before_submit"
},
"Training Event":{
"on_submit": "one_fm.api.doc_events.update_training_event_data"
},
"Training Result" :{
"on_submit": "one_fm.api.doc_events.update_certification_data"
},
# "Additional Salary" :{
# "on_submit": "one_fm.grd.utils.validate_date"
# }
}
standard_portal_menu_items = [
{"title": "Job Applications", "route": "/job-applications", "reference_doctype": "Job Applicant", "role": "Job Applicant"},
{"title": _("Request for Supplier Quotations"), "route": "/rfq1", "reference_doctype": "Request for Supplier Quotation", "role": "Supplier"},
{"title": _("Job Openings"), "route": "/agency_job_opening", "reference_doctype": "Job Opening", "role": "Agency"}
]
has_website_permission = {
"Job Applicant": "one_fm.utils.applicant_has_website_permission",
"Job Opening": "one_fm.one_fm.doctype.agency.agency.agency_has_website_permission"
}
website_route_rules = [
{"from_route": "/rfq1", "to_route": "Request for Supplier Quotation"},
{"from_route": "/rfq1/<path:name>", "to_route": "rfq1",
"defaults": {
"doctype": "Request for Supplier Quotation",
"parents": [{"label": _("Request for Supplier Quotation"), "route": "rfq1"}]
}
}
]
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
scheduler_events = {
"daily": [
'one_fm.utils.pam_salary_certificate_expiry_date',
'one_fm.utils.pam_authorized_signatory',
'one_fm.utils.hooked_leave_allocation_builder',
'one_fm.utils.increase_daily_leave_balance',
'one_fm.one_fm.hr_utils.daily_indemnity_allocation_builder',
'one_fm.one_fm.hr_utils.allocate_daily_indemnity',
'one_fm.utils.check_grp_operator_submission_daily',
'one_fm.utils.check_grp_supervisor_submission_daily',
'one_fm.utils.check_pam_visa_approval_submission_daily',
'one_fm.utils.check_upload_original_visa_submission_daily',
'one_fm.hiring.utils.notify_finance_job_offer_salary_advance',
'one_fm.api.tasks.automatic_shift_assignment',
'one_fm.uniform_management.doctype.employee_uniform.employee_uniform.notify_gsd_and_employee_before_uniform_expiry',
'one_fm.operations.doctype.mom_followup.mom_followup.mom_followup_reminder',
'one_fm.one_fm.depreciation_custom.post_depreciation_entries',
'one_fm.operations.doctype.contracts.contracts.auto_renew_contracts',
],
"hourly": [
# "one_fm.api.tasks.send_checkin_hourly_reminder",
'one_fm.utils.send_gp_letter_attachment_reminder3',
'one_fm.utils.send_gp_letter_reminder'
],
"weekly": [
'one_fm.operations.doctype.mom_followup.mom_followup.mom_sites_followup',
'one_fm.operations.doctype.mom_followup.mom_followup.mom_followup_penalty',
],
"monthly": [
"one_fm.accommodation.utils.execute_monthly"
],
"cron": {
"0 8 * * 0,1,2,3,4":[#run durring working days only
'one_fm.grd.doctype.work_permit.work_permit.system_remind_renewal_operator_to_apply',#wp
'one_fm.grd.doctype.work_permit.work_permit.system_remind_transfer_operator_to_apply',
'one_fm.grd.doctype.medical_insurance.medical_insurance.system_remind_renewal_operator_to_apply',#mi
'one_fm.grd.doctype.medical_insurance.medical_insurance.system_remind_transfer_operator_to_apply',
'one_fm.grd.doctype.moi_residency_jawazat.moi_residency_jawazat.system_remind_renewal_operator_to_apply',#moi
'one_fm.grd.doctype.moi_residency_jawazat.moi_residency_jawazat.system_remind_transfer_operator_to_apply',
'one_fm.grd.doctype.paci.paci.system_remind_renewal_operator_to_apply',#paci
'one_fm.grd.doctype.paci.paci.system_remind_transfer_operator_to_apply',
'one_fm.grd.doctype.paci.paci.notify_operator_to_take_hawiyati_renewal',#paci hawiyati
'one_fm.grd.doctype.paci.paci.notify_operator_to_take_hawiyati_transfer'
],
"0 8 15 * *": [
'one_fm.grd.doctype.preparation.preparation.create_preparation',
],
"0 8 1 * *": [# first day of the Month at 8 am
'one_fm.grd.doctype.pifss_monthly_deduction.pifss_monthly_deduction.auto_create_pifss_monthly_deduction_record',
],
"0/1 * * * *": [
"one_fm.legal.doctype.penalty.penalty.automatic_reject",
'one_fm.api.tasks.process_attendance'
],
"0/5 * * * *": [
"one_fm.api.tasks.supervisor_reminder",
"one_fm.api.tasks.final_reminder",
"one_fm.api.tasks.checkin_deadline"
#"one_fm.api.tasks.automatic_checkout"
],
"0/15 * * * *": [
"one_fm.api.tasks.update_shift_type"
],
"30 10 * * *": [
'one_fm.utils.create_gp_letter_request'
],
"45 10 * * *": [
'one_fm.utils.send_travel_agent_email'
],
"0 4 * * *": [
'one_fm.utils.check_grp_operator_submission_four'
],
"30 4 * * *": [
'one_fm.utils.check_grp_operator_submission_four_half'
],
"0 8 * * *": [
'one_fm.utils.send_gp_letter_attachment_reminder2',
'one_fm.utils.send_gp_letter_attachment_no_response',
'one_fm.grd.doctype.fingerprint_appointment.fingerprint_appointment.before_one_day_of_appointment_date',
'one_fm.grd.doctype.paci.paci.notify_to_upload_hawiyati',
# 'one_fm.grd.doctype.fingerprint_appointment.fingerprint_appointment.get_employee_list',
'one_fm.grd.doctype.fingerprint_appointment.fingerprint_appointment.notify_grd_operator_documents',
'one_fm.grd.doctype.pifss_form_103.pifss_form_103.notify_grd_to_check_status_on_pifss',
'one_fm.grd.doctype.pifss_form_103.pifss_form_103.notify_grd_to_check_under_process_status_on_pifss',
'one_fm.grd.doctype.mgrp.mgrp.notify_awaiting_response_mgrp',
'one_fm.grd.utils.sendmail_reminder_to_book_appointment_for_pifss',
'one_fm.grd.utils.sendmail_reminder_to_collect_pifss_documents',
'one_fm.hiring.doctype.transfer_paper.transfer_paper.check_signed_workContract_employee_completed',
'one_fm.utils.issue_roster_actions'
],
"0 9 * * *": [
'one_fm.utils.check_upload_tasriah_submission_nine',
],
"0 11 * * *": [
'one_fm.utils.check_upload_tasriah_reminder1'
],
# "one_fm.one_fm.grd" doesnt find the module, only "one_fm.grd"
"0 10 * * *": [
'one_fm.utils.check_upload_tasriah_reminder2',
'one_fm.grd.doctype.medical_insurance.medical_insurance.notify_grd_operator_to_mark_completed_second'
],
"30 6 * * *": [
'one_fm.utils.check_pam_visa_approval_submission_six_half'
],
"0 7 * * *": [
'one_fm.utils.check_pam_visa_approval_submission_seven'
],
"30 12 * * *": [
'one_fm.utils.check_upload_original_visa_submission_reminder1'
],
"0 13 * * *": [
'one_fm.utils.check_upload_original_visa_submission_reminder2'
],
"0 6 * * *":[
'one_fm.one_fm.sales_invoice_custom.create_sales_invoice'
],
"0 */48 * * *": [
'one_fm.one_fm.pifss.doctype.pifss_form_103.pifss_form_103.notify_open_pifss'
],
"00 00 24 * *": [
'one_fm.api.tasks.generate_penalties'
],
"00 11 26 * *": [
'one_fm.api.tasks.generate_site_allowance'
],
"00 02 24 * *": [
'one_fm.api.tasks.generate_payroll'
]
}
}
# scheduler_events = {
# "all": [
# "one_fm.tasks.all"
# ],
# "daily": [
# "one_fm.tasks.daily"
# ],
# "hourly": [
# "one_fm.tasks.hourly"
# ],
# "weekly": [
# "one_fm.tasks.weekly"
# ]
# "monthly": [
# "one_fm.tasks.monthly"
# ]
# }
# Testing
# -------
# from one_fm.purchase.custom_field_list import get_custom_field_name_list
# my_custom_fieldname_list = get_custom_field_name_list(['Job Applicant'])
# fixtures = [
# {
# "dt": "Custom Field",
# 'filters': [['name', 'in', my_custom_fieldname_list]]
# },
# {
# "dt": "Custom Script",
# 'filters': [['dt', 'in', ['Job Applicant', 'Job Opening', 'Job Offer', 'Item', 'Stock Entry', 'Warehouse', 'Supplier',
# 'Payment Entry', 'Payment Request', 'Purchase Receipt', 'Purchase Order']]]
# }
# ]
fixtures = [
{
"dt": "Custom Field",
# 'filters': [['dt', 'in', ['Shift Request', 'Shift Permission', 'Employee', 'Project', 'Location', 'Employee Checkin', 'Shift Assignment', 'Shift Type', 'Operations Site']]]
},
{
"dt": "Property Setter"
},
{
"dt": "Workflow State"
},
{
"dt": "Workflow Action Master"
},
{
"dt": "Workflow"
},
{
"dt": "Client Script",
'filters': [['dt', 'in', ['Stock Entry', 'Warehouse',
'Payment Entry', 'Payment Request', 'Purchase Receipt', 'Purchase Order']]]
},
{
"dt": "Print Format"
},
{
"dt": "Role",
"filters": [["name", "in",["Operations Manager", "Shift Supervisor", "Site Supervisor", "Projects Manager"]]]
},
{
"dt": "Custom DocPerm",
"filters": [["role", "in",["Operations Manager", "Shift Supervisor", "Site Supervisor", "Projects Manager"]]]
}
]
# before_tests = "one_fm.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "one_fm.event.get_events"
# }
ShiftType.process_auto_attendance = process_auto_attendance
| 34.625247
| 199
| 0.735802
|
78abafdd647de2748e8f27c9e6f1f979f0d1325f
| 3,052
|
py
|
Python
|
application.py
|
lalebdi/FlaskDataProcess
|
d305526bbcf44b301f9789af9de2f8b71e7c19c3
|
[
"Unlicense"
] | null | null | null |
application.py
|
lalebdi/FlaskDataProcess
|
d305526bbcf44b301f9789af9de2f8b71e7c19c3
|
[
"Unlicense"
] | null | null | null |
application.py
|
lalebdi/FlaskDataProcess
|
d305526bbcf44b301f9789af9de2f8b71e7c19c3
|
[
"Unlicense"
] | null | null | null |
from flask import Flask
from flask_restful import Api, Resource, reqparse
from nameparser import HumanName
application = Flask(__name__)
app = application
api = Api(app)
post_args = reqparse.RequestParser()
post_args.add_argument("value", type=str, help="Value is required to proceed", required=True)
post_args.add_argument("mode", type=str, help="Choose between phone || name || amount", required=True)
post_args.add_argument("replace_with", type=str, help="Choose either --blank-- || --original--", required=True)
def processing_data(data):
"""Takes in a dict data from the post request, returns a new dict with the data processed"""
template = [("original_value", data["value"]), ("mode", data["mode"])]
new_data = dict(template)
if data["mode"] == "name":
new_data["output"] = name_output_cleanup(data["value"], data["replace_with"])
elif data["mode"] == "phone":
new_data["output"] = extract_phone_number(data["value"], data["replace_with"])
elif data["mode"] == "amount":
new_data["output"] = process_amount(data["value"], data["replace_with"])
else:
return {'message': {'mode': "phone || name || amount"}}
return new_data
def name_output_cleanup(name_data, replacement):
"""Takes in the name value as string and replace_with arg as replacement, returns a dict with mapped name"""
unprocessed_name = HumanName(name_data).as_dict()
values = ['first', 'middle', 'last']
new_name = {
key: value for key, value in unprocessed_name.items() if value != ""
}
if all(key in new_name for key in values):
return new_name
if replacement == "--original--":
new_name = name_data
else:
new_name = dict([(key, "--blank--") for key in values])
return new_name
def extract_phone_number(num, replacement):
"""Takes in the phone number as string and replace_with arg as replacement, returns processed phone num or None"""
phone_num = "".join(i for i in num if i.isdigit())
if len(phone_num) != 10:
phone_num = replacement if replacement == "--blank--" else num
return phone_num
def process_amount(input_string, replacement):
"""Takes in input_string (amount) as a string and replace_with arg as replacement, returns the the processed str"""
new_amount = "".join(
char for char in input_string if char.isdigit() or char == "."
)
if new_amount == "":
if replacement == "--blank--":
return "--blank--"
else:
return input_string
return "{:.2f}".format(float(new_amount))
class DataProcess(Resource):
def get(self):
return {"message": 'This URL will accept a POST call with the following payload : '
'{"value": "<value goes here>", "mode": "phone || name || amount", "replace_with": "--blank-- || --original--"}'}
def post(self):
args = post_args.parse_args()
output_data = processing_data(args)
return output_data, 201
api.add_resource(DataProcess, "/")
| 34.681818
| 140
| 0.650721
|
56b36372abf828a8175c739e9e0f487791ba09a2
| 11,844
|
py
|
Python
|
reid/exclusive_loss.py
|
khko1022/bottom_up_reid
|
e60e86f5504f72b3ff8258702cd30c08f5a745f7
|
[
"MIT"
] | null | null | null |
reid/exclusive_loss.py
|
khko1022/bottom_up_reid
|
e60e86f5504f72b3ff8258702cd30c08f5a745f7
|
[
"MIT"
] | null | null | null |
reid/exclusive_loss.py
|
khko1022/bottom_up_reid
|
e60e86f5504f72b3ff8258702cd30c08f5a745f7
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import torch
import torch.nn.functional as F
import numpy as np
from torch import nn, autograd
class Real_negative_exclusive(autograd.Function):
def __init__(self, V, label_to_pairs, all_label_to_clusterid):
super(Real_negative_exclusive, self).__init__()
self.V = V
self.label_to_pairs=label_to_pairs
self.all_label_to_clusterid=all_label_to_clusterid
def forward(self, inputs, targets):
self.save_for_backward(inputs, targets)
outputs = inputs.mm(self.V.t())
for i, pairs in enumerate(self.label_to_pairs):
mask=torch.ones(len(self.V)).type(torch.bool)
mask_=list(set([ clusterid for j, clusterid in enumerate(self.all_label_to_clusterid) if j in pairs[1]]))
mask[mask_]=False
outputs[i, mask]=0
return outputs
def backward(self, grad_outputs):
inputs, targets = self.saved_tensors
grad_inputs = grad_outputs.mm(self.V) if self.needs_input_grad[0] else None
for x, y in zip(inputs, targets):
self.V[y] = F.normalize( (self.V[y] + x) / 2, p=2, dim=0)
return grad_inputs, None
class Exclusive(autograd.Function):
def __init__(self, V):
super(Exclusive, self).__init__()
self.V = V
def forward(self, inputs, targets):
self.save_for_backward(inputs, targets)
outputs = inputs.mm(self.V.t())
return outputs
def backward(self, grad_outputs):
inputs, targets = self.saved_tensors
grad_inputs = grad_outputs.mm(self.V) if self.needs_input_grad[0] else None
for x, y in zip(inputs, targets):
self.V[y] = F.normalize( (self.V[y] + x) / 2, p=2, dim=0)
return grad_inputs, None
class ExLoss(nn.Module):
def __init__(self, num_features, num_classes, bottom_up_real_negative=False, ms_table=False, ms_real_negative=False, t=1.0, weight=None):
super(ExLoss, self).__init__()
self.num_features = num_features
self.t = t
self.weight = weight
self.register_buffer('V', torch.zeros(num_classes, num_features))
self.w_bu=1.
self.w_ms=0.
self.bottom_up_real_negative=bottom_up_real_negative
self.ms_real_negative=ms_real_negative
self.ms_table=ms_table
self.p_margin=0.5
self.n_margin=0.5
self.num_pos=0
self.num_pos_notable=0
self.num_hpos=0
self.num_neg=0
self.num_neg_notable=0
self.num_hneg=0
print('w_bu: %.2f, w_ms: %.2f, p_margin: %.2f, n_margin: %.2f'%(self.w_bu, self.w_ms, self.p_margin, self.n_margin))
print('bottom_up_real_negative: %s, ms_real_negative: %s, ms_table: %s'%(self.bottom_up_real_negative, self.ms_real_negative, self.ms_table))
def forward(self, inputs, targets, indexs, label_to_pairs, all_label_to_clusterid):
# bu loss
if self.bottom_up_real_negative:
outputs = Real_negative_exclusive(self.V, label_to_pairs, all_label_to_clusterid)(inputs, targets) * self.t
bu_loss = F.cross_entropy(outputs, targets, weight=self.weight)
else:
outputs = Exclusive(self.V)(inputs, targets) * self.t
bu_loss = F.cross_entropy(outputs, targets, weight=self.weight)
# ms loss
if self.ms_real_negative: ms_loss, outputs = self.real_negative_ms(inputs, targets, indexs, label_to_pairs, all_label_to_clusterid)
else: ms_loss, outputs = self.ms(inputs, targets, indexs, label_to_pairs, all_label_to_clusterid)
# loss=self.w_bu*bu_loss
# loss=self.w_ms*ms_loss
loss=self.w_ms*ms_loss+self.w_bu*bu_loss
print('in')
return loss, outputs
## hard negative mining
def real_negative_ms(self, inputs, targets, indexs, label_to_pairs, all_label_to_clusterid):
ms_loss=[]
sims=inputs.mm(inputs.t())
# if self.ms_real_negative: tsims=Exclusive(self.V)(inputs, targets, label_to_pairs, all_label_to_clusterid)
# else: tsims=no_prior_Exclusive(self.V)(inputs, targets)
tsims=Exclusive(self.V)(inputs, targets)
psims=[[] for i in range(sims.shape[0])]
nsims=[[] for i in range(sims.shape[0])]
for i, (pairs, target) in enumerate(zip(label_to_pairs, targets)):
# positive
for ppair in pairs[0]:
if len((ppair==indexs).nonzero())!=0:
for index in [(ppair==indexs).nonzero().item()]:
psims[i].append(sims[i, index])
self.num_pos_notable+=1
if self.ms_table:
if tsims[i, target] !=0: psims[i].append(tsims[i, target])
# negative
for npair in pairs[1]:
if len((npair==indexs).nonzero())!=0:
for index in [(npair==indexs).nonzero().item()]:
nsims[i].append(sims[i, index])
self.num_neg_notable+=1
if self.ms_table:
if tsims[i, all_label_to_clusterid[npair]] !=0: nsims[i].append(tsims[i, all_label_to_clusterid[npair]])
# threshold
p_thrds=[ max(nsim or [torch.tensor(-3).cuda()]) for nsim in nsims]
p_thrds=list(map(lambda x: x+self.p_margin, p_thrds))
n_thrds=[ min(psim or [torch.tensor(3).cuda()]) for psim in psims]
n_thrds=list(map(lambda x: x-self.n_margin, n_thrds))
        # hard positive and hard negative
hpsims=[ list(filter(lambda x: x<p_thrds[i], psim)) for i, psim in enumerate(psims)]
hpsims=sum(hpsims, [])
hnsims=[ list(filter(lambda x: ((x>n_thrds[i])& (x<torch.tensor(0.999999).cuda())), nsim)) for i, nsim in enumerate(nsims)]
hnsims=sum(hnsims, [])
if len(hpsims)==0: hp_loss=tsims.mean()*torch.zeros(1).cuda()
else:
hpsims_=torch.stack(hpsims)
# hp_loss=F.mse_loss(hpsims_, torch.ones(hpsims_.shape).cuda())
hp_loss=F.binary_cross_entropy_with_logits(hpsims_, torch.ones(hpsims_.shape).cuda())
# hp_loss = 1.0 / 2 * torch.log(1 + torch.sum(torch.exp(-2 * (hpsims_ - 0.5))))
if len(hnsims)==0: hn_loss=tsims.mean()*torch.zeros(1).cuda()
else:
hnsims_=torch.stack(hnsims)
# hn_loss=F.mse_loss(hnsims_, -torch.ones(hnsims_.shape).cuda())
hn_loss=F.binary_cross_entropy_with_logits(hnsims_, torch.zeros(hnsims_.shape).cuda())
# hn_loss = 1.0 / 50 * torch.log(1 + torch.sum(torch.exp( 50 * (hnsims_ - 0.5))))
        # combine the hard-positive and hard-negative losses
ms_loss=hp_loss+hn_loss
self.num_pos+=len(sum(psims, []))
self.num_hpos+=len(hpsims)
self.num_neg+=len(sum(nsims, []))
self.num_hneg+=len(hnsims)
return ms_loss, self.t*tsims
## hard negative mining
def ms(self, inputs, targets, indexs, label_to_pairs, all_label_to_clusterid):
ms_loss=[]
sims=inputs.mm(inputs.t())
# if self.ms_real_negative: tsims=Exclusive(self.V)(inputs, targets, label_to_pairs)
# else: tsims=no_prior_Exclusive(self.V)(inputs, targets)
tsims=Exclusive(self.V)(inputs, targets)
psims=[[] for i in range(sims.shape[0])]
nsims=[[] for i in range(sims.shape[0])]
for i, target in enumerate(targets):
# positive
for index in (target==targets).nonzero().view(-1).tolist():
psims[i].append(sims[i, index])
self.num_pos_notable+=1
if self.ms_table:
if tsims[i, target] !=0: psims[i].append(tsims[i, target])
# negative
for index in (target!=targets).nonzero().view(-1).tolist():
nsims[i].append(sims[i, index])
self.num_neg_notable+=1
if self.ms_table:
mask=torch.zeros(len(self.V)).type(torch.bool).cuda()
mask[target]=True
tsims[i, mask]=0
nsims[i].extend(tsims[i, :][tsims[i, :]!=0])
# threshold
p_thrds=[ max(nsim or [torch.tensor(-3).cuda()]) for nsim in nsims]
p_thrds=list(map(lambda x: x+self.p_margin, p_thrds))
n_thrds=[ min(psim or [torch.tensor(3).cuda()]) for psim in psims]
n_thrds=list(map(lambda x: x-self.n_margin, n_thrds))
        # hard positive and hard negative
hpsims=[ list(filter(lambda x: x<p_thrds[i], psim)) for i, psim in enumerate(psims)]
hpsims=sum(hpsims, [])
hnsims=[ list(filter(lambda x: ((x>n_thrds[i])& (x<torch.tensor(0.999999).cuda())), nsim)) for i, nsim in enumerate(nsims)]
hnsims=sum(hnsims, [])
if len(hpsims)==0: hp_loss=tsims.mean()*torch.zeros(1).cuda()
else:
hpsims_=torch.stack(hpsims)
# hp_loss=F.mse_loss(hpsims_, torch.ones(hpsims_.shape).cuda())
hp_loss=F.binary_cross_entropy_with_logits(hpsims_, torch.ones(hpsims_.shape).cuda())
# hp_loss = 1.0 / 2 * torch.log(1 + torch.sum(torch.exp(-2 * (hpsims_ - 0.5))))
if len(hnsims)==0: hn_loss=tsims.mean()*torch.zeros(1).cuda()
else:
hnsims_=torch.stack(hnsims)
# hn_loss=F.mse_loss(hnsims_, -torch.ones(hnsims_.shape).cuda())
hn_loss=F.binary_cross_entropy_with_logits(hnsims_, torch.zeros(hnsims_.shape).cuda())
# hn_loss = 1.0 / 50 * torch.log(1 + torch.sum(torch.exp( 50 * (hnsims_ - 0.5))))
        # combine the hard-positive and hard-negative losses
ms_loss=hp_loss+hn_loss
self.num_pos+=len(sum(psims, []))
self.num_hpos+=len(hpsims)
self.num_neg+=len(sum(nsims, []))
self.num_hneg+=len(hnsims)
return ms_loss, self.t*tsims
# ## hard negative mining with table self.V
# def ms_loss_with_table(self, inputs, targets, label_to_pairs, indexs, all_label_to_clusterid):
# # When table self.V is filled
# if len((torch.sum(self.V, 1)==torch.zeros(torch.sum(self.V, 1).shape).cuda()).nonzero())==0:
# # threshold
# normalized_inputs=F.normalize(inputs, dim=1)
# sims=normalized_inputs.mm(self.V.t())
# n_thrds=[]
# for i, target in enumerate(targets): n_thrds.append(sims[i, target])
# n_thrds=list(map(lambda x: x-self.n_margin, n_thrds))
# assert len(targets)==len(n_thrds), "n_thrds has wrong length."
# # hard negative
# tsims=self.V[targets].mm(self.V.t())
# nsims=[[] for i in range(tsims.shape[0])]
# for i, pairs in enumerate(label_to_pairs):
# all_label_to_clusterid_=list(set([ clusterid for i, clusterid in enumerate(all_label_to_clusterid) if i in pairs[1]]))
# for clusterid_ in all_label_to_clusterid_:
# nsims[i].append(tsims[i, clusterid_])
# hnsims=[ list(filter(lambda x: ((x>n_thrds[i]) & (x<0.999999)), nsim)) for i, nsim in enumerate(nsims)]
# hnsims=sum(hnsims, [])
# hnsims=torch.tensor(hnsims).cuda()
# if hnsims.shape[0]==0: hn_loss=torch.zeros(1).cuda()
# else:
# hn_loss=F.mse_loss(hnsims, torch.zeros(hnsims.shape).cuda())
# hn_loss=F.binary_cross_entropy_with_logits(hnsims, torch.zeros(hnsims.shape).cuda())
# # loss calculate ovelapped
# th_loss=hn_loss
# self.num_tneg+=len(sum(nsims, []))
# self.num_thneg+=hnsims.shape[0]
# else: th_loss=torch.zeros(1).cuda()
# return th_loss
## hard negative mining without prior
| 41.704225
| 149
| 0.595745
|
d182b153ab7aa62535e157f8ccdfe0ea7ca54922
| 6,527
|
py
|
Python
|
tests/test_price_handler.py
|
sohailalam2/qstrader
|
e6d86a3ac3dc507b26e27b1f20c2949a69438ef7
|
[
"MIT"
] | 113
|
2019-01-11T05:55:41.000Z
|
2022-03-27T23:49:47.000Z
|
tests/test_price_handler.py
|
sohailalam2/qstrader
|
e6d86a3ac3dc507b26e27b1f20c2949a69438ef7
|
[
"MIT"
] | 7
|
2019-04-09T05:30:24.000Z
|
2020-09-09T04:52:49.000Z
|
tests/test_price_handler.py
|
sohailalam2/qstrader
|
e6d86a3ac3dc507b26e27b1f20c2949a69438ef7
|
[
"MIT"
] | 54
|
2019-01-10T17:22:14.000Z
|
2022-03-15T23:47:43.000Z
|
import unittest
from qstrader.price_parser import PriceParser
from qstrader.price_handler.historic_csv_tick import HistoricCSVTickPriceHandler
from qstrader.compat import queue
from qstrader import settings
class TestPriceHandlerSimpleCase(unittest.TestCase):
"""
Test the initialisation of a PriceHandler object with
a small list of tickers. Concatenate the ticker data (
pre-generated and stored as a fixture) and stream the
subsequent ticks, checking that the correct bid-ask
values are returned.
"""
def setUp(self):
"""
Set up the PriceHandler object with a small
set of initial tickers.
"""
self.config = settings.TEST
fixtures_path = self.config.CSV_DATA_DIR
events_queue = queue.Queue()
init_tickers = ["GOOG", "AMZN", "MSFT"]
self.price_handler = HistoricCSVTickPriceHandler(
fixtures_path, events_queue, init_tickers
)
def test_stream_all_ticks(self):
"""
The initialisation of the class will open the three
test CSV files, then merge and sort them. They will
then be stored in a member "tick_stream". This will
be used for streaming the ticks.
"""
# Stream to Tick #1 (GOOG)
self.price_handler.stream_next()
self.assertEqual(
self.price_handler.tickers["GOOG"]["timestamp"].strftime(
"%d-%m-%Y %H:%M:%S.%f"
),
"01-02-2016 00:00:01.358000"
)
self.assertEqual(
PriceParser.display(self.price_handler.tickers["GOOG"]["bid"], 5),
683.56000
)
self.assertEqual(
PriceParser.display(self.price_handler.tickers["GOOG"]["ask"], 5),
683.58000
)
# Stream to Tick #2 (AMZN)
self.price_handler.stream_next()
self.assertEqual(
self.price_handler.tickers["AMZN"]["timestamp"].strftime(
"%d-%m-%Y %H:%M:%S.%f"
),
"01-02-2016 00:00:01.562000"
)
self.assertEqual(
PriceParser.display(self.price_handler.tickers["AMZN"]["bid"], 5),
502.10001
)
self.assertEqual(
PriceParser.display(self.price_handler.tickers["AMZN"]["ask"], 5),
502.11999
)
# Stream to Tick #3 (MSFT)
self.price_handler.stream_next()
self.assertEqual(
self.price_handler.tickers["MSFT"]["timestamp"].strftime(
"%d-%m-%Y %H:%M:%S.%f"
),
"01-02-2016 00:00:01.578000"
)
self.assertEqual(
PriceParser.display(self.price_handler.tickers["MSFT"]["bid"], 5),
50.14999
)
self.assertEqual(
PriceParser.display(self.price_handler.tickers["MSFT"]["ask"], 5),
50.17001
)
# Stream to Tick #10 (GOOG)
for i in range(4, 11):
self.price_handler.stream_next()
self.assertEqual(
self.price_handler.tickers["GOOG"]["timestamp"].strftime(
"%d-%m-%Y %H:%M:%S.%f"
),
"01-02-2016 00:00:05.215000"
)
self.assertEqual(
PriceParser.display(self.price_handler.tickers["GOOG"]["bid"], 5),
683.56001
)
self.assertEqual(
PriceParser.display(self.price_handler.tickers["GOOG"]["ask"], 5),
683.57999
)
# Stream to Tick #20 (GOOG)
for i in range(11, 21):
self.price_handler.stream_next()
self.assertEqual(
self.price_handler.tickers["MSFT"]["timestamp"].strftime(
"%d-%m-%Y %H:%M:%S.%f"
),
"01-02-2016 00:00:09.904000"
)
self.assertEqual(
PriceParser.display(self.price_handler.tickers["MSFT"]["bid"], 5),
50.15000
)
self.assertEqual(
PriceParser.display(self.price_handler.tickers["MSFT"]["ask"], 5),
50.17000
)
# Stream to Tick #30 (final tick, AMZN)
for i in range(21, 31):
self.price_handler.stream_next()
self.assertEqual(
self.price_handler.tickers["AMZN"]["timestamp"].strftime(
"%d-%m-%Y %H:%M:%S.%f"
),
"01-02-2016 00:00:14.616000"
)
self.assertEqual(
PriceParser.display(self.price_handler.tickers["AMZN"]["bid"], 5),
502.10015
)
self.assertEqual(
PriceParser.display(self.price_handler.tickers["AMZN"]["ask"], 5),
502.11985
)
def test_subscribe_unsubscribe(self):
"""
Tests the 'subscribe_ticker' and 'unsubscribe_ticker'
methods, and check that they raise exceptions when
appropriate.
"""
# Check unsubscribing a ticker that isn't
# in the price handler list
# self.assertRaises(
# KeyError, lambda: self.price_handler.unsubscribe_ticker("PG")
# )
# Check a ticker that is already subscribed
# to make sure that it doesn't raise an exception
try:
self.price_handler.subscribe_ticker("GOOG")
except Exception as E:
self.fail("subscribe_ticker() raised %s unexpectedly" % E)
# Subscribe a new ticker, without CSV
# self.assertRaises(
# IOError, lambda: self.price_handler.subscribe_ticker("XOM")
# )
# Unsubscribe a current ticker
self.assertTrue("GOOG" in self.price_handler.tickers)
self.assertTrue("GOOG" in self.price_handler.tickers_data)
self.price_handler.unsubscribe_ticker("GOOG")
self.assertTrue("GOOG" not in self.price_handler.tickers)
self.assertTrue("GOOG" not in self.price_handler.tickers_data)
def test_get_best_bid_ask(self):
"""
Tests that the 'get_best_bid_ask' method produces the
correct values depending upon validity of ticker.
"""
bid, ask = self.price_handler.get_best_bid_ask("AMZN")
self.assertEqual(PriceParser.display(bid, 5), 502.10001)
self.assertEqual(PriceParser.display(ask, 5), 502.11999)
bid, ask = self.price_handler.get_best_bid_ask("C")
# TODO WHAT TO DO HERE?.
# self.assertEqual(PriceParser.display(bid, 5), None)
# self.assertEqual(PriceParser.display(ask, 5), None)
if __name__ == "__main__":
unittest.main()
| 34.172775
| 80
| 0.576835
|
fcd02c17a25ebac1ca85a4e924258629118c47e7
| 339
|
py
|
Python
|
Exercises/2.square_elements_and_filter.py
|
GiorgosXonikis/Python-Sample-Code
|
8d31444171138995f740128716e45b29f5e1f7a1
|
[
"MIT"
] | null | null | null |
Exercises/2.square_elements_and_filter.py
|
GiorgosXonikis/Python-Sample-Code
|
8d31444171138995f740128716e45b29f5e1f7a1
|
[
"MIT"
] | null | null | null |
Exercises/2.square_elements_and_filter.py
|
GiorgosXonikis/Python-Sample-Code
|
8d31444171138995f740128716e45b29f5e1f7a1
|
[
"MIT"
] | null | null | null |
# 2. Square Elements
# Write a program that accepts a list and returns the square of each element.
# The list is: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10].
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
square_numbers = [num ** 2 for num in numbers]
square_numbers_map = list(map(lambda num: num ** 2, numbers))
print(square_numbers)
print(square_numbers_map)
| 26.076923
| 76
| 0.690265
|
ccfc5f135eef7b34124ddce20fde1e08fd006f7a
| 10,878
|
py
|
Python
|
person_follower/scripts/PersonFollower.py
|
FablabHome/The_Essense_of_the_Grey_Region
|
6385ada0879bdc6c00cb707192841fdab9ab7bf1
|
[
"MIT"
] | 1
|
2021-09-23T09:42:32.000Z
|
2021-09-23T09:42:32.000Z
|
person_follower/scripts/PersonFollower.py
|
FablabHome/The_Essense_of_the_Grey_Region
|
6385ada0879bdc6c00cb707192841fdab9ab7bf1
|
[
"MIT"
] | null | null | null |
person_follower/scripts/PersonFollower.py
|
FablabHome/The_Essense_of_the_Grey_Region
|
6385ada0879bdc6c00cb707192841fdab9ab7bf1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
MIT License
Copyright (c) 2020 rootadminWalker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from copy import deepcopy
from typing import List
import numpy as np
import rospy
from cv_bridge import CvBridge
from home_robot_msgs.msg import ObjectBoxes, ObjectBox
from home_robot_msgs.srv import PFInitializer, PFInitializerRequest, PFInitializerResponse, ResetPF, ResetPFRequest
from sensor_msgs.msg import CompressedImage
from std_srvs.srv import Trigger
from core.Detection import PersonReidentification
from core.Dtypes import BBox
from core.Nodes import Node
class PersonFollower(Node):
H = 480
W = 640
CENTROID = (W // 2, H // 2)
SIMIL_ERROR = 0.55
STATE = 'NOT_INITIALIZED' # NORMAL, CONFIRM_LOST, LOST, CONFIRM_REIDENTIFIED
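    # Transition summary (as implemented in initialized_cb() and main() below):
    #   NOT_INITIALIZED -> NORMAL                   once the pf_initialize service succeeds
    #   NORMAL -> CONFIRM_LOST -> LOST              when the target disappears and the lost timeout expires
    #   LOST -> CONFIRM_REIDENTIFIED -> NORMAL      when the target reappears and the re-id timeout expires
    #   CONFIRM_LOST -> NORMAL / CONFIRM_REIDENTIFIED -> LOST  when the intermediate check flips back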
# Timeouts
CONFIRM_LOST_TIMEOUT = rospy.Duration(0.5)
CONFIRM_REIDENTIFIED_TIMEOUT = rospy.Duration(0.5)
# Threshold for the system to determine if a box was a close box
CLOSE_BOX_IOU_THRESHOLD = 0.83
NORMAL_BOX_PADDING = (35, 35)
SEARCH_BOX_PADDING = (25, 25)
def __init__(self):
super(PersonFollower, self).__init__('person_follower', anonymous=False)
bin_path = rospy.get_param('~bin_path')
xml_path = rospy.get_param('~xml_path')
self.person_reid = PersonReidentification(bin_path, xml_path)
self.bridge = CvBridge()
self.front_descriptor = self.back_descriptor = None
self.estimated_target_box = self.last_estimated_box = None
self.rgb_image = None
self.front_img = self.back_img = None
self.detection_boxes = []
# Initialization host building
rospy.Service('pf_initialize', PFInitializer, self.initialized_cb)
# The reset service server
rospy.Service('pf_reset', ResetPF, self.on_reset)
self.__question_answered = False # Just for playing
# Proxy of PFInitialize reset service
self.reset_initializer = rospy.ServiceProxy('pf_init_reset', Trigger)
# This publisher will only publish the box which the system was currently following
self.current_following_box_pub = rospy.Publisher(
'~current_following_box',
ObjectBox,
queue_size=1
)
# This publisher will publish the box whenever person re-id recognized the box
self.estimated_target_publisher = rospy.Publisher(
"~estimated_target_box",
ObjectBox,
queue_size=1,
)
self.image_publisher = rospy.Publisher(
'/PF/drown_image',
CompressedImage,
queue_size=1
)
rospy.Subscriber(
'/YD/boxes',
ObjectBoxes,
self.box_callback,
queue_size=1
)
rospy.set_param("~state", PersonFollower.STATE)
self.main()
def initialized_cb(self, req: PFInitializerRequest):
self.front_descriptor = np.array(req.front_descriptor)
self.back_descriptor = np.array(req.back_descriptor)
self.front_img = self.bridge.compressed_imgmsg_to_cv2(req.front_img)
self.back_img = self.bridge.compressed_imgmsg_to_cv2(req.back_img)
PersonFollower.STATE = 'NORMAL'
rospy.set_param('~state', PersonFollower.STATE)
return PFInitializerResponse(True)
def on_reset(self, req: ResetPFRequest):
# Just for playing
rospy.set_param("~todays_question", "V2hvJ3MgdGhlIGFic29sdXRlIGdvZCBvZiBoeXBlcmRlYXRoPw==")
answer = 'QXNyaWVsIERyZWVtdXJy'
user_answer = req.answer
if user_answer == answer:
self.reset()
self.reset_initializer()
return True
else:
return False
def box_callback(self, detections: ObjectBoxes):
if PersonFollower.STATE != 'NOT_INITIALIZED':
self.detection_boxes = BBox.from_ObjectBoxes(detections)
self.detection_boxes = list(filter(lambda b: b.label.strip() == 'person', self.detection_boxes))
self.rgb_image = self.bridge.compressed_imgmsg_to_cv2(detections.source_img)
H, W, _ = self.rgb_image.shape
PersonFollower.W = W
PersonFollower.CENTROID = (PersonFollower.W // 2, PersonFollower.H // 2)
@staticmethod
def __similarity_lt(similarity):
return similarity > PersonFollower.SIMIL_ERROR
def find_target_person(self, person_boxes: List[BBox]):
for person_box in person_boxes:
current_descriptor = self.person_reid.extract_descriptor(person_box.source_img, crop=False)
# Compare the descriptors
front_similarity = self.person_reid.compare_descriptors(current_descriptor, self.front_descriptor)
back_similarity = self.person_reid.compare_descriptors(current_descriptor, self.back_descriptor)
if self.__similarity_lt(front_similarity) or self.__similarity_lt(back_similarity):
return person_box
else:
return None
@staticmethod
def find_tmp_person(search_box: BBox, person_boxes: List[BBox]):
for person_box in person_boxes:
if person_box.is_inBox(search_box):
return person_box
else:
return None
@staticmethod
def find_overlapped_boxes(target_calc_box: BBox, person_boxes: List[BBox]):
return list(filter(
lambda b: b.iou_score_with(target_calc_box) >= PersonFollower.CLOSE_BOX_IOU_THRESHOLD,
person_boxes
))
def main(self):
# Initialize the timeouts
confirm_lost_timeout = rospy.get_rostime() + PersonFollower.CONFIRM_LOST_TIMEOUT
confirm_reidentified_timeout = rospy.get_rostime() + PersonFollower.CONFIRM_REIDENTIFIED_TIMEOUT
while not rospy.is_shutdown():
if self.rgb_image is None or PersonFollower.STATE == 'NOT_INITIALIZED':
continue
# Update self.last_box only if the target_box was confirmed
if self.estimated_target_box is not None:
self.last_estimated_box = deepcopy(self.estimated_target_box)
self.estimated_target_box = None
# Copy the detection boxes out for safety
current_detection_boxes = deepcopy(self.detection_boxes)
# Get the target boxes
self.estimated_target_box = self.find_target_person(current_detection_boxes)
current_following_box = BBox(label='unrecognized')
if self.estimated_target_box:
# If the current state is NORMAL, publish the box out
if PersonFollower.STATE == 'NORMAL':
current_following_box = self.estimated_target_box
# If the current state is CONFIRM_LOST, then just jump
# back to NORMAL
elif PersonFollower.STATE == 'CONFIRM_LOST':
PersonFollower.STATE = 'NORMAL'
# If the current state is LOST, then go into CONFIRM_REIDENTIFIED
elif PersonFollower.STATE == 'LOST':
confirm_reidentified_timeout = rospy.get_rostime() + PersonFollower.CONFIRM_REIDENTIFIED_TIMEOUT
PersonFollower.STATE = 'CONFIRM_REIDENTIFIED'
# If current state is CONFIRM_REIDENTIFIED, wait for the timeout
elif PersonFollower.STATE == 'CONFIRM_REIDENTIFIED':
if rospy.get_rostime() - confirm_reidentified_timeout >= rospy.Duration(0):
PersonFollower.STATE = 'NORMAL'
# Publish the estimated box without dealing with states
self.estimated_target_publisher.publish(self.estimated_target_box.serialize_as_ObjectBox())
else:
                # If the program lost the target, go into CONFIRM_LOST to confirm
# if the target was truly lost
if PersonFollower.STATE == 'NORMAL':
PersonFollower.STATE = 'CONFIRM_LOST'
confirm_lost_timeout = rospy.get_rostime() + PersonFollower.CONFIRM_LOST_TIMEOUT
                # If the program was confirming the loss, wait for the timeout while
                # following a person inside the padding_box around the target's last known position
elif PersonFollower.STATE == 'CONFIRM_LOST':
# If the timeout has exceeded, then the program will consider the target
# was truly lost
if rospy.get_rostime() - confirm_lost_timeout >= rospy.Duration(0):
PersonFollower.STATE = 'LOST'
                    # Follow a temporary person who is inside the
                    # padding_box of the last detected box
search_box = self.last_estimated_box.generate_padding_box(
padding=PersonFollower.SEARCH_BOX_PADDING,
shape=(PersonFollower.H, PersonFollower.W)
)
tmp_box = self.find_tmp_person(search_box, current_detection_boxes)
if tmp_box:
current_following_box = tmp_box
                # If the state was CONFIRM_REIDENTIFIED, it just goes back into LOST
elif PersonFollower.STATE == 'CONFIRM_REIDENTIFIED':
PersonFollower.STATE = 'LOST'
# Set the current state
rospy.set_param("~state", PersonFollower.STATE)
# Publish the to-follow box to the PFRobotHandler
self.current_following_box_pub.publish(current_following_box.serialize_as_ObjectBox())
self.rate.sleep()
def reset(self):
self.detection_boxes = []
PersonFollower.STATE = 'NOT_INITIALIZED'
# self.front_descriptor = self.back_descriptor = None
if __name__ == '__main__':
node = PersonFollower()
| 39.413043
| 116
| 0.66207
|
9ffdaba7a07098cd2552528499bdb0792a8de2e8
| 2,369
|
py
|
Python
|
ansible/venv/lib/python2.7/site-packages/ansible/plugins/httpapi/qradar.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 17
|
2017-06-07T23:15:01.000Z
|
2021-08-30T14:32:36.000Z
|
ansible/ansible/plugins/httpapi/qradar.py
|
SergeyCherepanov/ansible
|
875711cd2fd6b783c812241c2ed7a954bf6f670f
|
[
"MIT"
] | 9
|
2017-06-25T03:31:52.000Z
|
2021-05-17T23:43:12.000Z
|
ansible/ansible/plugins/httpapi/qradar.py
|
SergeyCherepanov/ansible
|
875711cd2fd6b783c812241c2ed7a954bf6f670f
|
[
"MIT"
] | 3
|
2018-05-26T21:31:22.000Z
|
2019-09-28T17:00:45.000Z
|
# (c) 2019 Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
author: Ansible Security Automation Team
httpapi : qradar
short_description: HttpApi Plugin for IBM QRadar appliances
description:
- This HttpApi plugin provides methods to connect to IBM QRadar
    appliances over an HTTP(S)-based API.
version_added: "2.8"
"""
import json
from ansible.module_utils.basic import to_text
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.plugins.httpapi import HttpApiBase
from ansible.module_utils.connection import ConnectionError
# Content Type and QRadar REST API Version
BASE_HEADERS = {
'Content-Type': 'application/json;charset=UTF-8',
'Version': '9.1',
}
class HttpApi(HttpApiBase):
def logout(self):
response, dummy = self.send_request('POST', '/api/auth/logout')
def send_request(self, request_method, path, payload=None):
data = json.dumps(payload) if payload else '{}'
try:
self._display_request(request_method)
response, response_data = self.connection.send(path, payload, method=request_method, headers=BASE_HEADERS, force_basic_auth=True)
value = self._get_response_value(response_data)
return response.getcode(), self._response_to_json(value)
except AnsibleConnectionFailure as e:
if to_text('401') in to_text(e):
return 401, 'Authentication failure'
else:
return 404, 'Object not found'
except HTTPError as e:
error = json.loads(e.read())
return e.code, error
def _display_request(self, request_method):
self.connection.queue_message('vvvv', 'Web Services: %s %s' % (request_method, self.connection._url))
def _get_response_value(self, response_data):
return to_text(response_data.getvalue())
def _response_to_json(self, response_text):
try:
return json.loads(response_text) if response_text else {}
# JSONDecodeError only available on Python 3.5+
except ValueError:
raise ConnectionError('Invalid JSON response: %s' % response_text)
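# Hedged sketch of how a module would typically reach this plugin over a persistent
# connection (module-side code, not part of this file; the endpoint path is illustrative):
#   from ansible.module_utils.connection import Connection
#   connection = Connection(module._socket_path)
#   code, response = connection.send_request('GET', '/api/siem/offenses')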
| 34.838235
| 141
| 0.700718
|
7c311456528d0318925da286c293fd57c7d0d0dc
| 3,980
|
py
|
Python
|
sense_energy_prometheus_exporter/sense_energy_prometheus_exporter/collectors/VoltageCollector.py
|
davidwilemski/sense_energy_prometheus_exporter
|
6966ee0fc48d56e262285677fc7b81500aea7dee
|
[
"MIT"
] | 8
|
2021-07-29T22:11:31.000Z
|
2022-03-04T17:34:20.000Z
|
sense_energy_prometheus_exporter/sense_energy_prometheus_exporter/collectors/VoltageCollector.py
|
davidwilemski/sense_energy_prometheus_exporter
|
6966ee0fc48d56e262285677fc7b81500aea7dee
|
[
"MIT"
] | 5
|
2021-07-16T15:08:15.000Z
|
2022-01-07T05:05:44.000Z
|
sense_energy_prometheus_exporter/sense_energy_prometheus_exporter/collectors/VoltageCollector.py
|
davidwilemski/sense_energy_prometheus_exporter
|
6966ee0fc48d56e262285677fc7b81500aea7dee
|
[
"MIT"
] | 1
|
2021-08-03T05:22:51.000Z
|
2021-08-03T05:22:51.000Z
|
import time
import logging
from typing import List
from .CustomCollector import CustomCollector
from ..client.sense import SenseClient, SenseVoltage
from prometheus_client.core import GaugeMetricFamily, Counter, Gauge
class VoltageCollector(CustomCollector):
voltageMetric: GaugeMetricFamily
scrapesTotalMetric: Counter
scrapeErrorsTotalMetric: Counter
lastScrapeErrorMetric: Gauge
lastScrapeTimestampMetric: Gauge
lastScrapeDurationSecondsMetric: Gauge
# the subsystem prefix is a name for the item type you are scraping
subsystemPrefix: str = "voltage"
# the set of labels that describe each of your items that you are scraping
    voltageLabels: List[str] = ["account", "name"]
# This is the client that calls external apis for the items you want to scrape
sense_client: SenseClient
def __init__(self, sense_client: SenseClient, namespace: str):
super().__init__(namespace)
self.sense_client = sense_client
self.scrapesTotalMetric = Counter("total", f"Total number of scrapes for {self.subsystemPrefix} stats",
subsystem=f"{self.subsystemPrefix}_scrapes",
namespace=self.namespace)
self.scrapeErrorsTotalMetric = Counter("total",
f"Total number of scrape errors for {self.subsystemPrefix} stats",
subsystem=f"{self.subsystemPrefix}_scrape_errors",
namespace=self.namespace)
self.lastScrapeErrorMetric = Gauge(f"last_{self.subsystemPrefix}_scrape_error",
f"Status of last scrape for {self.subsystemPrefix} stats (1=error, 0=success)",
subsystem="", namespace=self.namespace)
self.lastScrapeTimestampMetric = Gauge(f"last_{self.subsystemPrefix}_scrape_timestamp",
f"Number of seconds between 1970 and last scrape for {self.subsystemPrefix} stats",
subsystem="", namespace=self.namespace)
self.lastScrapeDurationSecondsMetric = Gauge(f"last_{self.subsystemPrefix}_scrape_duration_seconds",
f"Duration of last scrape for {self.subsystemPrefix} stats",
subsystem="", namespace=self.namespace)
    def reset(self):
        '''
        This function resets the stats that are scraped from external APIs.
        '''
self.voltageMetric = GaugeMetricFamily(self.build_name("volts", self.subsystemPrefix),
f"{self.subsystemPrefix} measurement in volts",
labels=self.voltageLabels)
def collect(self):
time_start = time.time()
logging.debug(f"Collecting {self.subsystemPrefix} stats")
logging.debug("Resetting per-call metrics")
self.reset()
error_status = 0
try:
# this is where the api-scraping logic goes
logging.debug("Collecting data for voltage")
            voltages: List[SenseVoltage] = self.sense_client.voltages
for voltage in voltages:
labels = [voltage[label] for label in self.voltageLabels]
self.voltageMetric.add_metric(
labels=labels, value=voltage.volts)
except Exception as e:
logging.error("Error while getting %s stats via client: %s", self.subsystemPrefix, e)
error_status = 1
self.scrapeErrorsTotalMetric.inc()
self.scrapesTotalMetric.inc()
self.lastScrapeErrorMetric.set(error_status)
time_end = time.time()
self.lastScrapeTimestampMetric.set(time_end)
self.lastScrapeDurationSecondsMetric.set(time_end - time_start)
return [self.voltageMetric]
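# --- Hypothetical wiring sketch (not from this repository) -------------------
# Shows how a collector like the one above is typically registered and exposed
# with prometheus_client; the SenseClient constructor arguments and the port
# are assumptions for illustration only.
#
#   from prometheus_client import start_http_server
#   from prometheus_client.core import REGISTRY
#
#   sense_client = SenseClient(username="user@example.com", password="secret")  # assumed signature
#   collector = VoltageCollector(sense_client, namespace="sense")
#   REGISTRY.register(collector)     # collect() then runs on every /metrics scrape
#   start_http_server(9841)          # arbitrary example port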
| 45.747126
| 130
| 0.610302
|
c3898d5a0252320bd6c864dc169fa4937aa82a52
| 368
|
py
|
Python
|
lesson2/web/src/routes0/app.py
|
lucasca95/harvard_flask_python_course
|
76ac51ae6f6b5f5e7460c9c9c40d0c1be52779e1
|
[
"Apache-2.0"
] | null | null | null |
lesson2/web/src/routes0/app.py
|
lucasca95/harvard_flask_python_course
|
76ac51ae6f6b5f5e7460c9c9c40d0c1be52779e1
|
[
"Apache-2.0"
] | null | null | null |
lesson2/web/src/routes0/app.py
|
lucasca95/harvard_flask_python_course
|
76ac51ae6f6b5f5e7460c9c9c40d0c1be52779e1
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask
# Using "__name__", we tell Flask that this file ("application.py")
# will be a web application.
app = Flask(__name__)
# The decorator specifies what to do when the indicated route is accessed;
# in this case, "/"
@app.route("/")
def index():
return "Hello!"
@app.route("/david")
def david():
return "Hello, David!"
| 24.533333
| 84
| 0.703804
|
87a72b4385e4f7b1a0bd266db2f0aa061a3fe143
| 8,684
|
py
|
Python
|
tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/mypyc/irbuild/env_class.py
|
iLuSIAnn/test
|
10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e
|
[
"Apache-2.0"
] | 35
|
2016-03-30T09:25:14.000Z
|
2022-03-12T10:53:11.000Z
|
tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/mypyc/irbuild/env_class.py
|
iLuSIAnn/test
|
10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e
|
[
"Apache-2.0"
] | 36
|
2020-07-27T23:26:53.000Z
|
2021-08-02T23:22:37.000Z
|
tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/mypyc/irbuild/env_class.py
|
iLuSIAnn/test
|
10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e
|
[
"Apache-2.0"
] | 6
|
2016-01-29T04:33:27.000Z
|
2019-11-03T19:19:43.000Z
|
"""Generate classes representing function environments (+ related operations).
If we have a nested function that has non-local (free) variables, access to the
non-locals is via an instance of an environment class. Example:
def f() -> int:
x = 0 # Make 'x' an attribute of an environment class instance
def g() -> int:
# We have access to the environment class instance to
# allow accessing 'x'
return x + 2
    x = x + 1  # Modify the attribute
return g()
"""
from typing import Optional, Union
from mypy.nodes import FuncDef, SymbolNode
from mypyc.common import SELF_NAME, ENV_ATTR_NAME
from mypyc.ir.ops import Call, GetAttr, SetAttr, Value, Environment, AssignmentTargetAttr
from mypyc.ir.rtypes import RInstance, object_rprimitive
from mypyc.ir.class_ir import ClassIR
from mypyc.irbuild.builder import IRBuilder
from mypyc.irbuild.context import FuncInfo, ImplicitClass, GeneratorClass
def setup_env_class(builder: IRBuilder) -> ClassIR:
"""Generate a class representing a function environment.
Note that the variables in the function environment are not
actually populated here. This is because when the environment
class is generated, the function environment has not yet been
visited. This behavior is allowed so that when the compiler visits
nested functions, it can use the returned ClassIR instance to
figure out free variables it needs to access. The remaining
attributes of the environment class are populated when the
environment registers are loaded.
Return a ClassIR representing an environment for a function
containing a nested function.
"""
env_class = ClassIR('{}_env'.format(builder.fn_info.namespaced_name()),
builder.module_name, is_generated=True)
env_class.attributes[SELF_NAME] = RInstance(env_class)
if builder.fn_info.is_nested:
# If the function is nested, its environment class must contain an environment
# attribute pointing to its encapsulating functions' environment class.
env_class.attributes[ENV_ATTR_NAME] = RInstance(builder.fn_infos[-2].env_class)
env_class.mro = [env_class]
builder.fn_info.env_class = env_class
builder.classes.append(env_class)
return env_class
def finalize_env_class(builder: IRBuilder) -> None:
"""Generate, instantiate, and set up the environment of an environment class."""
instantiate_env_class(builder)
# Iterate through the function arguments and replace local definitions (using registers)
# that were previously added to the environment with references to the function's
# environment class.
if builder.fn_info.is_nested:
add_args_to_env(builder, local=False, base=builder.fn_info.callable_class)
else:
add_args_to_env(builder, local=False, base=builder.fn_info)
def instantiate_env_class(builder: IRBuilder) -> Value:
"""Assign an environment class to a register named after the given function definition."""
curr_env_reg = builder.add(
Call(builder.fn_info.env_class.ctor, [], builder.fn_info.fitem.line)
)
if builder.fn_info.is_nested:
builder.fn_info.callable_class._curr_env_reg = curr_env_reg
builder.add(SetAttr(curr_env_reg,
ENV_ATTR_NAME,
builder.fn_info.callable_class.prev_env_reg,
builder.fn_info.fitem.line))
else:
builder.fn_info._curr_env_reg = curr_env_reg
return curr_env_reg
def load_env_registers(builder: IRBuilder) -> None:
"""Load the registers for the current FuncItem being visited.
Adds the arguments of the FuncItem to the environment. If the
FuncItem is nested inside of another function, then this also
loads all of the outer environments of the FuncItem into registers
so that they can be used when accessing free variables.
"""
add_args_to_env(builder, local=True)
fn_info = builder.fn_info
fitem = fn_info.fitem
if fn_info.is_nested:
load_outer_envs(builder, fn_info.callable_class)
# If this is a FuncDef, then make sure to load the FuncDef into its own environment
# class so that the function can be called recursively.
if isinstance(fitem, FuncDef):
setup_func_for_recursive_call(builder, fitem, fn_info.callable_class)
def load_outer_env(builder: IRBuilder, base: Value, outer_env: Environment) -> Value:
"""Load the environment class for a given base into a register.
Additionally, iterates through all of the SymbolNode and
AssignmentTarget instances of the environment at the given index's
symtable, and adds those instances to the environment of the
current environment. This is done so that the current environment
can access outer environment variables without having to reload
all of the environment registers.
Returns the register where the environment class was loaded.
"""
env = builder.add(GetAttr(base, ENV_ATTR_NAME, builder.fn_info.fitem.line))
assert isinstance(env.type, RInstance), '{} must be of type RInstance'.format(env)
for symbol, target in outer_env.symtable.items():
env.type.class_ir.attributes[symbol.name] = target.type
symbol_target = AssignmentTargetAttr(env, symbol.name)
builder.environment.add_target(symbol, symbol_target)
return env
def load_outer_envs(builder: IRBuilder, base: ImplicitClass) -> None:
index = len(builder.builders) - 2
# Load the first outer environment. This one is special because it gets saved in the
# FuncInfo instance's prev_env_reg field.
if index > 1:
# outer_env = builder.fn_infos[index].environment
outer_env = builder.builders[index].environment
if isinstance(base, GeneratorClass):
base.prev_env_reg = load_outer_env(builder, base.curr_env_reg, outer_env)
else:
base.prev_env_reg = load_outer_env(builder, base.self_reg, outer_env)
env_reg = base.prev_env_reg
index -= 1
# Load the remaining outer environments into registers.
while index > 1:
# outer_env = builder.fn_infos[index].environment
outer_env = builder.builders[index].environment
env_reg = load_outer_env(builder, env_reg, outer_env)
index -= 1
def add_args_to_env(builder: IRBuilder,
local: bool = True,
base: Optional[Union[FuncInfo, ImplicitClass]] = None,
reassign: bool = True) -> None:
fn_info = builder.fn_info
if local:
for arg in fn_info.fitem.arguments:
rtype = builder.type_to_rtype(arg.variable.type)
builder.environment.add_local_reg(arg.variable, rtype, is_arg=True)
else:
for arg in fn_info.fitem.arguments:
if is_free_variable(builder, arg.variable) or fn_info.is_generator:
rtype = builder.type_to_rtype(arg.variable.type)
assert base is not None, 'base cannot be None for adding nonlocal args'
builder.add_var_to_env_class(arg.variable, rtype, base, reassign=reassign)
def setup_func_for_recursive_call(builder: IRBuilder, fdef: FuncDef, base: ImplicitClass) -> None:
"""Enable calling a nested function (with a callable class) recursively.
Adds the instance of the callable class representing the given
FuncDef to a register in the environment so that the function can
be called recursively. Note that this needs to be done only for
nested functions.
"""
# First, set the attribute of the environment class so that GetAttr can be called on it.
prev_env = builder.fn_infos[-2].env_class
prev_env.attributes[fdef.name] = builder.type_to_rtype(fdef.type)
if isinstance(base, GeneratorClass):
# If we are dealing with a generator class, then we need to first get the register
# holding the current environment class, and load the previous environment class from
# there.
prev_env_reg = builder.add(GetAttr(base.curr_env_reg, ENV_ATTR_NAME, -1))
else:
prev_env_reg = base.prev_env_reg
# Obtain the instance of the callable class representing the FuncDef, and add it to the
# current environment.
val = builder.add(GetAttr(prev_env_reg, fdef.name, -1))
target = builder.environment.add_local_reg(fdef, object_rprimitive)
builder.assign(target, val, -1)
def is_free_variable(builder: IRBuilder, symbol: SymbolNode) -> bool:
fitem = builder.fn_info.fitem
return (
fitem in builder.free_variables
and symbol in builder.free_variables[fitem]
)
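# --- Illustrative sketch only (hand-written, not generated code) -------------
# In plain Python terms, the environment class built for the example in the
# module docstring behaves roughly like the version below: the free variable
# 'x' becomes an attribute of an instance that both f() and g() can see.
# The class name 'f_env' is made up; real generated names are namespaced.
class f_env:
    x: int

def f() -> int:
    env = f_env()
    env.x = 0                 # 'x = 0' becomes an attribute write

    def g() -> int:
        return env.x + 2      # reading the free variable is an attribute read

    env.x = env.x + 1         # 'x = x + 1' becomes an attribute write
    return g()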
| 42.360976
| 98
| 0.711654
|
da69dcb59ac45ea31062c0cfa22d8cb4ea3611eb
| 4,128
|
py
|
Python
|
pyzoo/test/zoo/chronos/data/utils/test_public_dataset.py
|
yangw1234/analytics-zoo
|
a8032b8485d7515b3502977507234e8cd07b3cd8
|
[
"Apache-2.0"
] | 1
|
2021-07-08T01:11:04.000Z
|
2021-07-08T01:11:04.000Z
|
pyzoo/test/zoo/chronos/data/utils/test_public_dataset.py
|
zzti-bsj/analytics-zoo
|
a1ffeeed17ff06235a918d45b8bef6ee0f89ff01
|
[
"Apache-2.0"
] | null | null | null |
pyzoo/test/zoo/chronos/data/utils/test_public_dataset.py
|
zzti-bsj/analytics-zoo
|
a1ffeeed17ff06235a918d45b8bef6ee0f89ff01
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pandas as pd
import pytest
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.chronos.data.utils.public_dataset import PublicDataset
class TestPublicDataset(ZooTestCase):
def setup_method(self, method):
pass
def teardown_method(self, method):
pass
def test_init_get_dataset(self):
name = 'nyc_taxi'
path = '~/.chronos/dataset/'
public_data = PublicDataset(name, path, redownload=False, with_split=False)
        # illegal input.
with pytest.raises(OSError):
PublicDataset(name, path, redownload=True).get_public_data(chunk_size=1024)
with pytest.raises(AssertionError):
PublicDataset(name, path, redownload=False).get_public_data(chunk_size='1024')
def test_get_nyc_taxi(self):
name = 'nyc_taxi'
path = '~/.chronos/dataset'
if os.environ.get('FTP_URI', None):
file_url = f"{os.getenv('FTP_URI')}/analytics-zoo-data/apps/nyc-taxi/nyc_taxi.csv"
public_data = PublicDataset(name, path, redownload=False)
public_data.df = pd.read_csv(file_url, parse_dates=['timestamp'])
tsdata = public_data.get_tsdata(target_col='value', dt_col='timestamp')
assert set(tsdata.df.columns) == {'id', 'timestamp', 'value'}
assert tsdata.df.shape == (10320, 3)
tsdata._check_basic_invariants()
def test_get_network_traffic(self):
name = 'network_traffic'
path = '~/.chronos/dataset'
if os.environ.get('FTP_URI', None):
file_url = f"{os.getenv('FTP_URI')}/analytics-zoo-data/network-traffic/data/data.csv"
public_data = PublicDataset(name, path, redownload=False)
public_data.df = pd.read_csv(file_url)
public_data.df.StartTime = pd.to_datetime(public_data.df.StartTime)
public_data.df.AvgRate = public_data.df.AvgRate.apply(lambda x: float(x[:-4])
if x.endswith("Mbps")
else float(x[:-4])*1000)
tsdata = public_data.get_tsdata(target_col=['AvgRate', 'total'], dt_col='StartTime')
assert tsdata.df.shape == (8760, 5)
assert set(tsdata.df.columns) == {'StartTime', 'EndTime', 'AvgRate', 'total', 'id'}
tsdata._check_basic_invariants()
def test_get_fsi(self):
name = 'fsi'
path = '~/.chronos/dataset'
if os.environ.get('FTP_URI', None):
file_url = f"{os.getenv('FTP_URI')}/analytics-zoo-data/chronos-aiops/m_1932.csv"
public_data = PublicDataset(name, path, redownload=False, with_split=False)
public_data.df = pd.read_csv(file_url, usecols=[1, 2, 3],
names=['time_step', 'cpu_usage', 'mem_usage'])
public_data.df.sort_values(by="time_step", inplace=True)
public_data.df.reset_index(inplace=True, drop=True)
public_data.df.time_step = pd.to_datetime(public_data.df.time_step,
unit='s',
origin=pd.Timestamp('2018-01-01'))
tsdata = public_data.get_tsdata(dt_col='time_step', target_col='cpu_usage')
assert tsdata.df.shape == (61570, 4)
assert set(tsdata.df.columns) == {'time_step', 'cpu_usage', 'mem_usage', 'id'}
tsdata._check_basic_invariants()
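# --- Hypothetical usage sketch, mirroring the calls exercised in the tests ---
# (the local csv path is an assumption; the tests above read the same data from
# an FTP mirror instead).
#
#   from zoo.chronos.data.utils.public_dataset import PublicDataset
#   import pandas as pd
#
#   dataset = PublicDataset('nyc_taxi', '~/.chronos/dataset', redownload=False)
#   dataset.df = pd.read_csv('nyc_taxi.csv', parse_dates=['timestamp'])
#   tsdata = dataset.get_tsdata(dt_col='timestamp', target_col='value')
#   print(tsdata.df.shape)   # expected (10320, 3) for the nyc_taxi dataset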
| 44.387097
| 97
| 0.617975
|
746d1ef28f9e87de44a654f3c81de5e85eeddefb
| 31,965
|
py
|
Python
|
sdk/kusto/azure-mgmt-kusto/azure/mgmt/kusto/operations/_data_connections_operations.py
|
kushan2018/azure-sdk-for-python
|
08a9296207281f4e90e23cf7a30173863accc867
|
[
"MIT"
] | null | null | null |
sdk/kusto/azure-mgmt-kusto/azure/mgmt/kusto/operations/_data_connections_operations.py
|
kushan2018/azure-sdk-for-python
|
08a9296207281f4e90e23cf7a30173863accc867
|
[
"MIT"
] | 1
|
2020-03-06T05:57:16.000Z
|
2020-03-06T05:57:16.000Z
|
sdk/kusto/azure-mgmt-kusto/azure/mgmt/kusto/operations/_data_connections_operations.py
|
kushan2018/azure-sdk-for-python
|
08a9296207281f4e90e23cf7a30173863accc867
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class DataConnectionsOperations(object):
"""DataConnectionsOperations operations.
    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API Version. Constant value: "2019-05-15".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2019-05-15"
self.config = config
def list_by_database(
self, resource_group_name, cluster_name, database_name, custom_headers=None, raw=False, **operation_config):
"""Returns the list of data connections of the given Kusto database.
:param resource_group_name: The name of the resource group containing
the Kusto cluster.
:type resource_group_name: str
:param cluster_name: The name of the Kusto cluster.
:type cluster_name: str
:param database_name: The name of the database in the Kusto cluster.
:type database_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DataConnection
:rtype:
~azure.mgmt.kusto.models.DataConnectionPaged[~azure.mgmt.kusto.models.DataConnection]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_by_database.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.DataConnectionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_by_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections'}
def data_connection_validation_method(
self, resource_group_name, cluster_name, database_name, data_connection_name=None, properties=None, custom_headers=None, raw=False, **operation_config):
"""Checks that the data connection parameters are valid.
:param resource_group_name: The name of the resource group containing
the Kusto cluster.
:type resource_group_name: str
:param cluster_name: The name of the Kusto cluster.
:type cluster_name: str
:param database_name: The name of the database in the Kusto cluster.
:type database_name: str
:param data_connection_name: The name of the data connection.
:type data_connection_name: str
:param properties: The data connection properties to validate.
:type properties: ~azure.mgmt.kusto.models.DataConnection
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DataConnectionValidationListResult or ClientRawResponse if
raw=true
:rtype: ~azure.mgmt.kusto.models.DataConnectionValidationListResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.DataConnectionValidation(data_connection_name=data_connection_name, properties=properties)
# Construct URL
url = self.data_connection_validation_method.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'DataConnectionValidation')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DataConnectionValidationListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
data_connection_validation_method.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnectionValidation'}
def check_name_availability(
self, resource_group_name, cluster_name, database_name, name, custom_headers=None, raw=False, **operation_config):
"""Checks that the data connection name is valid and is not already in
use.
:param resource_group_name: The name of the resource group containing
the Kusto cluster.
:type resource_group_name: str
:param cluster_name: The name of the Kusto cluster.
:type cluster_name: str
:param database_name: The name of the database in the Kusto cluster.
:type database_name: str
:param name: Data Connection name.
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: CheckNameResult or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.kusto.models.CheckNameResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
data_connection_name = models.DataConnectionCheckNameRequest(name=name)
# Construct URL
url = self.check_name_availability.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(data_connection_name, 'DataConnectionCheckNameRequest')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CheckNameResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/checkNameAvailability'}
def get(
self, resource_group_name, cluster_name, database_name, data_connection_name, custom_headers=None, raw=False, **operation_config):
"""Returns a data connection.
:param resource_group_name: The name of the resource group containing
the Kusto cluster.
:type resource_group_name: str
:param cluster_name: The name of the Kusto cluster.
:type cluster_name: str
:param database_name: The name of the database in the Kusto cluster.
:type database_name: str
:param data_connection_name: The name of the data connection.
:type data_connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DataConnection or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.kusto.models.DataConnection or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'dataConnectionName': self._serialize.url("data_connection_name", data_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DataConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections/{dataConnectionName}'}
def _create_or_update_initial(
self, resource_group_name, cluster_name, database_name, data_connection_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'dataConnectionName': self._serialize.url("data_connection_name", data_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'DataConnection')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DataConnection', response)
if response.status_code == 201:
deserialized = self._deserialize('DataConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, cluster_name, database_name, data_connection_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a data connection.
:param resource_group_name: The name of the resource group containing
the Kusto cluster.
:type resource_group_name: str
:param cluster_name: The name of the Kusto cluster.
:type cluster_name: str
:param database_name: The name of the database in the Kusto cluster.
:type database_name: str
:param data_connection_name: The name of the data connection.
:type data_connection_name: str
:param parameters: The data connection parameters supplied to the
CreateOrUpdate operation.
:type parameters: ~azure.mgmt.kusto.models.DataConnection
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns DataConnection or
ClientRawResponse<DataConnection> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.kusto.models.DataConnection]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.kusto.models.DataConnection]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
database_name=database_name,
data_connection_name=data_connection_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('DataConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections/{dataConnectionName}'}
def _update_initial(
self, resource_group_name, cluster_name, database_name, data_connection_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'dataConnectionName': self._serialize.url("data_connection_name", data_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'DataConnection')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DataConnection', response)
if response.status_code == 201:
deserialized = self._deserialize('DataConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, cluster_name, database_name, data_connection_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates a data connection.
:param resource_group_name: The name of the resource group containing
the Kusto cluster.
:type resource_group_name: str
:param cluster_name: The name of the Kusto cluster.
:type cluster_name: str
:param database_name: The name of the database in the Kusto cluster.
:type database_name: str
:param data_connection_name: The name of the data connection.
:type data_connection_name: str
:param parameters: The data connection parameters supplied to the
Update operation.
:type parameters: ~azure.mgmt.kusto.models.DataConnection
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns DataConnection or
ClientRawResponse<DataConnection> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.kusto.models.DataConnection]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.kusto.models.DataConnection]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
database_name=database_name,
data_connection_name=data_connection_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('DataConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections/{dataConnectionName}'}
def _delete_initial(
self, resource_group_name, cluster_name, database_name, data_connection_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'dataConnectionName': self._serialize.url("data_connection_name", data_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, cluster_name, database_name, data_connection_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the data connection with the given name.
:param resource_group_name: The name of the resource group containing
the Kusto cluster.
:type resource_group_name: str
:param cluster_name: The name of the Kusto cluster.
:type cluster_name: str
:param database_name: The name of the database in the Kusto cluster.
:type database_name: str
:param data_connection_name: The name of the data connection.
:type data_connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
database_name=database_name,
data_connection_name=data_connection_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections/{dataConnectionName}'}
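# --- Hypothetical caller sketch (not part of the generated client above) -----
# These operations are normally reached through the generated
# KustoManagementClient rather than instantiated directly; the credential
# object and identifiers below are placeholders.
#
#   from azure.mgmt.kusto import KustoManagementClient
#
#   client = KustoManagementClient(credentials, '00000000-0000-0000-0000-000000000000')
#   for conn in client.data_connections.list_by_database('my-rg', 'my-cluster', 'my-db'):
#       print(conn.name)
#
#   poller = client.data_connections.delete('my-rg', 'my-cluster', 'my-db', 'my-connection')
#   poller.wait()   # delete() returns an LROPoller for the long-running operation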
| 49.252696
| 225
| 0.681214
|
11b019ad64ef2a97d16c65417f235c291f1b7e9c
| 2,634
|
py
|
Python
|
gestionairweb/api/serializers.py
|
HEG-Arc/paleo-2015-gestionair-web
|
8ca446581778bd875f47066175045f054b76d525
|
[
"BSD-3-Clause"
] | null | null | null |
gestionairweb/api/serializers.py
|
HEG-Arc/paleo-2015-gestionair-web
|
8ca446581778bd875f47066175045f054b76d525
|
[
"BSD-3-Clause"
] | 1
|
2015-07-18T17:29:34.000Z
|
2016-08-15T07:33:00.000Z
|
gestionairweb/api/serializers.py
|
HEG-Arc/paleo-2015-gestionair-web
|
8ca446581778bd875f47066175045f054b76d525
|
[
"BSD-3-Clause"
] | null | null | null |
from rest_framework import serializers
from gestionairweb.callcenter.models import Language, Game, Player, Answer, Question, Translation, Department
from gestionairweb import settings
from .models import Score, Event, Statistic
class LanguageSerializer(serializers.ModelSerializer):
class Meta:
model = Language
flag = serializers.SerializerMethodField()
def get_flag(self, obj):
return '%s%s' % (settings.STATIC_URL, obj.flag)
class DepartmentSerializer(serializers.ModelSerializer):
class Meta:
model = Department
class TranslationSerializer(serializers.ModelSerializer):
class Meta:
model = Translation
fields = ('language', 'text')
class QuestionSerializer(serializers.ModelSerializer):
class Meta:
model = Question
fields = ('number', 'translations')
translations = TranslationSerializer(many=True, read_only=True)
class PlayerAnswerSerializer(serializers.ModelSerializer):
class Meta:
model = Answer
fields = ('sequence', 'correct', 'answer')
def to_representation(self, instance):
ret = super(serializers.ModelSerializer, self).to_representation(instance)
translation = instance.question
ret['question'] = translation.question.number
ret['code'] = translation.language.code
ret['duration'] = 0
if instance.hangup_time is not None and instance.pickup_time is not None:
ret['duration'] = (instance.hangup_time - instance.pickup_time).total_seconds()
return ret
class GamePlayerSerializer(serializers.ModelSerializer):
class Meta:
model = Player
fields = ('number', 'name', 'score', 'answers')
answers = PlayerAnswerSerializer(many=True, read_only=True)
class GameDetailSerializer(serializers.ModelSerializer):
class Meta:
model = Game
players = GamePlayerSerializer(many=True, read_only=True)
class GameSerializer(serializers.ModelSerializer):
class Meta:
model = Game
fields = ('id', 'num_players', 'score_max', 'score_total', 'code', 'team', 'start_time')
num_players = serializers.IntegerField(required=False, read_only=True)
score_max = serializers.IntegerField(required=False, read_only=True)
score_total = serializers.IntegerField(required=False, read_only=True)
class ScoreSerializer(serializers.ModelSerializer):
class Meta:
model = Score
class EventSerializer(serializers.ModelSerializer):
class Meta:
model = Event
class StatisticSerializer(serializers.ModelSerializer):
class Meta:
model = Statistic
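# --- Hypothetical usage sketch (standard Django REST framework calls) --------
# e.g. serializing a queryset with one of the serializers defined above:
#
#   from gestionairweb.callcenter.models import Game
#
#   games = Game.objects.order_by('-start_time')[:10]
#   data = GameSerializer(games, many=True).data   # list of dicts, ready for a JSON response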
| 29.595506
| 109
| 0.709567
|
7f3603874005cfa3bd3859b912eb4c9d9a32454a
| 43,970
|
py
|
Python
|
Project 4 Reinforcement/reinforcementTestClasses.py
|
TonyStarkLi/AI-PACMAN
|
7f33990c63f32b0ea3746e3ba4dce3860406b82c
|
[
"MIT"
] | 1
|
2015-12-29T08:03:14.000Z
|
2015-12-29T08:03:14.000Z
|
reinforcement-learning/reinforcementTestClasses.py
|
dragoon/edX-AI-course
|
db0969117654e0e1dd86d5b5867bbb8b608ddc59
|
[
"MIT"
] | null | null | null |
reinforcement-learning/reinforcementTestClasses.py
|
dragoon/edX-AI-course
|
db0969117654e0e1dd86d5b5867bbb8b608ddc59
|
[
"MIT"
] | 2
|
2018-09-30T06:58:34.000Z
|
2021-02-03T08:59:32.000Z
|
# reinforcementTestClasses.py
# ---------------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to
# http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import testClasses
import random, math, traceback, sys, os
import layout, textDisplay, pacman, gridworld
import time
from util import Counter, TimeoutFunction, FixedRandom
from collections import defaultdict
from pprint import PrettyPrinter
from hashlib import sha1
pp = PrettyPrinter()
VERBOSE = False
import gridworld
LIVINGREWARD = -0.1
NOISE = 0.2
class ValueIterationTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(ValueIterationTest, self).__init__(question, testDict)
self.discount = float(testDict['discount'])
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
iterations = int(testDict['valueIterations'])
if 'noise' in testDict: self.grid.setNoise(float(testDict['noise']))
if 'livingReward' in testDict: self.grid.setLivingReward(float(testDict['livingReward']))
maxPreIterations = 10
self.numsIterationsForDisplay = range(min(iterations, maxPreIterations))
self.testOutFile = testDict['test_out_file']
if maxPreIterations < iterations:
self.numsIterationsForDisplay.append(iterations)
def writeFailureFile(self, string):
with open(self.testOutFile, 'w') as handle:
handle.write(string)
def removeFailureFileIfExists(self):
if os.path.exists(self.testOutFile):
os.remove(self.testOutFile)
def execute(self, grades, moduleDict, solutionDict):
failureOutputFileString = ''
failureOutputStdString = ''
for n in self.numsIterationsForDisplay:
checkPolicy = (n == self.numsIterationsForDisplay[-1])
testPass, stdOutString, fileOutString = self.executeNIterations(grades, moduleDict, solutionDict, n, checkPolicy)
failureOutputStdString += stdOutString
failureOutputFileString += fileOutString
if not testPass:
self.addMessage(failureOutputStdString)
self.addMessage('For more details to help you debug, see test output file %s\n\n' % self.testOutFile)
self.writeFailureFile(failureOutputFileString)
return self.testFail(grades)
self.removeFailureFileIfExists()
return self.testPass(grades)
def executeNIterations(self, grades, moduleDict, solutionDict, n, checkPolicy):
testPass = True
valuesPretty, qValuesPretty, actions, policyPretty = self.runAgent(moduleDict, n)
stdOutString = ''
fileOutString = ''
valuesKey = "values_k_%d" % n
if self.comparePrettyValues(valuesPretty, solutionDict[valuesKey]):
fileOutString += "Values at iteration %d are correct.\n" % n
fileOutString += " Student/correct solution:\n %s\n" % self.prettyValueSolutionString(valuesKey, valuesPretty)
else:
testPass = False
outString = "Values at iteration %d are NOT correct.\n" % n
outString += " Student solution:\n %s\n" % self.prettyValueSolutionString(valuesKey, valuesPretty)
outString += " Correct solution:\n %s\n" % self.prettyValueSolutionString(valuesKey, solutionDict[valuesKey])
stdOutString += outString
fileOutString += outString
for action in actions:
qValuesKey = 'q_values_k_%d_action_%s' % (n, action)
qValues = qValuesPretty[action]
if self.comparePrettyValues(qValues, solutionDict[qValuesKey]):
fileOutString += "Q-Values at iteration %d for action %s are correct.\n" % (n, action)
fileOutString += " Student/correct solution:\n %s\n" % self.prettyValueSolutionString(qValuesKey, qValues)
else:
testPass = False
outString = "Q-Values at iteration %d for action %s are NOT correct.\n" % (n, action)
outString += " Student solution:\n %s\n" % self.prettyValueSolutionString(qValuesKey, qValues)
outString += " Correct solution:\n %s\n" % self.prettyValueSolutionString(qValuesKey, solutionDict[qValuesKey])
stdOutString += outString
fileOutString += outString
if checkPolicy:
if not self.comparePrettyValues(policyPretty, solutionDict['policy']):
testPass = False
outString = "Policy is NOT correct.\n"
outString += " Student solution:\n %s\n" % self.prettyValueSolutionString('policy', policyPretty)
outString += " Correct solution:\n %s\n" % self.prettyValueSolutionString('policy', solutionDict['policy'])
stdOutString += outString
fileOutString += outString
return testPass, stdOutString, fileOutString
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
policyPretty = ''
actions = []
for n in self.numsIterationsForDisplay:
valuesPretty, qValuesPretty, actions, policyPretty = self.runAgent(moduleDict, n)
handle.write(self.prettyValueSolutionString('values_k_%d' % n, valuesPretty))
for action in actions:
handle.write(self.prettyValueSolutionString('q_values_k_%d_action_%s' % (n, action), qValuesPretty[action]))
handle.write(self.prettyValueSolutionString('policy', policyPretty))
handle.write(self.prettyValueSolutionString('actions', '\n'.join(actions) + '\n'))
return True
def runAgent(self, moduleDict, numIterations):
agent = moduleDict['valueIterationAgents'].ValueIterationAgent(self.grid, discount=self.discount, iterations=numIterations)
states = self.grid.getStates()
actions = list(reduce(lambda a, b: set(a).union(b), [self.grid.getPossibleActions(state) for state in states]))
values = {}
qValues = {}
policy = {}
for state in states:
values[state] = agent.getValue(state)
policy[state] = agent.computeActionFromValues(state)
possibleActions = self.grid.getPossibleActions(state)
for action in actions:
if not qValues.has_key(action):
qValues[action] = {}
if action in possibleActions:
qValues[action][state] = agent.computeQValueFromValues(state, action)
else:
qValues[action][state] = None
valuesPretty = self.prettyValues(values)
policyPretty = self.prettyPolicy(policy)
qValuesPretty = {}
for action in actions:
qValuesPretty[action] = self.prettyValues(qValues[action])
return (valuesPretty, qValuesPretty, actions, policyPretty)
def prettyPrint(self, elements, formatString):
pretty = ''
states = self.grid.getStates()
for ybar in range(self.grid.grid.height):
y = self.grid.grid.height-1-ybar
row = []
for x in range(self.grid.grid.width):
if (x, y) in states:
value = elements[(x, y)]
if value is None:
row.append(' illegal')
else:
row.append(formatString.format(elements[(x,y)]))
else:
row.append('_' * 10)
pretty += ' %s\n' % (" ".join(row), )
pretty += '\n'
return pretty
def prettyValues(self, values):
return self.prettyPrint(values, '{0:10.4f}')
def prettyPolicy(self, policy):
return self.prettyPrint(policy, '{0:10s}')
def prettyValueSolutionString(self, name, pretty):
return '%s: """\n%s\n"""\n\n' % (name, pretty.rstrip())
def comparePrettyValues(self, aPretty, bPretty, tolerance=0.01):
aList = self.parsePrettyValues(aPretty)
bList = self.parsePrettyValues(bPretty)
if len(aList) != len(bList):
return False
for a, b in zip(aList, bList):
try:
aNum = float(a)
bNum = float(b)
# error = abs((aNum - bNum) / ((aNum + bNum) / 2.0))
error = abs(aNum - bNum)
if error > tolerance:
return False
except ValueError:
if a.strip() != b.strip():
return False
return True
def parsePrettyValues(self, pretty):
values = pretty.split()
return values
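# Illustrative, condensed restatement of the tolerance rule applied by
# comparePrettyValues() above: tokens are compared numerically where possible,
# and an absolute difference of at most 0.01 counts as equal.
def _roughly_equal(a_pretty, b_pretty, tolerance=0.01):
    for a, b in zip(a_pretty.split(), b_pretty.split()):
        try:
            if abs(float(a) - float(b)) > tolerance:
                return False
        except ValueError:
            if a.strip() != b.strip():
                return False
    return True

# _roughly_equal("0.7253 north", "0.7250 north") evaluates to True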
class ApproximateQLearningTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(ApproximateQLearningTest, self).__init__(question, testDict)
self.discount = float(testDict['discount'])
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
if 'noise' in testDict: self.grid.setNoise(float(testDict['noise']))
if 'livingReward' in testDict: self.grid.setLivingReward(float(testDict['livingReward']))
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
self.env = gridworld.GridworldEnvironment(self.grid)
self.epsilon = float(testDict['epsilon'])
self.learningRate = float(testDict['learningRate'])
self.extractor = 'IdentityExtractor'
if 'extractor' in testDict:
self.extractor = testDict['extractor']
self.opts = {'actionFn': self.env.getPossibleActions, 'epsilon': self.epsilon, 'gamma': self.discount, 'alpha': self.learningRate}
numExperiences = int(testDict['numExperiences'])
maxPreExperiences = 10
self.numsExperiencesForDisplay = range(min(numExperiences, maxPreExperiences))
self.testOutFile = testDict['test_out_file']
if maxPreExperiences < numExperiences:
self.numsExperiencesForDisplay.append(numExperiences)
def writeFailureFile(self, string):
with open(self.testOutFile, 'w') as handle:
handle.write(string)
def removeFailureFileIfExists(self):
if os.path.exists(self.testOutFile):
os.remove(self.testOutFile)
def execute(self, grades, moduleDict, solutionDict):
failureOutputFileString = ''
failureOutputStdString = ''
for n in self.numsExperiencesForDisplay:
testPass, stdOutString, fileOutString = self.executeNExperiences(grades, moduleDict, solutionDict, n)
failureOutputStdString += stdOutString
failureOutputFileString += fileOutString
if not testPass:
self.addMessage(failureOutputStdString)
self.addMessage('For more details to help you debug, see test output file %s\n\n' % self.testOutFile)
self.writeFailureFile(failureOutputFileString)
return self.testFail(grades)
self.removeFailureFileIfExists()
return self.testPass(grades)
def executeNExperiences(self, grades, moduleDict, solutionDict, n):
testPass = True
qValuesPretty, weights, actions, lastExperience = self.runAgent(moduleDict, n)
stdOutString = ''
fileOutString = "==================== Iteration %d ====================\n" % n
if lastExperience is not None:
fileOutString += "Agent observed the transition (startState = %s, action = %s, endState = %s, reward = %f)\n\n" % lastExperience
weightsKey = 'weights_k_%d' % n
if weights == eval(solutionDict[weightsKey]):
fileOutString += "Weights at iteration %d are correct." % n
fileOutString += " Student/correct solution:\n\n%s\n\n" % pp.pformat(weights)
for action in actions:
qValuesKey = 'q_values_k_%d_action_%s' % (n, action)
qValues = qValuesPretty[action]
if self.comparePrettyValues(qValues, solutionDict[qValuesKey]):
fileOutString += "Q-Values at iteration %d for action '%s' are correct." % (n, action)
fileOutString += " Student/correct solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, qValues)
else:
testPass = False
outString = "Q-Values at iteration %d for action '%s' are NOT correct." % (n, action)
outString += " Student solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, qValues)
outString += " Correct solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, solutionDict[qValuesKey])
stdOutString += outString
fileOutString += outString
return testPass, stdOutString, fileOutString
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
for n in self.numsExperiencesForDisplay:
qValuesPretty, weights, actions, _ = self.runAgent(moduleDict, n)
handle.write(self.prettyValueSolutionString('weights_k_%d' % n, pp.pformat(weights)))
for action in actions:
handle.write(self.prettyValueSolutionString('q_values_k_%d_action_%s' % (n, action), qValuesPretty[action]))
return True
def runAgent(self, moduleDict, numExperiences):
agent = moduleDict['qlearningAgents'].ApproximateQAgent(extractor=self.extractor, **self.opts)
states = filter(lambda state : len(self.grid.getPossibleActions(state)) > 0, self.grid.getStates())
states.sort()
randObj = FixedRandom().random
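        # FixedRandom is assumed (from its name and its use below) to be a fixed-seed
        # random source defined elsewhere in the autograder, so the experiences
        # generated here are reproducible from run to run.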
# choose a random start state and a random possible action from that state
# get the next state and reward from the transition function
lastExperience = None
for i in range(numExperiences):
startState = randObj.choice(states)
action = randObj.choice(self.grid.getPossibleActions(startState))
(endState, reward) = self.env.getRandomNextState(startState, action, randObj=randObj)
lastExperience = (startState, action, endState, reward)
agent.update(*lastExperience)
actions = list(reduce(lambda a, b: set(a).union(b), [self.grid.getPossibleActions(state) for state in states]))
qValues = {}
weights = agent.getWeights()
for state in states:
possibleActions = self.grid.getPossibleActions(state)
for action in actions:
                if action not in qValues:
qValues[action] = {}
if action in possibleActions:
qValues[action][state] = agent.getQValue(state, action)
else:
qValues[action][state] = None
qValuesPretty = {}
for action in actions:
qValuesPretty[action] = self.prettyValues(qValues[action])
return (qValuesPretty, weights, actions, lastExperience)
def prettyPrint(self, elements, formatString):
pretty = ''
states = self.grid.getStates()
for ybar in range(self.grid.grid.height):
y = self.grid.grid.height-1-ybar
row = []
for x in range(self.grid.grid.width):
if (x, y) in states:
value = elements[(x, y)]
if value is None:
row.append(' illegal')
else:
row.append(formatString.format(elements[(x,y)]))
else:
row.append('_' * 10)
pretty += ' %s\n' % (" ".join(row), )
pretty += '\n'
return pretty
def prettyValues(self, values):
return self.prettyPrint(values, '{0:10.4f}')
def prettyPolicy(self, policy):
return self.prettyPrint(policy, '{0:10s}')
def prettyValueSolutionString(self, name, pretty):
return '%s: """\n%s\n"""\n\n' % (name, pretty.rstrip())
def comparePrettyValues(self, aPretty, bPretty, tolerance=0.01):
aList = self.parsePrettyValues(aPretty)
bList = self.parsePrettyValues(bPretty)
if len(aList) != len(bList):
return False
for a, b in zip(aList, bList):
try:
aNum = float(a)
bNum = float(b)
# error = abs((aNum - bNum) / ((aNum + bNum) / 2.0))
error = abs(aNum - bNum)
if error > tolerance:
return False
except ValueError:
if a.strip() != b.strip():
return False
return True
def parsePrettyValues(self, pretty):
values = pretty.split()
return values
class QLearningTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(QLearningTest, self).__init__(question, testDict)
self.discount = float(testDict['discount'])
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
if 'noise' in testDict: self.grid.setNoise(float(testDict['noise']))
if 'livingReward' in testDict: self.grid.setLivingReward(float(testDict['livingReward']))
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
self.env = gridworld.GridworldEnvironment(self.grid)
self.epsilon = float(testDict['epsilon'])
self.learningRate = float(testDict['learningRate'])
self.opts = {'actionFn': self.env.getPossibleActions, 'epsilon': self.epsilon, 'gamma': self.discount, 'alpha': self.learningRate}
numExperiences = int(testDict['numExperiences'])
maxPreExperiences = 10
self.numsExperiencesForDisplay = range(min(numExperiences, maxPreExperiences))
self.testOutFile = testDict['test_out_file']
if maxPreExperiences < numExperiences:
self.numsExperiencesForDisplay.append(numExperiences)
def writeFailureFile(self, string):
with open(self.testOutFile, 'w') as handle:
handle.write(string)
def removeFailureFileIfExists(self):
if os.path.exists(self.testOutFile):
os.remove(self.testOutFile)
def execute(self, grades, moduleDict, solutionDict):
failureOutputFileString = ''
failureOutputStdString = ''
for n in self.numsExperiencesForDisplay:
checkValuesAndPolicy = (n == self.numsExperiencesForDisplay[-1])
testPass, stdOutString, fileOutString = self.executeNExperiences(grades, moduleDict, solutionDict, n, checkValuesAndPolicy)
failureOutputStdString += stdOutString
failureOutputFileString += fileOutString
if not testPass:
self.addMessage(failureOutputStdString)
self.addMessage('For more details to help you debug, see test output file %s\n\n' % self.testOutFile)
self.writeFailureFile(failureOutputFileString)
return self.testFail(grades)
self.removeFailureFileIfExists()
return self.testPass(grades)
def executeNExperiences(self, grades, moduleDict, solutionDict, n, checkValuesAndPolicy):
testPass = True
valuesPretty, qValuesPretty, actions, policyPretty, lastExperience = self.runAgent(moduleDict, n)
stdOutString = ''
fileOutString = "==================== Iteration %d ====================\n" % n
if lastExperience is not None:
fileOutString += "Agent observed the transition (startState = %s, action = %s, endState = %s, reward = %f)\n\n\n" % lastExperience
for action in actions:
qValuesKey = 'q_values_k_%d_action_%s' % (n, action)
qValues = qValuesPretty[action]
if self.comparePrettyValues(qValues, solutionDict[qValuesKey]):
fileOutString += "Q-Values at iteration %d for action '%s' are correct." % (n, action)
fileOutString += " Student/correct solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, qValues)
else:
testPass = False
outString = "Q-Values at iteration %d for action '%s' are NOT correct." % (n, action)
outString += " Student solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, qValues)
outString += " Correct solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, solutionDict[qValuesKey])
stdOutString += outString
fileOutString += outString
if checkValuesAndPolicy:
if not self.comparePrettyValues(valuesPretty, solutionDict['values']):
testPass = False
outString = "Values are NOT correct."
outString += " Student solution:\n\t%s" % self.prettyValueSolutionString('values', valuesPretty)
outString += " Correct solution:\n\t%s" % self.prettyValueSolutionString('values', solutionDict['values'])
stdOutString += outString
fileOutString += outString
if not self.comparePrettyValues(policyPretty, solutionDict['policy']):
testPass = False
outString = "Policy is NOT correct."
outString += " Student solution:\n\t%s" % self.prettyValueSolutionString('policy', policyPretty)
outString += " Correct solution:\n\t%s" % self.prettyValueSolutionString('policy', solutionDict['policy'])
stdOutString += outString
fileOutString += outString
return testPass, stdOutString, fileOutString
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
valuesPretty = ''
policyPretty = ''
for n in self.numsExperiencesForDisplay:
valuesPretty, qValuesPretty, actions, policyPretty, _ = self.runAgent(moduleDict, n)
for action in actions:
handle.write(self.prettyValueSolutionString('q_values_k_%d_action_%s' % (n, action), qValuesPretty[action]))
handle.write(self.prettyValueSolutionString('values', valuesPretty))
handle.write(self.prettyValueSolutionString('policy', policyPretty))
return True
def runAgent(self, moduleDict, numExperiences):
agent = moduleDict['qlearningAgents'].QLearningAgent(**self.opts)
states = filter(lambda state : len(self.grid.getPossibleActions(state)) > 0, self.grid.getStates())
states.sort()
randObj = FixedRandom().random
# choose a random start state and a random possible action from that state
# get the next state and reward from the transition function
lastExperience = None
for i in range(numExperiences):
startState = randObj.choice(states)
action = randObj.choice(self.grid.getPossibleActions(startState))
(endState, reward) = self.env.getRandomNextState(startState, action, randObj=randObj)
lastExperience = (startState, action, endState, reward)
agent.update(*lastExperience)
actions = list(reduce(lambda a, b: set(a).union(b), [self.grid.getPossibleActions(state) for state in states]))
values = {}
qValues = {}
policy = {}
for state in states:
values[state] = agent.computeValueFromQValues(state)
policy[state] = agent.computeActionFromQValues(state)
possibleActions = self.grid.getPossibleActions(state)
for action in actions:
                if action not in qValues:
qValues[action] = {}
if action in possibleActions:
qValues[action][state] = agent.getQValue(state, action)
else:
qValues[action][state] = None
valuesPretty = self.prettyValues(values)
policyPretty = self.prettyPolicy(policy)
qValuesPretty = {}
for action in actions:
qValuesPretty[action] = self.prettyValues(qValues[action])
return (valuesPretty, qValuesPretty, actions, policyPretty, lastExperience)
def prettyPrint(self, elements, formatString):
pretty = ''
states = self.grid.getStates()
for ybar in range(self.grid.grid.height):
y = self.grid.grid.height-1-ybar
row = []
for x in range(self.grid.grid.width):
if (x, y) in states:
value = elements[(x, y)]
if value is None:
row.append(' illegal')
else:
row.append(formatString.format(elements[(x,y)]))
else:
row.append('_' * 10)
pretty += ' %s\n' % (" ".join(row), )
pretty += '\n'
return pretty
def prettyValues(self, values):
return self.prettyPrint(values, '{0:10.4f}')
def prettyPolicy(self, policy):
return self.prettyPrint(policy, '{0:10s}')
def prettyValueSolutionString(self, name, pretty):
return '%s: """\n%s\n"""\n\n' % (name, pretty.rstrip())
def comparePrettyValues(self, aPretty, bPretty, tolerance=0.01):
aList = self.parsePrettyValues(aPretty)
bList = self.parsePrettyValues(bPretty)
if len(aList) != len(bList):
return False
for a, b in zip(aList, bList):
try:
aNum = float(a)
bNum = float(b)
# error = abs((aNum - bNum) / ((aNum + bNum) / 2.0))
error = abs(aNum - bNum)
if error > tolerance:
return False
except ValueError:
if a.strip() != b.strip():
return False
return True
def parsePrettyValues(self, pretty):
values = pretty.split()
return values
class EpsilonGreedyTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(EpsilonGreedyTest, self).__init__(question, testDict)
self.discount = float(testDict['discount'])
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
if 'noise' in testDict: self.grid.setNoise(float(testDict['noise']))
if 'livingReward' in testDict: self.grid.setLivingReward(float(testDict['livingReward']))
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
self.env = gridworld.GridworldEnvironment(self.grid)
self.epsilon = float(testDict['epsilon'])
self.learningRate = float(testDict['learningRate'])
self.numExperiences = int(testDict['numExperiences'])
self.numIterations = int(testDict['iterations'])
self.opts = {'actionFn': self.env.getPossibleActions, 'epsilon': self.epsilon, 'gamma': self.discount, 'alpha': self.learningRate}
def execute(self, grades, moduleDict, solutionDict):
if self.testEpsilonGreedy(moduleDict):
return self.testPass(grades)
else:
return self.testFail(grades)
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
handle.write('# This is the solution file for %s.\n' % self.path)
handle.write('# File intentionally blank.\n')
return True
def runAgent(self, moduleDict):
agent = moduleDict['qlearningAgents'].QLearningAgent(**self.opts)
states = filter(lambda state : len(self.grid.getPossibleActions(state)) > 0, self.grid.getStates())
states.sort()
randObj = FixedRandom().random
# choose a random start state and a random possible action from that state
# get the next state and reward from the transition function
for i in range(self.numExperiences):
startState = randObj.choice(states)
action = randObj.choice(self.grid.getPossibleActions(startState))
(endState, reward) = self.env.getRandomNextState(startState, action, randObj=randObj)
agent.update(startState, action, endState, reward)
return agent
def testEpsilonGreedy(self, moduleDict, tolerance=0.025):
agent = self.runAgent(moduleDict)
for state in self.grid.getStates():
numLegalActions = len(agent.getLegalActions(state))
if numLegalActions <= 1:
continue
numGreedyChoices = 0
optimalAction = agent.computeActionFromQValues(state)
for iteration in range(self.numIterations):
# assume that their computeActionFromQValues implementation is correct (q4 tests this)
if agent.getAction(state) == optimalAction:
numGreedyChoices += 1
# e = epsilon, g = # greedy actions, n = numIterations, k = numLegalActions
# g = n * [(1-e) + e/k] -> e = (n - g) / (n - n/k)
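            # Worked example (hypothetical numbers, not taken from a graded test):
            # with e = 0.2, k = 4 legal actions and n = 1000 iterations, the agent is
            # greedy with probability (1 - e) + e/k = 0.8 + 0.05 = 0.85, so g ~= 850,
            # and plugging back in gives e ~= (1000 - 850) / (1000 - 250) = 0.2.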
empiricalEpsilonNumerator = self.numIterations - numGreedyChoices
empiricalEpsilonDenominator = self.numIterations - self.numIterations / float(numLegalActions)
empiricalEpsilon = empiricalEpsilonNumerator / empiricalEpsilonDenominator
error = abs(empiricalEpsilon - self.epsilon)
if error > tolerance:
self.addMessage("Epsilon-greedy action selection is not correct.")
self.addMessage("Actual epsilon = %f; student empirical epsilon = %f; error = %f > tolerance = %f" % (self.epsilon, empiricalEpsilon, error, tolerance))
return False
return True
### q6
class Question6Test(testClasses.TestCase):
def __init__(self, question, testDict):
super(Question6Test, self).__init__(question, testDict)
def execute(self, grades, moduleDict, solutionDict):
studentSolution = moduleDict['analysis'].question6()
studentSolution = str(studentSolution).strip().lower()
hashedSolution = sha1(studentSolution).hexdigest()
if hashedSolution == '46729c96bb1e4081fdc81a8ff74b3e5db8fba415':
return self.testPass(grades)
else:
self.addMessage("Solution is not correct.")
self.addMessage(" Student solution: %s" % (studentSolution,))
return self.testFail(grades)
def writeSolution(self, moduleDict, filePath):
        with open(filePath, 'w') as handle:
            handle.write('# This is the solution file for %s.\n' % self.path)
            handle.write('# File intentionally blank.\n')
return True
### q7/q8
### =====
## Average wins of a pacman agent
class EvalAgentTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(EvalAgentTest, self).__init__(question, testDict)
self.pacmanParams = testDict['pacmanParams']
self.scoreMinimum = int(testDict['scoreMinimum']) if 'scoreMinimum' in testDict else None
self.nonTimeoutMinimum = int(testDict['nonTimeoutMinimum']) if 'nonTimeoutMinimum' in testDict else None
self.winsMinimum = int(testDict['winsMinimum']) if 'winsMinimum' in testDict else None
self.scoreThresholds = [int(s) for s in testDict.get('scoreThresholds','').split()]
self.nonTimeoutThresholds = [int(s) for s in testDict.get('nonTimeoutThresholds','').split()]
self.winsThresholds = [int(s) for s in testDict.get('winsThresholds','').split()]
self.maxPoints = sum([len(t) for t in [self.scoreThresholds, self.nonTimeoutThresholds, self.winsThresholds]])
def execute(self, grades, moduleDict, solutionDict):
self.addMessage('Grading agent using command: python pacman.py %s'% (self.pacmanParams,))
startTime = time.time()
games = pacman.runGames(** pacman.readCommand(self.pacmanParams.split(' ')))
totalTime = time.time() - startTime
numGames = len(games)
stats = {'time': totalTime, 'wins': [g.state.isWin() for g in games].count(True),
'games': games, 'scores': [g.state.getScore() for g in games],
'timeouts': [g.agentTimeout for g in games].count(True), 'crashes': [g.agentCrashed for g in games].count(True)}
averageScore = sum(stats['scores']) / float(len(stats['scores']))
nonTimeouts = numGames - stats['timeouts']
wins = stats['wins']
def gradeThreshold(value, minimum, thresholds, name):
points = 0
passed = (minimum == None) or (value >= minimum)
if passed:
for t in thresholds:
if value >= t:
points += 1
return (passed, points, value, minimum, thresholds, name)
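        # Illustrative reading of gradeThreshold (hypothetical numbers): with
        # minimum = 500 and thresholds = [1000, 1200], a value of 1250 passes and
        # earns 2 points, while a value of 400 fails the minimum and earns 0.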
results = [gradeThreshold(averageScore, self.scoreMinimum, self.scoreThresholds, "average score"),
gradeThreshold(nonTimeouts, self.nonTimeoutMinimum, self.nonTimeoutThresholds, "games not timed out"),
gradeThreshold(wins, self.winsMinimum, self.winsThresholds, "wins")]
totalPoints = 0
for passed, points, value, minimum, thresholds, name in results:
if minimum == None and len(thresholds)==0:
continue
# print passed, points, value, minimum, thresholds, name
totalPoints += points
if not passed:
assert points == 0
self.addMessage("%s %s (fail: below minimum value %s)" % (value, name, minimum))
else:
self.addMessage("%s %s (%s of %s points)" % (value, name, points, len(thresholds)))
if minimum != None:
self.addMessage(" Grading scheme:")
self.addMessage(" < %s: fail" % (minimum,))
if len(thresholds)==0 or minimum != thresholds[0]:
self.addMessage(" >= %s: 0 points" % (minimum,))
for idx, threshold in enumerate(thresholds):
self.addMessage(" >= %s: %s points" % (threshold, idx+1))
elif len(thresholds) > 0:
self.addMessage(" Grading scheme:")
self.addMessage(" < %s: 0 points" % (thresholds[0],))
for idx, threshold in enumerate(thresholds):
self.addMessage(" >= %s: %s points" % (threshold, idx+1))
if any([not passed for passed, _, _, _, _, _ in results]):
totalPoints = 0
return self.testPartial(grades, totalPoints, self.maxPoints)
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
handle.write('# This is the solution file for %s.\n' % self.path)
handle.write('# File intentionally blank.\n')
return True
### q2/q3
### =====
## For each parameter setting, compute the optimal policy, see if it satisfies some properties
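## followPath simulates the noise-free trajectory: starting from `start`, it repeatedly
## applies the policy's action as a deterministic one-step move until the policy exits
## (or is undefined), or until numSteps moves have been taken.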
def followPath(policy, start, numSteps=100):
state = start
path = []
for i in range(numSteps):
if state not in policy:
break
action = policy[state]
path.append("(%s,%s)" % state)
if action == 'north': nextState = state[0],state[1]+1
if action == 'south': nextState = state[0],state[1]-1
if action == 'east': nextState = state[0]+1,state[1]
if action == 'west': nextState = state[0]-1,state[1]
if action == 'exit' or action == None:
path.append('TERMINAL_STATE')
break
state = nextState
return path
def parseGrid(string):
grid = [[entry.strip() for entry in line.split()] for line in string.split('\n')]
for row in grid:
for x, col in enumerate(row):
try:
col = int(col)
except:
pass
if col == "_":
col = ' '
row[x] = col
return gridworld.makeGrid(grid)
def computePolicy(moduleDict, grid, discount):
valueIterator = moduleDict['valueIterationAgents'].ValueIterationAgent(grid, discount=discount)
policy = {}
for state in grid.getStates():
policy[state] = valueIterator.computeActionFromValues(state)
return policy
class GridPolicyTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(GridPolicyTest, self).__init__(question, testDict)
# Function in module in analysis that returns (discount, noise)
self.parameterFn = testDict['parameterFn']
self.question2 = testDict.get('question2', 'false').lower() == 'true'
# GridWorld specification
# _ is empty space
# numbers are terminal states with that value
# # is a wall
# S is a start state
#
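        # For example (illustrative only, not one of the graded grids):
        #     _    _    _    1
        #     _    #    _   -1
        #     S    _    _    _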
self.gridText = testDict['grid']
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
self.gridName = testDict['gridName']
# Policy specification
# _ policy choice not checked
# N, E, S, W policy action must be north, east, south, west
#
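        # e.g. (hypothetical) a specification like
        #     E    E    E    _
        #     N    _    _    _
        #     N    _    _    _
        # requires moving east along the top row and north up the left column,
        # and leaves every other state unchecked.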
self.policy = parseGrid(testDict['policy'])
# State the most probable path must visit
# (x,y) for a particular location; (0,0) is bottom left
# terminal for the terminal state
self.pathVisits = testDict.get('pathVisits', None)
# State the most probable path must not visit
# (x,y) for a particular location; (0,0) is bottom left
# terminal for the terminal state
self.pathNotVisits = testDict.get('pathNotVisits', None)
def execute(self, grades, moduleDict, solutionDict):
if not hasattr(moduleDict['analysis'], self.parameterFn):
self.addMessage('Method not implemented: analysis.%s' % (self.parameterFn,))
return self.testFail(grades)
result = getattr(moduleDict['analysis'], self.parameterFn)()
if type(result) == str and result.lower()[0:3] == "not":
self.addMessage('Actually, it is possible!')
return self.testFail(grades)
if self.question2:
livingReward = None
try:
discount, noise = result
discount = float(discount)
noise = float(noise)
except:
self.addMessage('Did not return a (discount, noise) pair; instead analysis.%s returned: %s' % (self.parameterFn, result))
return self.testFail(grades)
if discount != 0.9 and noise != 0.2:
self.addMessage('Must change either the discount or the noise, not both. Returned (discount, noise) = %s' % (result,))
return self.testFail(grades)
else:
try:
discount, noise, livingReward = result
discount = float(discount)
noise = float(noise)
livingReward = float(livingReward)
except:
self.addMessage('Did not return a (discount, noise, living reward) triple; instead analysis.%s returned: %s' % (self.parameterFn, result))
return self.testFail(grades)
self.grid.setNoise(noise)
if livingReward != None:
self.grid.setLivingReward(livingReward)
start = self.grid.getStartState()
policy = computePolicy(moduleDict, self.grid, discount)
## check policy
actionMap = {'N': 'north', 'E': 'east', 'S': 'south', 'W': 'west', 'X': 'exit'}
width, height = self.policy.width, self.policy.height
policyPassed = True
for x in range(width):
for y in range(height):
if self.policy[x][y] in actionMap and policy[(x,y)] != actionMap[self.policy[x][y]]:
differPoint = (x,y)
policyPassed = False
if not policyPassed:
self.addMessage('Policy not correct.')
self.addMessage(' Student policy at %s: %s' % (differPoint, policy[differPoint]))
self.addMessage(' Correct policy at %s: %s' % (differPoint, actionMap[self.policy[differPoint[0]][differPoint[1]]]))
self.addMessage(' Student policy:')
self.printPolicy(policy, False)
self.addMessage(" Legend: N,S,E,W at states which move north etc, X at states which exit,")
self.addMessage(" . at states where the policy is not defined (e.g. walls)")
self.addMessage(' Correct policy specification:')
self.printPolicy(self.policy, True)
self.addMessage(" Legend: N,S,E,W for states in which the student policy must move north etc,")
self.addMessage(" _ for states where it doesn't matter what the student policy does.")
self.printGridworld()
return self.testFail(grades)
## check path
path = followPath(policy, self.grid.getStartState())
if self.pathVisits != None and self.pathVisits not in path:
self.addMessage('Policy does not visit state %s when moving without noise.' % (self.pathVisits,))
self.addMessage(' States visited: %s' % (path,))
self.addMessage(' Student policy:')
self.printPolicy(policy, False)
self.addMessage(" Legend: N,S,E,W at states which move north etc, X at states which exit,")
self.addMessage(" . at states where policy not defined")
self.printGridworld()
return self.testFail(grades)
if self.pathNotVisits != None and self.pathNotVisits in path:
self.addMessage('Policy visits state %s when moving without noise.' % (self.pathNotVisits,))
self.addMessage(' States visited: %s' % (path,))
self.addMessage(' Student policy:')
self.printPolicy(policy, False)
self.addMessage(" Legend: N,S,E,W at states which move north etc, X at states which exit,")
self.addMessage(" . at states where policy not defined")
self.printGridworld()
return self.testFail(grades)
return self.testPass(grades)
def printGridworld(self):
self.addMessage(' Gridworld:')
for line in self.gridText.split('\n'):
self.addMessage(' ' + line)
self.addMessage(' Legend: # wall, _ empty, S start, numbers terminal states with that reward.')
def printPolicy(self, policy, policyTypeIsGrid):
if policyTypeIsGrid:
legend = {'N': 'N', 'E': 'E', 'S': 'S', 'W': 'W', ' ': '_'}
else:
legend = {'north': 'N', 'east': 'E', 'south': 'S', 'west': 'W', 'exit': 'X', '.': '.', ' ': '_'}
for ybar in range(self.grid.grid.height):
y = self.grid.grid.height-1-ybar
if policyTypeIsGrid:
self.addMessage(" %s" % (" ".join([legend[policy[x][y]] for x in range(self.grid.grid.width)]),))
else:
self.addMessage(" %s" % (" ".join([legend[policy.get((x,y), '.')] for x in range(self.grid.grid.width)]),))
# for state in sorted(self.grid.getStates()):
# if state != 'TERMINAL_STATE':
# self.addMessage(' (%s,%s) %s' % (state[0], state[1], policy[state]))
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
handle.write('# This is the solution file for %s.\n' % self.path)
handle.write('# File intentionally blank.\n')
return True
| 47.483801
| 168
| 0.606664
|
1515c809d42641470e8ca1c3f94d59a5fa9d3b7a
| 591
|
py
|
Python
|
uliweb/mail/backends/gmail.py
|
limodou/uliweb3
|
560fe818047c8ee8b4b775e714d9c637f0d23651
|
[
"BSD-2-Clause"
] | 16
|
2018-09-12T02:50:28.000Z
|
2021-08-20T08:38:31.000Z
|
uliweb/mail/backends/gmail.py
|
TommyLemon/uliweb3
|
3c92763d3172b9f1041ea93816daf4224c8512c0
|
[
"BSD-2-Clause"
] | 21
|
2018-11-29T06:41:08.000Z
|
2022-01-18T13:27:38.000Z
|
uliweb/mail/backends/gmail.py
|
TommyLemon/uliweb3
|
3c92763d3172b9f1041ea93816daf4224c8512c0
|
[
"BSD-2-Clause"
] | 1
|
2018-11-30T03:08:28.000Z
|
2018-11-30T03:08:28.000Z
|
import smtplib
from .smtp import MailConnection as SmtpMailConnection
class MailConnection(SmtpMailConnection):
def login(self):
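        # Gmail's SMTP submission endpoint requires an EHLO/STARTTLS handshake
        # before authenticating; the plain connection itself is opened on port 587
        # (or the configured port) in get_connection below.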
if self.mail_obj.user:
self.server.ehlo()
self.server.starttls()
self.server.ehlo()
self.server.login(self.mail_obj.user, self.mail_obj.password)
def get_connection(self):
if not self.server:
self.server = server = smtplib.SMTP()
self.server.connect(self.mail_obj.host or 'smtp.gmail.com', self.mail_obj.port or 587)
self.login()
| 32.833333
| 99
| 0.617597
|
0156119d567737cdf438ef14e8a4bf7e7f9c0788
| 1,463
|
py
|
Python
|
SignIn.py
|
Oumourin/Im-Fine-Thank-You
|
66fa6f2d099191267a06b26dd9a3493ad6f29b4d
|
[
"MIT"
] | null | null | null |
SignIn.py
|
Oumourin/Im-Fine-Thank-You
|
66fa6f2d099191267a06b26dd9a3493ad6f29b4d
|
[
"MIT"
] | null | null | null |
SignIn.py
|
Oumourin/Im-Fine-Thank-You
|
66fa6f2d099191267a06b26dd9a3493ad6f29b4d
|
[
"MIT"
] | null | null | null |
import requests
import random
from bs4 import BeautifulSoup
import PushMessage
import LoadConfig
# Login page URL
get_url = "http://39.98.190.134:81/Report/Reported"
# Login request URL
sign_up_url = "http://39.98.190.134:81/Account/Login"
# URL for requesting an SMS verification code
mobile_phone_code_url = "http://39.98.190.134:81/Account/GetLoginMobileCode"
# URL for submitting the daily report data
post_daily_data_url = "http://39.98.190.134:81/Report"
# URL for fetching the check-in result
get_checkin_result_url = "http://39.98.190.134:81/Report/Success"
# Spoofed browser request headers
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"
}
# r = requests.get(get_url, headers=headers, cookies=my_cookies)
# Generate a random body temperature value (between 35.5 and 36.5)
get_random = random.randint(355, 365)
get_my_temperature = get_random / 10.0
# Parse the JSON configuration
config_data = LoadConfig.load_config_json()
my_cookies = config_data['cookies']
my_daily_data = config_data['data']
my_daily_data['Temperature'] = str(get_my_temperature)
# Open the report page
request = requests.request('GET', get_url, cookies=my_cookies, headers=headers)
# Submit the daily data
post = requests.request('POST', post_daily_data_url, data=my_daily_data, cookies=my_cookies)
# Fetch the check-in result
result_request = requests.request('GET', get_checkin_result_url, cookies=my_cookies, headers=headers)
soup = BeautifulSoup(result_request.content, 'lxml')
result_string = soup.find('h2').text
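# '打卡成功' is the success banner on the result page ("check-in successful");
# the pushed messages below mean "Today's sign-in succeeded!" / "Today's sign-in failed!".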
if result_string == '打卡成功':
PushMessage.push_message("今日签到成功!")
else:
PushMessage.push_message("今日签到失败!")
| 27.092593
| 135
| 0.760766
|
c08b9fb718a02b6951c05960ef10832b38de0bee
| 17,241
|
py
|
Python
|
python/sn_toolbars.py
|
JasonReek/SmartNotes
|
4be5fdf98d4c5bc4b768da3c8872ce19340de4bf
|
[
"MIT"
] | 1
|
2020-09-29T00:57:39.000Z
|
2020-09-29T00:57:39.000Z
|
python/sn_toolbars.py
|
JasonReek/SmartNotes
|
4be5fdf98d4c5bc4b768da3c8872ce19340de4bf
|
[
"MIT"
] | null | null | null |
python/sn_toolbars.py
|
JasonReek/SmartNotes
|
4be5fdf98d4c5bc4b768da3c8872ce19340de4bf
|
[
"MIT"
] | null | null | null |
from PySide2.QtWidgets import (QApplication, QMainWindow, QLineEdit, QDialog, QAction, QFormLayout, QMessageBox, QTextEdit, QDockWidget, QMenu, QComboBox, QFrame, QListWidget,
QTabWidget, QAbstractItemView, QActionGroup, QColorDialog, QToolBar, QFontComboBox, QVBoxLayout, QGridLayout, QWidget, QLabel, QPushButton, QAction)
from PySide2.QtGui import (QTextCharFormat, QIntValidator, QKeySequence, QBrush, QTextListFormat, QFont, QColor, QIcon, QPixmap, QColor)
from PySide2.QtCore import (Qt)
from sn_widgets import VBreakLine
import os
import re
class HighlighterButton(QPushButton):
def __init__(self, color="yellow"):
super().__init__()
self.color = color
self.setStyleSheet("QPushButton{background-color: "+color+"; width: 8; height: 8;}")
self.setToolTip(str(color))
def setColor(self, color):
self.setStyleSheet("QPushButton{background-color: "+color+"; width: 8; height: 8;}")
self.setToolTip(str(color))
class LogicSymbolToolbar(QToolBar):
def __init__(self, title="Logic Symbol Toolbar", parent=None):
super().__init__(title, parent)
self._parent = parent
self.setObjectName("logictoolbar")
self.setStyleSheet("""
QWidget[objectName^="logictoolbar"]{background-color: #777777;}
QPushButton{background-color: #777777;}
QPushButton:hover{background-color: #999999;}
QPushButton:pressed{background-color: #555555;}
QToolButton{background-color: #777777;}
QToolButton:hover{background-color: #999999;}
            QToolButton:pressed{background-color: #555555;}
""")
self.addWidget(QLabel("Logic Symbols:"))
# Conjunction
self.conjunctionAction = QAction('&&', self)
self.conjunctionAction.setToolTip('insert a conjunction "and"')
self.conjunctionAction.triggered.connect(lambda: self._parent.activeNotepad().insertPlainText(r'&'))
self.addAction(self.conjunctionAction)
# Disjunction
self.disjunctionAction = QAction('∨', self)
self.disjunctionAction.setToolTip('insert a disjunction "or"')
self.disjunctionAction.triggered.connect(lambda: self._parent.activeNotepad().insertPlainText('∨'))
self.addAction(self.disjunctionAction)
# Negation
self.negationAction = QAction('~', self)
        self.negationAction.setToolTip('insert a negation "not"')
self.negationAction.triggered.connect(lambda: self._parent.activeNotepad().insertPlainText('~'))
self.addAction(self.negationAction)
# Conditional
self.conditionalAction = QAction('→', self)
self.conditionalAction.setToolTip('insert a conditional "if then"')
self.conditionalAction.triggered.connect(lambda: self._parent.activeNotepad().insertPlainText('→'))
self.addAction(self.conditionalAction)
# Biconditional
self.biconditionalAction = QAction('↔', self)
self.biconditionalAction.setToolTip('insert a biconditional "if and only if then"')
self.biconditionalAction.triggered.connect(lambda: self._parent.activeNotepad().insertPlainText('↔'))
self.addAction(self.biconditionalAction)
class FontToolBar(QToolBar):
def __init__(self, title="Font Toolbar", parent=None):
super().__init__(title, parent)
self._parent = parent
self.setObjectName("ftoolbar")
self.setStyleSheet("""
QWidget[objectName^="ftoolbar"]{background-color: #777777;}
QPushButton{background-color: #777777;}
QToolButton{background-color: #777777;};
""")
font = QFont()
font.setPointSize(12)
font.setFamily("Times New Roman")
self._font_families = QFontComboBox()
self._font_families.setCurrentFont(font)
self._font_families.currentFontChanged.connect(self.keepFontSize)
self._font_sizes = QComboBox()
self._font_sizes.setEditable(True)
validator = QIntValidator()
self._font_sizes.setValidator(validator)
FONT_SIZES = ['8', '9', '11', '12', '14', '16', '18', '20', '22', '24', '26', '28', '36', '48', '72']
self._font_sizes.addItems(FONT_SIZES)
self._font_sizes.activated.connect(self.changeFontSize)
self._font_sizes.setCurrentIndex(3)
self.addWidget(QLabel(" Font: "))
self.addWidget(self._font_families)
self.addWidget(self._font_sizes)
# Bold Button
self.bold_action = QAction(QIcon(os.path.join("images", "edit-bold.png")), "Bold", self)
self.bold_action.setStatusTip("Bold")
self.bold_action.setShortcut(QKeySequence.Bold)
self.bold_action.setCheckable(True)
self.addAction(self.bold_action)
# Italic Button
self.italic_action = QAction(QIcon(os.path.join("images", "edit-italic.png")), "Italic", self)
self.italic_action.setStatusTip("Italic")
self.italic_action.setShortcut(QKeySequence.Italic)
self.italic_action.setCheckable(True)
self.addAction(self.italic_action)
# Underline Button
self.underline_action = QAction(QIcon(os.path.join("images", "edit-underline.png")), "Underline", self)
self.underline_action.setStatusTip("Underline")
self.underline_action.setShortcut(QKeySequence.Underline)
self.underline_action.setCheckable(True)
self.addAction(self.underline_action)
self.addWidget(VBreakLine(self))
# Font Color Button
self.font_color_action = QAction(QIcon(os.path.join("images", "edit-color.png")), "Font Color", self)
self.font_color_button = HighlighterButton(color="#000000")
self.font_color_action.setStatusTip("Font Color")
self.font_color_action.triggered.connect(self.changeFontColor)
self.font_color_button.clicked.connect(self.changeFontColor)
self.addAction(self.font_color_action)
self.addWidget(self.font_color_button)
self.addWidget(VBreakLine(self))
# HighLighter Color Button
self.highlighter_action = QAction(QIcon(os.path.join("images", "edit-highlighter.png")), "Highlight Color")
self.highlighter_button = HighlighterButton(color="yellow")
self.highlighter_action.setStatusTip("Highlighter")
self.highlighter_action.triggered.connect(self.changeHighlighterColor)
self.highlighter_button.clicked.connect(self.changeHighlighterColor)
self.addAction(self.highlighter_action)
self.addWidget(self.highlighter_button)
self.addWidget(VBreakLine(self))
def keepFontSize(self):
font_size = int(self._font_sizes.currentText())
if self._font_families.currentFont().pointSize() != font_size:
font = QFont()
font.setPointSize(font_size)
self._font_families.setCurrentFont(font)
    def connectNotepad(self, notepad):
        self._font_families.currentFontChanged.connect(notepad.setCurrentFont)
        self.bold_action.toggled.connect(lambda x: notepad.setFontWeight(QFont.Bold if x else QFont.Normal))
        self.italic_action.toggled.connect(notepad.setFontItalic)
        self.underline_action.toggled.connect(notepad.setFontUnderline)
def changeFontSize(self):
font_format = QTextCharFormat()
font_size = int(self._font_sizes.currentText())
font_format.setFontPointSize(font_size)
cursor = self._parent.activeNotepad().textCursor()
cursor.mergeBlockCharFormat(font_format)
self._parent.activeNotepad().setTextCursor(cursor)
self._parent.activeNotepad().setFontPointSize(font_size)
def changeFontColor(self):
color_dialog = QColorDialog()
color = color_dialog.getColor()
hex_color = None
if color.isValid():
hex_color = color.name()
self.font_color_button.setColor(hex_color)
q_color = QColor(hex_color)
self._parent.activeNotepad().setTextColor(q_color)
def changeHighlighterColor(self):
color_dialog = QColorDialog()
color = color_dialog.getColor()
hex_color = None
if color.isValid():
hex_color = color.name()
self.highlighter_button.setColor(hex_color)
q_color = QColor(hex_color)
self._parent.activeNotepad().setTextBackgroundColor(q_color)
class AlignToolBar(QToolBar):
def __init__(self, title="Alignment Toolbar", parent=None):
super().__init__(title, parent)
self._parent = parent
self.setObjectName("alitoolbar")
self.setStyleSheet("""
QWidget[objectName^="alitoolbar"]{background-color: #777777;}
QPushButton{background-color: #777777;}
QToolButton{background-color: #777777;};
""")
# ALIGNMENT FORMATTING
#*********************
# Align Actions
#------------------------------------------------------------
self.alignl_action = QAction(QIcon(os.path.join('images', 'edit-alignment.png')), "Align left", self)
self.alignc_action = QAction(QIcon(os.path.join('images', 'edit-alignment-center.png')), "Align center", self)
self.alignr_action = QAction(QIcon(os.path.join('images', 'edit-alignment-right.png')), "Align right", self)
self.alignj_action = QAction(QIcon(os.path.join('images', 'edit-alignment-justify.png')), "Justify", self)
# Align Settings
#------------------------------------------------------------
# Align Left
self.alignl_action.setStatusTip("Align text left")
self.alignl_action.setCheckable(True)
self.alignl_action.toggled.connect(lambda toggled: self._parent.activeNotepad().setAlignment(Qt.AlignLeft if toggled else Qt.AlignJustify))
# Align Center
self.alignc_action.setStatusTip("Align text center")
self.alignc_action.setCheckable(True)
self.alignc_action.toggled.connect(lambda toggled: self._parent.activeNotepad().setAlignment(Qt.AlignCenter if toggled else Qt.AlignLeft))
# Align Right
self.alignr_action.setStatusTip("Align text right")
self.alignr_action.setCheckable(True)
self.alignr_action.toggled.connect(lambda toggled: self._parent.activeNotepad().setAlignment(Qt.AlignRight if toggled else Qt.AlignLeft))
# Justify
self.alignj_action.setStatusTip("Justify text")
self.alignj_action.setCheckable(True)
self.alignj_action.toggled.connect(lambda toggled: self._parent.activeNotepad().setAlignment(Qt.AlignJustify if toggled else Qt.AlignLeft))
# Align Group
###############################################
self.align_group = QActionGroup(self)
self.align_group.setExclusionPolicy(QActionGroup.ExclusionPolicy.ExclusiveOptional)
self.align_group.addAction(self.alignl_action)
self.align_group.addAction(self.alignc_action)
self.align_group.addAction(self.alignr_action)
self.align_group.addAction(self.alignj_action)
# Add actions to the tool bar
self.addAction(self.alignl_action)
self.addAction(self.alignc_action)
self.addAction(self.alignr_action)
self.addAction(self.alignj_action)
###############################################
# LIST FORMATTING
#*****************
# List Actions
#------------------------------------------------------------
self.list_action = QAction(QIcon(os.path.join('images', 'edit-list.png')), "List", self)
self.ord_list_action = QAction(QIcon(os.path.join('images', 'edit-list-order.png')), "Ordered List", self)
# List Widgets
#------------------------------------------------------------
self.list_style_combo = QComboBox()
self.ord_list_style_combo = QComboBox()
# List Settings
#------------------------------------------------------------
# List
self.list_action.setStatusTip("Create list")
self.list_action.setCheckable(True)
self.list_action.toggled.connect(self.createList)
# List Style
list_styles = ["Disc", "Circle", "Square"]
self.list_style_combo.addItems(list_styles)
self.list_style_combo.activated.connect(self.changeListStyle)
# Ordered List
self.ord_list_action.setStatusTip("Create ordered list")
self.ord_list_action.setCheckable(True)
self.ord_list_action.toggled.connect(self.createOrdList)
# Ordered List Style
ord_list_styles = ["Decimal", "Lower Alpha", "Upper Alpha", "Lower Roman", "Upper Roman"]
self.ord_list_style_combo.addItems(ord_list_styles)
self.ord_list_style_combo.activated.connect(self.changeOrdListStyle)
# Align Group (and widgets)
###############################################
self.list_group = QActionGroup(self)
self.list_group.setExclusionPolicy(QActionGroup.ExclusionPolicy.ExclusiveOptional)
self.list_group.addAction(self.list_action)
self.list_group.addAction(self.ord_list_action)
# Add Actions and Widgets to the tool bar
self.addAction(self.list_action)
self.addWidget(self.list_style_combo)
self.addAction(self.ord_list_action)
self.addWidget(self.ord_list_style_combo)
###############################################
def createList(self, toggled):
cursor = self._parent.activeNotepad().textCursor()
list_format = QTextListFormat()
list_styles = {"Disc": QTextListFormat.ListDisc,
"Circle": QTextListFormat.ListCircle,
"Square": QTextListFormat.ListSquare}
style = list_styles[self.list_style_combo.currentText()]
if toggled:
list_format.setStyle(style)
cursor.createList(list_format)
self._parent.activeNotepad().setTextCursor(cursor)
else:
current_list = cursor.currentList()
if current_list:
list_format.setIndent(0)
list_format.setStyle(style)
current_list.setFormat(list_format)
for i in range(current_list.count()-1, -1, -1):
current_list.removeItem(i)
def changeListStyle(self):
cursor = self._parent.activeNotepad().textCursor()
current_list = cursor.currentList()
list_format = QTextListFormat()
list_styles = {"Disc": QTextListFormat.ListDisc,
"Circle": QTextListFormat.ListCircle,
"Square": QTextListFormat.ListSquare}
style = list_styles[self.list_style_combo.currentText()]
list_format.setStyle(style)
current_list.setFormat(list_format)
self._parent.activeNotepad().setTextCursor(cursor)
def createOrdList(self, toggled):
cursor = self._parent.activeNotepad().textCursor()
ord_list_format = QTextListFormat()
ord_list_styles = {"Decimal": QTextListFormat.ListDecimal,
"Lower Alpha": QTextListFormat.ListLowerAlpha,
"Upper Alpha": QTextListFormat.ListUpperAlpha,
"Lower Roman": QTextListFormat.ListLowerRoman,
"Upper Roman": QTextListFormat.ListUpperRoman}
style = ord_list_styles[self.ord_list_style_combo.currentText()]
if toggled:
ord_list_format.setStyle(style)
cursor.createList(ord_list_format)
self._parent.activeNotepad().setTextCursor(cursor)
else:
current_list = cursor.currentList()
if current_list:
ord_list_format.setIndent(0)
ord_list_format.setStyle(style)
current_list.setFormat(ord_list_format)
for i in range(current_list.count()-1, -1, -1):
current_list.removeItem(i)
def changeOrdListStyle(self):
cursor = self._parent.activeNotepad().textCursor()
current_list = cursor.currentList()
list_format = QTextListFormat()
ord_list_styles = {"Decimal": QTextListFormat.ListDecimal,
"Lower Alpha": QTextListFormat.ListLowerAlpha,
"Upper Alpha": QTextListFormat.ListUpperAlpha,
"Lower Roman": QTextListFormat.ListLowerRoman,
"Upper Roman": QTextListFormat.ListUpperRoman}
style = ord_list_styles[self.ord_list_style_combo.currentText()]
list_format.setStyle(style)
current_list.setFormat(list_format)
self._parent.activeNotepad().setTextCursor(cursor)
| 46.346774
| 180
| 0.627922
|
52ad34564d3f555cdac77e1afe950f8504b4b7be
| 15,505
|
py
|
Python
|
bureau/stats/views.py
|
clairempr/bureau
|
c9fd114e637829b4e9ff643459d15602cc2efc2f
|
[
"Apache-2.0"
] | 1
|
2019-02-15T09:05:35.000Z
|
2019-02-15T09:05:35.000Z
|
bureau/stats/views.py
|
clairempr/bureau
|
c9fd114e637829b4e9ff643459d15602cc2efc2f
|
[
"Apache-2.0"
] | null | null | null |
bureau/stats/views.py
|
clairempr/bureau
|
c9fd114e637829b4e9ff643459d15602cc2efc2f
|
[
"Apache-2.0"
] | null | null | null |
from django.db.models import Case, CharField, Count, F, FloatField, Q, Value, When
from django.db.models.functions import Cast
from django.views.generic.base import TemplateView
from medical.models import Ailment, AilmentType
from personnel.models import Employee
from places.models import Place, Region
from places.settings import GERMANY_COUNTRY_NAME, GERMANY_COUNTRY_NAMES, VIRGINIA_REGION_NAME, VIRGINIA_REGION_NAMES
from stats.utils import get_ages_at_death, get_ages_in_year, get_mean, get_median, get_percent
class GeneralView(TemplateView):
template_name = 'stats/general.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['employee_count'] = Employee.objects.count()
context['colored_count'] = Employee.objects.filter(colored=True).count()
context['confederate_count'] = Employee.objects.filter(confederate_veteran=True).count()
context['female_count'] = Employee.objects.filter(gender=Employee.FEMALE).count()
context['vrc_count'] = Employee.objects.filter(vrc=True).count()
return context
general_view = GeneralView.as_view()
class DetailedView(TemplateView):
template_name = 'stats/detailed.html'
def get_context_data(self, **kwargs):
employee_count = Employee.objects.count()
# Employees with date of birth filled
employees_with_dob = Employee.objects.exclude(date_of_birth='')
# Employees with date of death filled
employees_with_dob_and_dod = Employee.objects.exclude(date_of_death='').exclude(date_of_birth='')
# Age in 1865
ages_vrc = get_ages_in_year(employees_with_dob.filter(vrc=True), 1865)
ages_non_vrc = get_ages_in_year(employees_with_dob.filter(vrc=False), 1865)
ages_usct = get_ages_in_year(employees_with_dob.intersection(Employee.objects.usct()), 1865)
ages_everyone = ages_vrc + ages_non_vrc
average_age_in_1865 = {'vrc': get_mean(ages_vrc),
'non_vrc': get_mean(ages_non_vrc),
'usct': get_mean(ages_usct),
'everyone': get_mean(ages_everyone)}
median_age_in_1865 = {'vrc': get_median(ages_vrc),
'non_vrc': get_median(ages_non_vrc),
'usct': get_median(ages_usct),
'everyone': get_median(ages_everyone)}
# Age at time of death
ages_vrc_at_death = get_ages_at_death(employees_with_dob_and_dod.filter(vrc=True))
ages_non_vrc_at_death = get_ages_at_death(employees_with_dob_and_dod.filter(vrc=False))
ages_usct_at_death = get_ages_at_death(employees_with_dob_and_dod.intersection(Employee.objects.usct()))
ages_everyone_at_death = ages_vrc_at_death + ages_non_vrc_at_death
average_age_at_death ={'vrc': get_mean(ages_vrc_at_death),
'non_vrc': get_mean(ages_non_vrc_at_death),
'usct': get_mean(ages_usct_at_death),
'everyone': get_mean(ages_everyone_at_death)}
median_age_at_death = {'vrc': get_median(ages_vrc_at_death),
'non_vrc': get_median(ages_non_vrc_at_death),
'usct': get_median(ages_usct_at_death),
'everyone': get_median(ages_everyone_at_death)}
# Foreign born
foreign_born_vrc = Employee.objects.foreign_born(vrc=True).count()
foreign_born_non_vrc = Employee.objects.foreign_born(vrc=False).count()
foreign_born_usct = Employee.objects.foreign_born().intersection(Employee.objects.usct()).count()
foreign_born = {'vrc': get_percent(foreign_born_vrc, Employee.objects.birthplace_known(vrc=True).count()),
'non_vrc': get_percent(
foreign_born_non_vrc, Employee.objects.birthplace_known(vrc=False).count()),
'usct': get_percent(foreign_born_usct, Employee.objects.birthplace_known().intersection(
Employee.objects.usct()).count()),
'everyone': get_percent(
(foreign_born_vrc + foreign_born_non_vrc), Employee.objects.birthplace_known().count())}
# Top places where employees were born or died, with certain places grouped together
top_birthplaces = get_top_birthplaces(number=25)
top_deathplaces = get_top_deathplaces(number=25)
# Ailments
ailments = []
for ailment in Ailment.objects.all():
ages_at_death = get_ages_at_death(employees_with_dob_and_dod.filter(ailments=ailment))
ailments.append(
{'name': ailment.name,
'vrc': get_percent(Employee.objects.vrc(ailments=ailment).count(), Employee.objects.vrc().count()),
'non_vrc': get_percent(Employee.objects.non_vrc(
ailments=ailment).count(), Employee.objects.non_vrc().count()),
'usct': get_percent(Employee.objects.usct(ailments=ailment).count(), Employee.objects.usct().count()),
'everyone': get_percent(Employee.objects.filter(ailments=ailment).count(), employee_count),
'average_age_at_death': get_mean(ages_at_death),
'median_age_at_death': get_median(ages_at_death)})
ages_at_death = get_ages_at_death(employees_with_dob_and_dod.filter(ailments=None))
ailments.append({'name': 'None',
'vrc': get_percent(Employee.objects.vrc(ailments=None).count(),
Employee.objects.vrc().count()),
'non_vrc': get_percent(Employee.objects.non_vrc(ailments=None).count(),
Employee.objects.non_vrc().count()),
'usct': get_percent(Employee.objects.usct(ailments=None).count(),
Employee.objects.usct().count()),
'everyone': get_percent(Employee.objects.filter(ailments=None).count(), employee_count),
'average_age_at_death': get_mean(ages_at_death),
'median_age_at_death': get_median(ages_at_death)})
context = super().get_context_data(**kwargs)
context['average_age_in_1865'] = average_age_in_1865
context['median_age_in_1865'] = median_age_in_1865
context['average_age_at_death'] = average_age_at_death
context['median_age_at_death'] = median_age_at_death
context['foreign_born'] = foreign_born
context['top_birthplaces'] = top_birthplaces
context['top_deathplaces'] = top_deathplaces
context['ailments'] = ailments
return context
detailed_view = DetailedView.as_view()
def get_top_birthplaces(number=25):
"""
Return top places where employees were born
Group Germany, Prussia, Bavaria, and Saxony, etc. together, because of inconsistencies in reporting of German
places in the sources
Group Virginia and West Virginia together, because it was all Virginia when they were born
"""
top_birthplaces = Place.objects.annotate(
annotated_country=Case(
When(country__name__in=GERMANY_COUNTRY_NAMES, then=Value(GERMANY_COUNTRY_NAME)),
default=F('country__name'), output_field=CharField(),
),
annotated_region=Case(
When(region__name__in=VIRGINIA_REGION_NAMES, then=Value(VIRGINIA_REGION_NAME)),
default=F('region__name'), output_field=CharField(),
),
).values_list('annotated_region', 'annotated_country').annotate(
num_employees=Count('employees_born_in')).order_by('-num_employees')[:number]
return get_places_with_pks_for_context(top_birthplaces)
def get_top_deathplaces(number=25):
"""
Return top places where employees died
Group Germany, Prussia, Bavaria, and Saxony, etc. together, because of inconsistencies in reporting of German
places in the sources
"""
top_deathplaces = Place.objects.annotate(
annotated_country=Case(
When(country__name__in=GERMANY_COUNTRY_NAMES, then=Value(GERMANY_COUNTRY_NAME)),
default=F('country__name'), output_field=CharField(),
),
).values_list('region__name', 'annotated_country').annotate(
num_employees=Count('employees_died_in')).order_by('-num_employees')[:number]
return get_places_with_pks_for_context(top_deathplaces)
def get_places_with_pks_for_context(place_names_and_counts):
"""
Take list of place names (country or region) and counts, get the corresponding Place,
and return list of names, pks, and counts
"""
context_places = []
for (region, country, count) in place_names_and_counts:
if region:
place_pk = Place.objects.filter(region__name=region, county__name__isnull=True,
city__name__isnull=True).first().pk
else:
place_pk = Place.objects.filter(country__name=country, region__name__isnull=True,
county__name__isnull=True, city__name__isnull=True).first().pk
context_places.append((region if region else country, place_pk, count))
return context_places
class StateComparisonView(TemplateView):
template_name = 'stats/state_comparison.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
stats = []
total_employees = Region.objects.bureau_state().annotate(
total=Cast(Count('employee_employed'), FloatField()))
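        # Each "top" queryset below annotates every Bureau state with a value
        # (a raw count or a percentage of that state's employee total) and keeps
        # the five highest non-zero values for display.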
# Top employee count
top_total = total_employees.annotate(value=F('total')).exclude(value=0).order_by('-value')[:5]
stats.append(('Employee count', top_total))
# Top % VRC employees
top_vrc_percent = total_employees.annotate(
value=Cast(Count('employee_employed', filter=Q(employee_employed__vrc=True)), FloatField()) / F(
'total') * 100).exclude(value=0).order_by('-value')[:5]
stats.append(('% VRC employees', top_vrc_percent))
# Top % USCT employees
top_usct_percent = total_employees.annotate(
value=Cast(Count('employee_employed', filter=Q(employee_employed__in=Employee.objects.usct())),
FloatField()) / F('total') * 100).exclude(value=0).order_by('-value')[:5]
stats.append(('% USCT employees', top_usct_percent))
if Employee.objects.birthplace_known().exists():
# Top % foreign-born employees
top_foreign_born_percent = total_employees.annotate(
value=Cast(Count('employee_employed', filter=Q(employee_employed__in=Employee.objects.foreign_born())),
FloatField()) / Cast(Count('employee_employed',
filter=Q(employee_employed__in=Employee.objects.birthplace_known())),
FloatField()) * 100).exclude(value=0).order_by('-value')[:5]
stats.append(('% Foreign-born employees', top_foreign_born_percent))
# Top % employees born in that state
top_born_in_state_percent = total_employees.annotate(
value=Cast(Count('employee_employed', filter=Q(employee_employed__place_of_birth__region__id=F('id'))),
FloatField()) / Cast(Count('employee_employed',
filter=Q(employee_employed__in=Employee.objects.birthplace_known())),
FloatField()) * 100).exclude(value=0).order_by('-value')[:5]
stats.append(('% Employees born there', top_born_in_state_percent))
# Top % female employees
top_female_percent = total_employees.annotate(
value=Cast(Count('employee_employed', filter=Q(employee_employed__gender=Employee.FEMALE)),
FloatField()) / F('total') * 100).exclude(value=0).order_by('-value')[:5]
stats.append(('% Female employees', top_female_percent))
# Top % employees who died during assignment
top_died_during_assignment_percent = total_employees.annotate(
value=Cast(Count('employee_employed', filter=Q(employee_employed__died_during_assignment=True)),
FloatField()) / F('total') * 100).exclude(value=0).order_by('-value')[:5]
stats.append(('% Employees who died during assignment', top_died_during_assignment_percent))
# Top % employees identified as "colored"
top_colored_percent = total_employees.annotate(
value=Cast(Count('employee_employed', filter=Q(employee_employed__colored=True)),
FloatField()) / F('total') * 100).exclude(value=0).order_by('-value')[:5]
stats.append(('% Employees identified as "colored"', top_colored_percent))
# Top # former slave employees
top_former_slave_percent = total_employees.annotate(
value=Count('employee_employed', filter=Q(employee_employed__former_slave=True))).exclude(
value=0).order_by('-value')[:5]
stats.append(('Former slave employees', top_former_slave_percent))
# Top % former slaveholder employees
top_slaveholder_percent = total_employees.annotate(
value=Cast(Count('employee_employed', filter=Q(employee_employed__slaveholder=True)),
FloatField()) / F('total') * 100).exclude(value=0).order_by('-value')[:5]
stats.append(('% Former slaveholder employees', top_slaveholder_percent))
# Top % ex-Confederate employees
top_confederate_percent = total_employees.annotate(
value=Cast(Count('employee_employed', filter=Q(employee_employed__confederate_veteran=True)),
FloatField()) / F('total') * 100).exclude(value=0).order_by('-value')[:5]
stats.append(('% Ex-Confederate employees', top_confederate_percent))
# Top # left-hand penmanship contest entrants
top_penmanship_contest = total_employees.annotate(
value=Count('employee_employed', filter=Q(employee_employed__penmanship_contest=True))).exclude(
value=0).order_by('-value')[:5]
stats.append(('Left-hand penmanship contest entrants', top_penmanship_contest))
# Breakdown per AilmentType
for ailment_type in AilmentType.objects.all():
top_ailment_type_percent = total_employees.annotate(
value=Cast(Count('employee_employed', filter=Q(employee_employed__ailments__type=ailment_type)),
FloatField()) / F('total') * 100).exclude(value=0).order_by('-value')[:5]
stats.append(('% With {}'.format(ailment_type), top_ailment_type_percent))
# Breakdown per Ailment, if more than one for the type
if ailment_type.ailments.count() > 1:
for ailment in ailment_type.ailments.all():
top_ailment_percent = total_employees.annotate(
value=Cast(Count('employee_employed', filter=Q(employee_employed__ailments=ailment)),
FloatField()) / F('total') * 100).exclude(value=0).order_by('-value')[:5]
stats.append(('% With {}'.format(ailment), top_ailment_percent))
context['stats'] = stats
return context
state_comparison_view = StateComparisonView.as_view()
| 53.099315
| 123
| 0.652628
|
b4bf6409c234a4911f375f4a8ee10171e14a38b0
| 5,150
|
py
|
Python
|
azure-iot-device/azure/iot/device/common/pipeline/pipeline_stages_http.py
|
YoDaMa/azure-iot-sdk-python
|
8eb008aba95a0e611aaa034647226a2af65605d2
|
[
"MIT"
] | null | null | null |
azure-iot-device/azure/iot/device/common/pipeline/pipeline_stages_http.py
|
YoDaMa/azure-iot-sdk-python
|
8eb008aba95a0e611aaa034647226a2af65605d2
|
[
"MIT"
] | null | null | null |
azure-iot-device/azure/iot/device/common/pipeline/pipeline_stages_http.py
|
YoDaMa/azure-iot-sdk-python
|
8eb008aba95a0e611aaa034647226a2af65605d2
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import logging
import six
import traceback
import copy
from . import (
pipeline_ops_base,
PipelineStage,
pipeline_ops_http,
pipeline_thread,
pipeline_exceptions,
)
from azure.iot.device.common.http_transport import HTTPTransport
from azure.iot.device.common import handle_exceptions, transport_exceptions
from azure.iot.device.common.callable_weak_method import CallableWeakMethod
logger = logging.getLogger(__name__)
class HTTPTransportStage(PipelineStage):
"""
PipelineStage object which is responsible for interfacing with the HTTP protocol wrapper object.
This stage handles all HTTP operations that are not specific to IoT Hub.
"""
def __init__(self):
super(HTTPTransportStage, self).__init__()
        # The sas_token will be set when Connection Args are received
self.sas_token = None
# The transport will be instantiated when Connection Args are received
self.transport = None
@pipeline_thread.runs_on_pipeline_thread
def _run_op(self, op):
if isinstance(op, pipeline_ops_base.InitializePipelineOperation):
# If there is a gateway hostname, use that as the hostname for connection,
# rather than the hostname itself
if self.pipeline_root.pipeline_configuration.gateway_hostname:
logger.debug(
"Gateway Hostname Present. Setting Hostname to: {}".format(
self.pipeline_root.pipeline_configuration.gateway_hostname
)
)
hostname = self.pipeline_root.pipeline_configuration.gateway_hostname
else:
logger.debug(
"Gateway Hostname not present. Setting Hostname to: {}".format(
self.pipeline_root.pipeline_configuration.hostname
)
)
hostname = self.pipeline_root.pipeline_configuration.hostname
# Create HTTP Transport
logger.debug("{}({}): got connection args".format(self.name, op.name))
self.transport = HTTPTransport(
hostname=hostname,
server_verification_cert=self.pipeline_root.pipeline_configuration.server_verification_cert,
x509_cert=self.pipeline_root.pipeline_configuration.x509,
cipher=self.pipeline_root.pipeline_configuration.cipher,
)
self.pipeline_root.transport = self.transport
op.complete()
elif isinstance(op, pipeline_ops_http.HTTPRequestAndResponseOperation):
            # This will call down to the HTTP Transport with a request and also creates a request callback. Because the HTTP Transport will run on the http transport thread, this call should be non-blocking to the pipeline thread.
logger.debug(
"{}({}): Generating HTTP request and setting callback before completing.".format(
self.name, op.name
)
)
@pipeline_thread.invoke_on_pipeline_thread_nowait
def on_request_completed(error=None, response=None):
if error:
logger.error(
"{}({}): Error passed to on_request_completed. Error={}".format(
self.name, op.name, error
)
)
op.complete(error=error)
else:
logger.debug(
"{}({}): Request completed. Completing op.".format(self.name, op.name)
)
logger.debug("HTTP Response Status: {}".format(response["status_code"]))
logger.debug("HTTP Response: {}".format(response["resp"].decode("utf-8")))
op.response_body = response["resp"]
op.status_code = response["status_code"]
op.reason = response["reason"]
op.complete()
# A deepcopy is necessary here since otherwise the manipulation happening to
# http_headers will affect the op.headers, which would be an unintended side effect
# and not a good practice.
http_headers = copy.deepcopy(op.headers)
if self.pipeline_root.pipeline_configuration.sastoken:
http_headers["Authorization"] = str(
self.pipeline_root.pipeline_configuration.sastoken
)
self.transport.request(
method=op.method,
path=op.path,
headers=http_headers,
query_params=op.query_params,
body=op.body,
callback=on_request_completed,
)
else:
self.send_op_down(op)
| 42.916667
| 229
| 0.587767
|
616c749813108b637322b327604415a80ad25f4d
| 7,928
|
py
|
Python
|
tools/preprocess_data.py
|
ningchaoar/Megatron-LM
|
39196e776a2e9d1bfb11622b988ad7dc9c0f471e
|
[
"MIT"
] | null | null | null |
tools/preprocess_data.py
|
ningchaoar/Megatron-LM
|
39196e776a2e9d1bfb11622b988ad7dc9c0f471e
|
[
"MIT"
] | null | null | null |
tools/preprocess_data.py
|
ningchaoar/Megatron-LM
|
39196e776a2e9d1bfb11622b988ad7dc9c0f471e
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processing data for pretraining."""
import argparse
import json
import multiprocessing
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
import time
import torch
try:
import nltk
nltk_available = True
except ImportError:
nltk_available = False
from megatron.tokenizer import build_tokenizer
from megatron.data import indexed_dataset
# https://stackoverflow.com/questions/33139531/preserve-empty-lines-with-nltks-punkt-tokenizer
class CustomLanguageVars(nltk.tokenize.punkt.PunktLanguageVars):
_period_context_fmt = r"""
\S* # some word material
%(SentEndChars)s # a potential sentence ending
\s* # <-- THIS is what I changed
(?=(?P<after_tok>
%(NonWord)s # either other punctuation
|
(?P<next_tok>\S+) # <-- Normally you would have \s+ here
))"""
class IdentitySplitter(object):
def tokenize(self, *text):
return text
class Encoder(object):
def __init__(self, args):
self.args = args
def initializer(self):
# Use Encoder class as a container for global data
Encoder.tokenizer = build_tokenizer(self.args)
if self.args.split_sentences:
if not nltk_available:
print("NLTK is not available to split sentences.")
exit()
splitter = nltk.load("tokenizers/punkt/english.pickle")
if self.args.keep_newlines:
# this prevents punkt from eating newlines after sentences
Encoder.splitter = nltk.tokenize.punkt.PunktSentenceTokenizer(
train_text = splitter._params,
lang_vars = CustomLanguageVars())
else:
Encoder.splitter = splitter
else:
Encoder.splitter = IdentitySplitter()
def encode(self, json_line):
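        # Tokenize one JSON line: for each requested key, split the text into sentences,
        # tokenize each sentence, optionally append the <eod> token, and return the ids
        # together with the length of the raw line.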
data = json.loads(json_line)
ids = {}
for key in self.args.json_keys:
text = data[key]
if len(text) == 0:
continue
doc_ids = []
for sentence in Encoder.splitter.tokenize(text):
sentence_ids = Encoder.tokenizer.tokenize(sentence)
if len(sentence_ids) > 0:
doc_ids.append(sentence_ids)
if len(doc_ids) > 0 and self.args.append_eod:
doc_ids[-1].append(Encoder.tokenizer.eod)
ids[key] = doc_ids
return ids, len(json_line)
def get_args():
parser = argparse.ArgumentParser()
group = parser.add_argument_group(title='input data')
group.add_argument('--input', type=str, required=True,
help='Path to input JSON')
group.add_argument('--json-keys', nargs='+', default=['text'],
                       help='space separated list of keys to extract from json')
group.add_argument('--split-sentences', action='store_true',
help='Split documents into sentences.')
group.add_argument('--keep-newlines', action='store_true',
help='Keep newlines between sentences when splitting.')
group = parser.add_argument_group(title='tokenizer')
group.add_argument('--tokenizer-type', type=str, required=True,
choices=['BertWordPieceLowerCase','BertWordPieceCase',
'GPT2BPETokenizer'],
help='What type of tokenizer to use.')
group.add_argument('--vocab-file', type=str, default=None,
help='Path to the vocab file')
group.add_argument('--merge-file', type=str, default=None,
help='Path to the BPE merge file (if necessary).')
group.add_argument('--append-eod', action='store_true',
help='Append an <eod> token to the end of a document.')
group = parser.add_argument_group(title='output data')
group.add_argument('--output-prefix', type=str, required=True,
help='Path to binary output file without suffix')
group.add_argument('--dataset-impl', type=str, default='mmap',
choices=['lazy', 'cached', 'mmap'])
group = parser.add_argument_group(title='runtime')
group.add_argument('--workers', type=int, default=1,
help='Number of worker processes to launch')
group.add_argument('--log-interval', type=int, default=100,
help='Interval between progress updates')
args = parser.parse_args()
args.keep_empty = False
if args.tokenizer_type.lower().startswith('bert'):
if not args.split_sentences:
print("Bert tokenizer detected, are you sure you don't want to split sentences?")
# some default/dummy values for the tokenizer
args.rank = 0
args.make_vocab_size_divisible_by = 128
args.tensor_model_parallel_size = 1
args.vocab_extra_ids = 0
return args
def main():
args = get_args()
startup_start = time.time()
print("Opening", args.input)
fin = open(args.input, 'r', encoding='utf-8')
if nltk_available and args.split_sentences:
nltk.download("punkt", quiet=True)
encoder = Encoder(args)
tokenizer = build_tokenizer(args)
pool = multiprocessing.Pool(args.workers, initializer=encoder.initializer)
encoded_docs = pool.imap(encoder.encode, fin, 25)
#encoded_docs = map(encoder.encode, fin)
level = "document"
if args.split_sentences:
level = "sentence"
print(f"Vocab size: {tokenizer.vocab_size}")
print(f"Output prefix: {args.output_prefix}")
output_bin_files = {}
output_idx_files = {}
builders = {}
for key in args.json_keys:
output_bin_files[key] = "{}_{}_{}.bin".format(args.output_prefix,
key, level)
output_idx_files[key] = "{}_{}_{}.idx".format(args.output_prefix,
key, level)
builders[key] = indexed_dataset.make_builder(output_bin_files[key],
impl=args.dataset_impl,
vocab_size=tokenizer.vocab_size)
startup_end = time.time()
proc_start = time.time()
total_bytes_processed = 0
print("Time to startup:", startup_end - startup_start)
for i, (doc, bytes_processed) in enumerate(encoded_docs, start=1):
total_bytes_processed += bytes_processed
for key, sentences in doc.items():
if len(sentences) == 0:
continue
for sentence in sentences:
builders[key].add_item(torch.IntTensor(sentence))
builders[key].end_document()
if i % args.log_interval == 0:
current = time.time()
elapsed = current - proc_start
mbs = total_bytes_processed/elapsed/1024/1024
print(f"Processed {i} documents",
f"({i/elapsed} docs/s, {mbs} MB/s).",
file=sys.stderr)
for key in args.json_keys:
builders[key].finalize(output_idx_files[key])
if __name__ == '__main__':
main()
| 38.485437
| 94
| 0.605701
|
83fe6968dde7cbf2fd964be61e071b0ca8f50315
| 7,759
|
py
|
Python
|
tensorflow_data_validation/utils/test_util_test.py
|
devidipak/data-validation
|
85b7e3f71bd70e4986a55120aa6e24ecbc7b88ce
|
[
"Apache-2.0"
] | 1
|
2019-12-18T18:27:56.000Z
|
2019-12-18T18:27:56.000Z
|
tensorflow_data_validation/utils/test_util_test.py
|
devidipak/data-validation
|
85b7e3f71bd70e4986a55120aa6e24ecbc7b88ce
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_data_validation/utils/test_util_test.py
|
devidipak/data-validation
|
85b7e3f71bd70e4986a55120aa6e24ecbc7b88ce
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from tensorflow_data_validation.utils import test_util
from google.protobuf import text_format
from tensorflow_metadata.proto.v0 import statistics_pb2
class TestAssertFeatureProtoEqual(absltest.TestCase):
"""Tests assert_feature_proto_equal."""
class SampleTestUsingAssertFeatureProtoEqual(
absltest.TestCase):
"""A mock test case.
Calls assert_feature_proto_equal.
"""
    # This is a workaround for unittest in Python 2. It requires the runTest
# method to be implemented if the test is being called directly instead of
# through unittest.main()/absltest.main().
def runTest(self):
pass
def assert_on_equal_feature_protos(self):
expected = text_format.Parse(
"""
name: 'a'
type: BYTES
custom_stats {
name: 'A'
num: 2.5
}
custom_stats {
name: 'B'
num: 3.0
}
""", statistics_pb2.FeatureNameStatistics())
actual = text_format.Parse(
"""
name: 'a'
type: BYTES
custom_stats {
name: 'B'
num: 3.0
}
custom_stats {
name: 'A'
num: 2.5
}
""", statistics_pb2.FeatureNameStatistics())
test_util.assert_feature_proto_equal(
self, actual, expected)
def assert_on_unequal_feature_protos(self):
expected = text_format.Parse(
"""
name: 'a'
custom_stats {
name: 'MI'
num: 2.5
}
""", statistics_pb2.FeatureNameStatistics())
actual = text_format.Parse(
"""
name: 'a'
custom_stats {
name: 'MI'
num: 2.0
}
""", statistics_pb2.FeatureNameStatistics())
test_util.assert_feature_proto_equal(
self, actual, expected)
def setUp(self):
super(TestAssertFeatureProtoEqual, self).setUp()
self._test = self.SampleTestUsingAssertFeatureProtoEqual()
def test_feature_protos_equal(self):
self.assertIsNone(self._test.assert_on_equal_feature_protos())
def test_feature_protos_unequal(self):
with self.assertRaises(AssertionError):
self._test.assert_on_unequal_feature_protos()
class TestAssertDatasetFeatureStatsProtoEqual(absltest.TestCase):
"""Tests assert_dataset_feature_stats_proto_equal."""
class SampleTestUsingAssertDatasetFeatureStatsProtoEqual(absltest.TestCase):
"""A mock test case.
Calls assert_dataset_feature_stats_proto_equal.
"""
    # This is a workaround for unittest in Python 2. It requires the runTest
# method to be implemented if the test is being called directly instead of
# through unittest.main()/absltest.main().
def runTest(self):
pass
def assert_on_two_protos_with_same_features_in_same_order(self):
expected = text_format.Parse(
"""
features {
name: 'fa'
type: STRING
string_stats {
unique: 4
}
}
features {
name: 'fb'
type: STRING
string_stats {
unique: 5
}
}
""", statistics_pb2.DatasetFeatureStatistics())
actual = text_format.Parse(
"""
features {
name: 'fa'
type: STRING
string_stats {
unique: 4
}
}
features {
name: 'fb'
type: STRING
string_stats {
unique: 5
}
}""", statistics_pb2.DatasetFeatureStatistics())
test_util.assert_dataset_feature_stats_proto_equal(self, actual, expected)
def assert_on_two_protos_with_same_features_in_different_order(self):
expected = text_format.Parse(
"""
features {
name: 'fb'
type: STRING
string_stats {
unique: 5
}
}
features {
name: 'fa'
type: STRING
string_stats {
unique: 4
}
}""", statistics_pb2.DatasetFeatureStatistics())
actual = text_format.Parse(
"""
features {
name: 'fa'
type: STRING
string_stats {
unique: 4
}
}
features {
name: 'fb'
type: STRING
string_stats {
unique: 5
}
}""", statistics_pb2.DatasetFeatureStatistics())
test_util.assert_dataset_feature_stats_proto_equal(self, actual, expected)
def assert_on_two_protos_with_different_features(self):
expected = text_format.Parse(
"""
features {
name: 'fa'
type: STRING
string_stats {
unique: 4
}
}""", statistics_pb2.DatasetFeatureStatistics())
actual = text_format.Parse(
"""
features {
name: 'fb'
type: STRING
string_stats {
unique: 5
}
}""", statistics_pb2.DatasetFeatureStatistics())
test_util.assert_dataset_feature_stats_proto_equal(self, actual, expected)
def assert_on_two_protos_with_different_numbers_of_features(self):
expected = text_format.Parse(
"""
features {
name: 'fa'
type: STRING
string_stats {
unique: 4
}
}
features {
name: 'fb'
type: STRING
string_stats {
unique: 5
}
}""", statistics_pb2.DatasetFeatureStatistics())
actual = text_format.Parse(
"""
features {
name: 'fa'
type: STRING
string_stats {
unique: 4
}
}""", statistics_pb2.DatasetFeatureStatistics())
test_util.assert_dataset_feature_stats_proto_equal(self, actual, expected)
def assert_on_two_protos_with_different_num_examples(self):
expected = text_format.Parse(
"""
num_examples: 1
features {
name: 'fa'
type: STRING
string_stats {
unique: 4
}
}
""", statistics_pb2.DatasetFeatureStatistics())
actual = text_format.Parse(
"""
num_examples: 2
features {
name: 'fa'
type: STRING
string_stats {
unique: 4
}
}""", statistics_pb2.DatasetFeatureStatistics())
test_util.assert_dataset_feature_stats_proto_equal(self, actual, expected)
def setUp(self):
super(TestAssertDatasetFeatureStatsProtoEqual, self).setUp()
self._test = self.SampleTestUsingAssertDatasetFeatureStatsProtoEqual()
def test_two_protos_with_same_features_in_same_order(self):
self.assertIsNone(
self._test.assert_on_two_protos_with_same_features_in_same_order())
def test_two_protos_with_same_features_in_different_order(self):
self.assertIsNone(
self._test.assert_on_two_protos_with_same_features_in_different_order())
def test_two_protos_with_different_features(self):
with self.assertRaises(AssertionError):
self._test.assert_on_two_protos_with_different_features()
def test_two_protos_with_different_numbers_of_features(self):
with self.assertRaises(AssertionError):
self._test.assert_on_two_protos_with_different_numbers_of_features()
def test_two_protos_with_different_num_examples(self):
with self.assertRaises(AssertionError):
self._test.assert_on_two_protos_with_different_num_examples()
if __name__ == '__main__':
absltest.main()
| 27.910072
| 80
| 0.603686
|
5cd016398eaab82446f91c8ae92e6bc36aef2f9a
| 5,545
|
py
|
Python
|
python/DeepSeaSceneLighting/SpotLight.py
|
akb825/DeepSea
|
fff790d0a472cf2f9f89de653e0b4470ce605d24
|
[
"Apache-2.0"
] | 5
|
2018-11-17T23:13:22.000Z
|
2021-09-30T13:37:04.000Z
|
python/DeepSeaSceneLighting/SpotLight.py
|
akb825/DeepSea
|
fff790d0a472cf2f9f89de653e0b4470ce605d24
|
[
"Apache-2.0"
] | null | null | null |
python/DeepSeaSceneLighting/SpotLight.py
|
akb825/DeepSea
|
fff790d0a472cf2f9f89de653e0b4470ce605d24
|
[
"Apache-2.0"
] | 2
|
2019-09-23T12:23:35.000Z
|
2020-04-07T05:31:06.000Z
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: DeepSeaSceneLighting
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class SpotLight(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = SpotLight()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsSpotLight(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# SpotLight
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# SpotLight
def Position(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = o + self._tab.Pos
from DeepSeaScene.Vector3f import Vector3f
obj = Vector3f()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SpotLight
def Direction(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = o + self._tab.Pos
from DeepSeaScene.Vector3f import Vector3f
obj = Vector3f()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SpotLight
def Color(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
x = o + self._tab.Pos
from DeepSeaScene.Color3f import Color3f
obj = Color3f()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SpotLight
def Intensity(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
# SpotLight
def LinearFalloff(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
# SpotLight
def QuadraticFalloff(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
# SpotLight
def InnerSpotAngle(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
# SpotLight
def OuterSpotAngle(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
def Start(builder): builder.StartObject(8)
def SpotLightStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
def AddPosition(builder, position): builder.PrependStructSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(position), 0)
def SpotLightAddPosition(builder, position):
"""This method is deprecated. Please switch to AddPosition."""
return AddPosition(builder, position)
def AddDirection(builder, direction): builder.PrependStructSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(direction), 0)
def SpotLightAddDirection(builder, direction):
"""This method is deprecated. Please switch to AddDirection."""
return AddDirection(builder, direction)
def AddColor(builder, color): builder.PrependStructSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(color), 0)
def SpotLightAddColor(builder, color):
"""This method is deprecated. Please switch to AddColor."""
return AddColor(builder, color)
def AddIntensity(builder, intensity): builder.PrependFloat32Slot(3, intensity, 0.0)
def SpotLightAddIntensity(builder, intensity):
"""This method is deprecated. Please switch to AddIntensity."""
return AddIntensity(builder, intensity)
def AddLinearFalloff(builder, linearFalloff): builder.PrependFloat32Slot(4, linearFalloff, 0.0)
def SpotLightAddLinearFalloff(builder, linearFalloff):
"""This method is deprecated. Please switch to AddLinearFalloff."""
return AddLinearFalloff(builder, linearFalloff)
def AddQuadraticFalloff(builder, quadraticFalloff): builder.PrependFloat32Slot(5, quadraticFalloff, 0.0)
def SpotLightAddQuadraticFalloff(builder, quadraticFalloff):
"""This method is deprecated. Please switch to AddQuadraticFalloff."""
return AddQuadraticFalloff(builder, quadraticFalloff)
def AddInnerSpotAngle(builder, innerSpotAngle): builder.PrependFloat32Slot(6, innerSpotAngle, 0.0)
def SpotLightAddInnerSpotAngle(builder, innerSpotAngle):
"""This method is deprecated. Please switch to AddInnerSpotAngle."""
return AddInnerSpotAngle(builder, innerSpotAngle)
def AddOuterSpotAngle(builder, outerSpotAngle): builder.PrependFloat32Slot(7, outerSpotAngle, 0.0)
def SpotLightAddOuterSpotAngle(builder, outerSpotAngle):
"""This method is deprecated. Please switch to AddOuterSpotAngle."""
return AddOuterSpotAngle(builder, outerSpotAngle)
def End(builder): return builder.EndObject()
def SpotLightEnd(builder):
"""This method is deprecated. Please switch to End."""
return End(builder)
| 41.380597
| 128
| 0.699188
|
01b31e905f9b507d191afb83b919048d29f95444
| 274
|
py
|
Python
|
DjangoSite/apps/notes/serializers.py
|
abec/getabe.com
|
adf4a447b1b063010d76ac0c76586c0d4d0ff5c3
|
[
"MIT"
] | null | null | null |
DjangoSite/apps/notes/serializers.py
|
abec/getabe.com
|
adf4a447b1b063010d76ac0c76586c0d4d0ff5c3
|
[
"MIT"
] | null | null | null |
DjangoSite/apps/notes/serializers.py
|
abec/getabe.com
|
adf4a447b1b063010d76ac0c76586c0d4d0ff5c3
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from .models import ChineseMetadata
__all__ = ['ChineseWordsSerializer']
class ChineseWordsSerializer(serializers.ModelSerializer):
class Meta:
model = ChineseMetadata
depth = 2
fields = ('native', 'gender', 'words')
| 21.076923
| 58
| 0.751825
|
f8847755bfcc79b0c0b90cc46de8a588b79611a2
| 467
|
py
|
Python
|
algorithms/virtual-object-detector/capturers/videoCapturer.py
|
AlgoveraAI/creations
|
6aa1cfffb4a59b7ff9fd9573ed743195be5bcfdc
|
[
"MIT"
] | null | null | null |
algorithms/virtual-object-detector/capturers/videoCapturer.py
|
AlgoveraAI/creations
|
6aa1cfffb4a59b7ff9fd9573ed743195be5bcfdc
|
[
"MIT"
] | null | null | null |
algorithms/virtual-object-detector/capturers/videoCapturer.py
|
AlgoveraAI/creations
|
6aa1cfffb4a59b7ff9fd9573ed743195be5bcfdc
|
[
"MIT"
] | null | null | null |
import cv2
import os
from os.path import exists
def detect_from_video(videoPath, callback):
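    # Iterate over the frames of the video at videoPath, passing each frame, the file's
    # basename, and the frame index to callback (when provided).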
    if not exists(videoPath):
return
cap = cv2.VideoCapture(videoPath)
i = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if callback is not None:
callback(frame, os.path.basename(videoPath), i)
i += 1
cap.release()
cv2.destroyAllWindows()
| 22.238095
| 59
| 0.567452
|
46816fcbfdfbc701c4a967504e9726fdecd51b53
| 14,816
|
py
|
Python
|
cherrypy/test/test_objectmapping.py
|
seshness/bearlol
|
ff89bc1e66a96b6e55538a5cee38370e08e5b682
|
[
"BSD-3-Clause"
] | 2
|
2020-12-28T22:37:45.000Z
|
2021-01-23T18:04:46.000Z
|
cherrypy/test/test_objectmapping.py
|
seshness/bearlol
|
ff89bc1e66a96b6e55538a5cee38370e08e5b682
|
[
"BSD-3-Clause"
] | null | null | null |
cherrypy/test/test_objectmapping.py
|
seshness/bearlol
|
ff89bc1e66a96b6e55538a5cee38370e08e5b682
|
[
"BSD-3-Clause"
] | null | null | null |
import cherrypy
from cherrypy._cptree import Application
from cherrypy.test import helper
script_names = ["", "/foo", "/users/fred/blog", "/corp/blog"]
class ObjectMappingTest(helper.CPWebCase):
def setup_server():
class Root:
def index(self, name="world"):
return name
index.exposed = True
def foobar(self):
return "bar"
foobar.exposed = True
def default(self, *params, **kwargs):
return "default:" + repr(params)
default.exposed = True
def other(self):
return "other"
other.exposed = True
def extra(self, *p):
return repr(p)
extra.exposed = True
def redirect(self):
raise cherrypy.HTTPRedirect('dir1/', 302)
redirect.exposed = True
def notExposed(self):
return "not exposed"
def confvalue(self):
return cherrypy.request.config.get("user")
confvalue.exposed = True
def redirect_via_url(self, path):
raise cherrypy.HTTPRedirect(cherrypy.url(path))
redirect_via_url.exposed = True
def translate_html(self):
return "OK"
translate_html.exposed = True
def mapped_func(self, ID=None):
return "ID is %s" % ID
mapped_func.exposed = True
setattr(Root, "Von B\xfclow", mapped_func)
class Exposing:
def base(self):
return "expose works!"
cherrypy.expose(base)
cherrypy.expose(base, "1")
cherrypy.expose(base, "2")
class ExposingNewStyle(object):
def base(self):
return "expose works!"
cherrypy.expose(base)
cherrypy.expose(base, "1")
cherrypy.expose(base, "2")
class Dir1:
def index(self):
return "index for dir1"
index.exposed = True
def myMethod(self):
return "myMethod from dir1, path_info is:" + repr(cherrypy.request.path_info)
myMethod.exposed = True
myMethod._cp_config = {'tools.trailing_slash.extra': True}
def default(self, *params):
return "default for dir1, param is:" + repr(params)
default.exposed = True
class Dir2:
def index(self):
return "index for dir2, path is:" + cherrypy.request.path_info
index.exposed = True
def script_name(self):
return cherrypy.tree.script_name()
script_name.exposed = True
def cherrypy_url(self):
return cherrypy.url("/extra")
cherrypy_url.exposed = True
def posparam(self, *vpath):
return "/".join(vpath)
posparam.exposed = True
class Dir3:
def default(self):
return "default for dir3, not exposed"
class Dir4:
def index(self):
return "index for dir4, not exposed"
class DefNoIndex:
def default(self, *args):
raise cherrypy.HTTPRedirect("contact")
default.exposed = True
# MethodDispatcher code
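        # ByMethod maps HTTP verbs to methods of the same name; the MethodDispatcher
        # configured below routes requests to them by request method.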
class ByMethod:
exposed = True
def __init__(self, *things):
self.things = list(things)
def GET(self):
return repr(self.things)
def POST(self, thing):
self.things.append(thing)
class Collection:
default = ByMethod('a', 'bit')
Root.exposing = Exposing()
Root.exposingnew = ExposingNewStyle()
Root.dir1 = Dir1()
Root.dir1.dir2 = Dir2()
Root.dir1.dir2.dir3 = Dir3()
Root.dir1.dir2.dir3.dir4 = Dir4()
Root.defnoindex = DefNoIndex()
Root.bymethod = ByMethod('another')
Root.collection = Collection()
d = cherrypy.dispatch.MethodDispatcher()
for url in script_names:
conf = {'/': {'user': (url or "/").split("/")[-2]},
'/bymethod': {'request.dispatch': d},
'/collection': {'request.dispatch': d},
}
cherrypy.tree.mount(Root(), url, conf)
class Isolated:
def index(self):
return "made it!"
index.exposed = True
cherrypy.tree.mount(Isolated(), "/isolated")
class AnotherApp:
exposed = True
def GET(self):
return "milk"
cherrypy.tree.mount(AnotherApp(), "/app", {'/': {'request.dispatch': d}})
setup_server = staticmethod(setup_server)
def testObjectMapping(self):
for url in script_names:
prefix = self.script_name = url
self.getPage('/')
self.assertBody('world')
self.getPage("/dir1/myMethod")
self.assertBody("myMethod from dir1, path_info is:'/dir1/myMethod'")
self.getPage("/this/method/does/not/exist")
self.assertBody("default:('this', 'method', 'does', 'not', 'exist')")
self.getPage("/extra/too/much")
self.assertBody("('too', 'much')")
self.getPage("/other")
self.assertBody('other')
self.getPage("/notExposed")
self.assertBody("default:('notExposed',)")
self.getPage("/dir1/dir2/")
self.assertBody('index for dir2, path is:/dir1/dir2/')
# Test omitted trailing slash (should be redirected by default).
self.getPage("/dir1/dir2")
self.assertStatus(301)
self.assertHeader('Location', '%s/dir1/dir2/' % self.base())
# Test extra trailing slash (should be redirected if configured).
self.getPage("/dir1/myMethod/")
self.assertStatus(301)
self.assertHeader('Location', '%s/dir1/myMethod' % self.base())
# Test that default method must be exposed in order to match.
self.getPage("/dir1/dir2/dir3/dir4/index")
self.assertBody("default for dir1, param is:('dir2', 'dir3', 'dir4', 'index')")
# Test *vpath when default() is defined but not index()
# This also tests HTTPRedirect with default.
self.getPage("/defnoindex")
self.assertStatus((302, 303))
self.assertHeader('Location', '%s/contact' % self.base())
self.getPage("/defnoindex/")
self.assertStatus((302, 303))
self.assertHeader('Location', '%s/defnoindex/contact' % self.base())
self.getPage("/defnoindex/page")
self.assertStatus((302, 303))
self.assertHeader('Location', '%s/defnoindex/contact' % self.base())
self.getPage("/redirect")
self.assertStatus('302 Found')
self.assertHeader('Location', '%s/dir1/' % self.base())
if not getattr(cherrypy.server, "using_apache", False):
                # Test that we can use URLs which aren't all valid Python identifiers.
                # This should also test the %XX-unquoting of URLs.
self.getPage("/Von%20B%fclow?ID=14")
self.assertBody("ID is 14")
# Test that %2F in the path doesn't get unquoted too early;
# that is, it should not be used to separate path components.
# See ticket #393.
self.getPage("/page%2Fname")
self.assertBody("default:('page/name',)")
self.getPage("/dir1/dir2/script_name")
self.assertBody(url)
self.getPage("/dir1/dir2/cherrypy_url")
self.assertBody("%s/extra" % self.base())
            # Test that configs don't overwrite each other from different apps
self.getPage("/confvalue")
self.assertBody((url or "/").split("/")[-2])
self.script_name = ""
# Test absoluteURI's in the Request-Line
self.getPage('http://%s:%s/' % (self.interface(), self.PORT))
self.assertBody('world')
self.getPage('http://%s:%s/abs/?service=http://192.168.0.1/x/y/z' %
(self.interface(), self.PORT))
self.assertBody("default:('abs',)")
self.getPage('/rel/?service=http://192.168.120.121:8000/x/y/z')
self.assertBody("default:('rel',)")
# Test that the "isolated" app doesn't leak url's into the root app.
# If it did leak, Root.default() would answer with
# "default:('isolated', 'doesnt', 'exist')".
self.getPage("/isolated/")
self.assertStatus("200 OK")
self.assertBody("made it!")
self.getPage("/isolated/doesnt/exist")
self.assertStatus("404 Not Found")
# Make sure /foobar maps to Root.foobar and not to the app
# mounted at /foo. See http://www.cherrypy.org/ticket/573
self.getPage("/foobar")
self.assertBody("bar")
def test_translate(self):
self.getPage("/translate_html")
self.assertStatus("200 OK")
self.assertBody("OK")
self.getPage("/translate.html")
self.assertStatus("200 OK")
self.assertBody("OK")
self.getPage("/translate-html")
self.assertStatus("200 OK")
self.assertBody("OK")
def test_redir_using_url(self):
for url in script_names:
prefix = self.script_name = url
# Test the absolute path to the parent (leading slash)
self.getPage('/redirect_via_url?path=./')
self.assertStatus(('302 Found', '303 See Other'))
self.assertHeader('Location', '%s/' % self.base())
# Test the relative path to the parent (no leading slash)
self.getPage('/redirect_via_url?path=./')
self.assertStatus(('302 Found', '303 See Other'))
self.assertHeader('Location', '%s/' % self.base())
# Test the absolute path to the parent (leading slash)
self.getPage('/redirect_via_url/?path=./')
self.assertStatus(('302 Found', '303 See Other'))
self.assertHeader('Location', '%s/' % self.base())
# Test the relative path to the parent (no leading slash)
self.getPage('/redirect_via_url/?path=./')
self.assertStatus(('302 Found', '303 See Other'))
self.assertHeader('Location', '%s/' % self.base())
def testPositionalParams(self):
self.getPage("/dir1/dir2/posparam/18/24/hut/hike")
self.assertBody("18/24/hut/hike")
# intermediate index methods should not receive posparams;
# only the "final" index method should do so.
self.getPage("/dir1/dir2/5/3/sir")
self.assertBody("default for dir1, param is:('dir2', '5', '3', 'sir')")
# test that extra positional args raises an 404 Not Found
# See http://www.cherrypy.org/ticket/733.
self.getPage("/dir1/dir2/script_name/extra/stuff")
self.assertStatus(404)
def testExpose(self):
# Test the cherrypy.expose function/decorator
self.getPage("/exposing/base")
self.assertBody("expose works!")
self.getPage("/exposing/1")
self.assertBody("expose works!")
self.getPage("/exposing/2")
self.assertBody("expose works!")
self.getPage("/exposingnew/base")
self.assertBody("expose works!")
self.getPage("/exposingnew/1")
self.assertBody("expose works!")
self.getPage("/exposingnew/2")
self.assertBody("expose works!")
def testMethodDispatch(self):
self.getPage("/bymethod")
self.assertBody("['another']")
self.assertHeader('Allow', 'GET, HEAD, POST')
self.getPage("/bymethod", method="HEAD")
self.assertBody("")
self.assertHeader('Allow', 'GET, HEAD, POST')
self.getPage("/bymethod", method="POST", body="thing=one")
self.assertBody("")
self.assertHeader('Allow', 'GET, HEAD, POST')
self.getPage("/bymethod")
self.assertBody("['another', u'one']")
self.assertHeader('Allow', 'GET, HEAD, POST')
self.getPage("/bymethod", method="PUT")
self.assertErrorPage(405)
self.assertHeader('Allow', 'GET, HEAD, POST')
# Test default with posparams
self.getPage("/collection/silly", method="POST")
self.getPage("/collection", method="GET")
self.assertBody("['a', 'bit', 'silly']")
# Test custom dispatcher set on app root (see #737).
self.getPage("/app")
self.assertBody("milk")
def testTreeMounting(self):
class Root(object):
def hello(self):
return "Hello world!"
hello.exposed = True
# When mounting an application instance,
# we can't specify a different script name in the call to mount.
a = Application(Root(), '/somewhere')
self.assertRaises(ValueError, cherrypy.tree.mount, a, '/somewhereelse')
# When mounting an application instance...
a = Application(Root(), '/somewhere')
        # ...we MUST allow an identical script name in the call to mount...
cherrypy.tree.mount(a, '/somewhere')
self.getPage('/somewhere/hello')
self.assertStatus(200)
# ...and MUST allow a missing script_name.
del cherrypy.tree.apps['/somewhere']
cherrypy.tree.mount(a)
self.getPage('/somewhere/hello')
self.assertStatus(200)
# In addition, we MUST be able to create an Application using
# script_name == None for access to the wsgi_environ.
a = Application(Root(), script_name=None)
# However, this does not apply to tree.mount
self.assertRaises(TypeError, cherrypy.tree.mount, a, None)
| 36.673267
| 93
| 0.527875
|
51cadfd298fa0ce5c47fcd24d421a9eda80c6cc0
| 1,325
|
py
|
Python
|
python_src/graphene-3.0/graphene/test/__init__.py
|
MilesLitteral/graphene-haskell
|
bd157a2678525ef61e7ba239e6c3a338c41228d8
|
[
"MIT"
] | null | null | null |
python_src/graphene-3.0/graphene/test/__init__.py
|
MilesLitteral/graphene-haskell
|
bd157a2678525ef61e7ba239e6c3a338c41228d8
|
[
"MIT"
] | null | null | null |
python_src/graphene-3.0/graphene/test/__init__.py
|
MilesLitteral/graphene-haskell
|
bd157a2678525ef61e7ba239e6c3a338c41228d8
|
[
"MIT"
] | null | null | null |
from promise import Promise, is_thenable
from graphql.error import format_error as format_graphql_error
from graphql.error import GraphQLError
from graphene.types.schema import Schema
def default_format_error(error):
if isinstance(error, GraphQLError):
return format_graphql_error(error)
return {"message": str(error)}
def format_execution_result(execution_result, format_error):
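    # Build a response dict from an ExecutionResult: formatted 'errors' when present,
    # plus the 'data' payload; returns None for a falsy result.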
if execution_result:
response = {}
if execution_result.errors:
response["errors"] = [format_error(e) for e in execution_result.errors]
response["data"] = execution_result.data
return response
class Client:
    def __init__(self, schema: Schema, format_error=None, **execute_options):
assert isinstance(schema, Schema)
self.schema = schema
self.execute_options = execute_options
self.format_error = format_error or default_format_error
def format_result(self, result):
return format_execution_result(result, self.format_error)
def execute(self, *args, **kwargs):
executed = self.schema.execute(*args, **dict(self.execute_options, **kwargs))
if is_thenable(executed):
return Promise.resolve(executed).then(self.format_result)
return self.format_result(executed)
| 33.125
| 85
| 0.72
|
29f2ae64383ed1fbd86fd689e6391b7ba80a7628
| 393
|
py
|
Python
|
api/curve/v1/test/band_test.py
|
QiliangFan/Baidu-Curve
|
b3573b9f0e44557f0bf2455ec7d5a85bc0cffdef
|
[
"Apache-2.0"
] | 478
|
2017-10-26T11:55:28.000Z
|
2022-03-28T06:50:58.000Z
|
api/curve/v1/test/band_test.py
|
QiliangFan/Baidu-Curve
|
b3573b9f0e44557f0bf2455ec7d5a85bc0cffdef
|
[
"Apache-2.0"
] | 60
|
2017-10-28T07:45:45.000Z
|
2020-12-04T14:12:55.000Z
|
api/curve/v1/test/band_test.py
|
QiliangFan/Baidu-Curve
|
b3573b9f0e44557f0bf2455ec7d5a85bc0cffdef
|
[
"Apache-2.0"
] | 132
|
2017-10-24T06:09:05.000Z
|
2021-09-21T16:03:10.000Z
|
# -*- coding: utf-8 -*-
"""
Testing
~~~~
band test
:copyright: (c) 2017-2018 by Baidu, Inc.
:license: Apache, see LICENSE for more details.
"""
from .base import IcurveTestCase
class BandTestCase(IcurveTestCase):
"""
band test
"""
def test_get_band(self):
pass # not implemented
def test_delete_band(self):
pass # not implemented
| 17.086957
| 51
| 0.600509
|
c189cddbd25b1519372c9725856a4ed15913adcf
| 1,189
|
py
|
Python
|
ampel/abstract/AbsBufferComplement.py
|
AmpelProject/Ampel-core
|
dcbfbe38ba400b7f8e44e641b90217ca1bed4f8f
|
[
"BSD-3-Clause"
] | 5
|
2021-04-15T07:43:26.000Z
|
2022-03-04T09:25:09.000Z
|
ampel/abstract/AbsBufferComplement.py
|
AmpelProject/Ampel-core
|
dcbfbe38ba400b7f8e44e641b90217ca1bed4f8f
|
[
"BSD-3-Clause"
] | 67
|
2021-02-23T21:43:20.000Z
|
2021-12-15T23:28:32.000Z
|
ampel/abstract/AbsBufferComplement.py
|
AmpelProject/Ampel-core
|
dcbfbe38ba400b7f8e44e641b90217ca1bed4f8f
|
[
"BSD-3-Clause"
] | 1
|
2021-04-26T07:52:19.000Z
|
2021-04-26T07:52:19.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : Ampel-core/ampel/abstract/AbsBufferComplement.py
# License : BSD-3-Clause
# Author : vb <vbrinnel@physik.hu-berlin.de>
# Date : 16.01.2020
# Last Modified Date: 15.04.2021
# Last Modified By : vb <vbrinnel@physik.hu-berlin.de>
from typing import Iterable, Optional, Dict, Any
from ampel.protocol.LoggerProtocol import LoggerProtocol
from ampel.base.AmpelABC import AmpelABC
from ampel.base.decorator import abstractmethod
from ampel.struct.AmpelBuffer import AmpelBuffer
from ampel.core.ContextUnit import ContextUnit
# Inherits ContextUnit because implementing classes might need access to
# an AmpelConfig instance (foremost to the contained resource definitions)
class AbsBufferComplement(AmpelABC, ContextUnit, abstract=True):
"""
Complement :class:`~ampel.core.AmpelBuffer.AmpelBuffer` with information
stored outside the Ampel database.
"""
logger: LoggerProtocol
session_info: Optional[Dict[str, Any]] = None
@abstractmethod
def complement(self, it: Iterable[AmpelBuffer]) -> None:
"""Add information to each :class:`~ampel.core.AmpelBuffer.AmpelBuffer` """
...
| 36.030303
| 77
| 0.742641
|
a5c9c96441c3ecc0e0e69d0c2aebbafd1192ee8b
| 2,965
|
py
|
Python
|
ScreenServer/rgbScreenServer.py
|
acrandal/MazeRGB-LED
|
1d23b988fa969b1db273bb179a4caecca416d5cc
|
[
"MIT"
] | null | null | null |
ScreenServer/rgbScreenServer.py
|
acrandal/MazeRGB-LED
|
1d23b988fa969b1db273bb179a4caecca416d5cc
|
[
"MIT"
] | null | null | null |
ScreenServer/rgbScreenServer.py
|
acrandal/MazeRGB-LED
|
1d23b988fa969b1db273bb179a4caecca416d5cc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from samplebase import SampleBase
from random import randint, uniform
from time import sleep
import pika, os, sys
import json
from pprint import pprint
class Coordinate:
def __init__(self, x, y):
self.x = x
self.y = y
class Color:
def __init__(self, r, g, b):
self.r = r
self.g = g
self.b = b
class Pixel:
def __init__(self, coordinate, color):
self.coordinate = coordinate
self.color = color
class ScreenServer(SampleBase):
def __init__(self, *args, **kwargs):
super(ScreenServer, self).__init__(*args, **kwargs)
def drawPixel(self, pixel: Pixel):
self.matrix.SetPixel(pixel.coordinate.x, pixel.coordinate.y, pixel.color.r, pixel.color.g, pixel.color.b)
def redrawPixels(self, pixels: list):
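        # Draw every pixel in the list onto the off-screen canvas, then swap it in on
        # the next vsync (double buffering).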
# new_canvas = self.matrix.CreateFrameCanvas()
for pixel_dat in pixels:
self.new_canvas.SetPixel(
pixel_dat["coordinate"]["x"],
pixel_dat["coordinate"]["y"],
pixel_dat["color"]["r"],
pixel_dat["color"]["g"],
pixel_dat["color"]["b"]
)
self.new_canvas = self.matrix.SwapOnVSync(self.new_canvas)
def jsonHandler(self, msg):
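        # Parse a JSON control message and dispatch on its 'type' field:
        # 'clear', 'drawPixel', or 'redraw'.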
try:
dat = json.loads(msg)
except:
print(f"JSON parse fail: {msg}")
return
#pprint(dat)
if dat["type"] == "clear":
self.matrix.Clear()
elif dat["type"] == "drawPixel":
coord = Coordinate(dat["pixel"]["coordinate"]["x"],
dat["pixel"]["coordinate"]["y"])
color = Color(
dat["pixel"]["color"]["r"],
dat["pixel"]["color"]["g"],
dat["pixel"]["color"]["b"])
pixel = Pixel(coord, color)
self.drawPixel(pixel)
elif dat["type"] == "redraw":
pixels = dat["pixels"]
# pprint(pixels)
self.redrawPixels(pixels)
def messageHandler(self, ch, method, properties, body):
msg = str(body, 'utf-8')
if msg[0] == '{':
self.jsonHandler(msg)
else:
print(f"Unknown message format: {msg}")
def run(self):
self.new_canvas = self.matrix.CreateFrameCanvas()
queueName = 'MazeScreen'
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue=queueName)
channel.basic_consume(queue=queueName, on_message_callback=self.messageHandler, auto_ack=True)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
# Main function
if __name__ == "__main__":
print("Starting RGB server")
screenServer = ScreenServer()
if (not screenServer.process()):
screenServer.print_help()
print("Shutting down.")
| 27.453704
| 113
| 0.568634
|
c20b32c154962ad604a2caea73a1c5ebf68bfcfc
| 1,161
|
py
|
Python
|
bigquery_storage/docs/quickstart_test.py
|
nielm/google-cloud-python
|
fd126fdea34206109eb00d675374ff7dc4dcc5ef
|
[
"Apache-2.0"
] | 1
|
2019-06-14T10:11:59.000Z
|
2019-06-14T10:11:59.000Z
|
bigquery_storage/docs/quickstart_test.py
|
nielm/google-cloud-python
|
fd126fdea34206109eb00d675374ff7dc4dcc5ef
|
[
"Apache-2.0"
] | 1
|
2018-04-06T19:51:23.000Z
|
2018-04-06T19:51:23.000Z
|
bigquery_storage/docs/quickstart_test.py
|
nielm/google-cloud-python
|
fd126fdea34206109eb00d675374ff7dc4dcc5ef
|
[
"Apache-2.0"
] | 1
|
2020-04-14T10:47:41.000Z
|
2020-04-14T10:47:41.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import pytest
import quickstart
def now_millis():
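    # Current UTC time as integer milliseconds since the Unix epoch.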
return int(
(datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds()
* 1000
)
@pytest.fixture()
def project_id():
return os.environ["PROJECT_ID"]
def test_quickstart_wo_snapshot(capsys, project_id):
quickstart.main(project_id)
out, _ = capsys.readouterr()
assert "WA" in out
def test_quickstart_with_snapshot(capsys, project_id):
quickstart.main(project_id, now_millis() - 5000)
out, _ = capsys.readouterr()
assert "WA" in out
| 25.8
| 84
| 0.727821
|
6050614077dea3c8e79bc406be8692febfd1ff44
| 80,927
|
py
|
Python
|
benchmarks/ltl_maxplus/f3/maxplus_28_8.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 3
|
2021-04-23T23:29:26.000Z
|
2022-03-23T10:00:30.000Z
|
benchmarks/ltl_maxplus/f3/maxplus_28_8.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | null | null | null |
benchmarks/ltl_maxplus/f3/maxplus_28_8.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 1
|
2021-11-17T22:02:56.000Z
|
2021-11-17T22:02:56.000Z
|
from collections.abc import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_true, msat_make_false
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_rational_type
from mathsat import msat_make_and as _msat_make_and
from mathsat import msat_make_or as _msat_make_or
from mathsat import msat_make_not
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next
def msat_make_and(menv: msat_env, *args):
if len(args) == 0:
return msat_make_true(menv)
if len(args) == 1:
return args[0]
res = _msat_make_and(menv, args[0], args[1])
for arg in args[2:]:
res = _msat_make_and(menv, res, arg)
return res
def msat_make_or(menv: msat_env, *args):
if len(args) == 0:
return msat_make_false(menv)
if len(args) == 1:
return args[0]
res = _msat_make_or(menv, args[0], args[1])
for arg in args[2:]:
res = _msat_make_or(menv, res, arg)
return res
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_m1 = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, n_m1)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
real_type = msat_get_rational_type(menv)
names = ["x_0", "x_1", "x_2", "x_3", "x_4", "x_5", "x_6", "x_7", "x_8", "x_9", "x_10", "x_11", "x_12", "x_13", "x_14", "x_15", "x_16", "x_17", "x_18", "x_19", "x_20", "x_21", "x_22", "x_23", "x_24", "x_25", "x_26", "x_27"]
xs = [msat_declare_function(menv, name, real_type)
for name in names]
xs = [msat_make_constant(menv, x) for x in xs]
x_xs = [msat_declare_function(menv, name_next(name), real_type)
for name in names]
x_xs = [msat_make_constant(menv, x_x) for x_x in x_xs]
curr2next = {x: x_x for x, x_x in zip(xs, x_xs)}
n_10_0 = msat_make_number(menv, "10.0")
n_11_0 = msat_make_number(menv, "11.0")
n_12_0 = msat_make_number(menv, "12.0")
n_13_0 = msat_make_number(menv, "13.0")
n_14_0 = msat_make_number(menv, "14.0")
n_15_0 = msat_make_number(menv, "15.0")
n_16_0 = msat_make_number(menv, "16.0")
n_17_0 = msat_make_number(menv, "17.0")
n_18_0 = msat_make_number(menv, "18.0")
n_19_0 = msat_make_number(menv, "19.0")
n_1_0 = msat_make_number(menv, "1.0")
n_20_0 = msat_make_number(menv, "20.0")
n_2_0 = msat_make_number(menv, "2.0")
n_3_0 = msat_make_number(menv, "3.0")
n_4_0 = msat_make_number(menv, "4.0")
n_5_0 = msat_make_number(menv, "5.0")
n_6_0 = msat_make_number(menv, "6.0")
n_7_0 = msat_make_number(menv, "7.0")
n_8_0 = msat_make_number(menv, "8.0")
n_9_0 = msat_make_number(menv, "9.0")
init = msat_make_true(menv)
trans = msat_make_true(menv)
# transitions
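    # For each variable, the next-state value x_xs[i] is constrained to be >= every
    # shifted predecessor xs[j] + c_ij and equal to at least one of them, i.e.
    # x'_i = max_j (xs[j] + c_ij), the max-plus update.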
expr0 = msat_make_plus(menv, xs[0], n_2_0)
expr1 = msat_make_plus(menv, xs[2], n_17_0)
expr2 = msat_make_plus(menv, xs[4], n_2_0)
expr3 = msat_make_plus(menv, xs[11], n_19_0)
expr4 = msat_make_plus(menv, xs[12], n_10_0)
expr5 = msat_make_plus(menv, xs[13], n_6_0)
expr6 = msat_make_plus(menv, xs[14], n_9_0)
expr7 = msat_make_plus(menv, xs[15], n_20_0)
expr8 = msat_make_plus(menv, xs[17], n_19_0)
expr9 = msat_make_plus(menv, xs[21], n_5_0)
expr10 = msat_make_plus(menv, xs[24], n_18_0)
expr11 = msat_make_plus(menv, xs[25], n_9_0)
expr12 = msat_make_plus(menv, xs[26], n_3_0)
expr13 = msat_make_plus(menv, xs[27], n_16_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[0], expr0),
msat_make_geq(menv, x_xs[0], expr1),
msat_make_geq(menv, x_xs[0], expr2),
msat_make_geq(menv, x_xs[0], expr3),
msat_make_geq(menv, x_xs[0], expr4),
msat_make_geq(menv, x_xs[0], expr5),
msat_make_geq(menv, x_xs[0], expr6),
msat_make_geq(menv, x_xs[0], expr7),
msat_make_geq(menv, x_xs[0], expr8),
msat_make_geq(menv, x_xs[0], expr9),
msat_make_geq(menv, x_xs[0], expr10),
msat_make_geq(menv, x_xs[0], expr11),
msat_make_geq(menv, x_xs[0], expr12),
msat_make_geq(menv, x_xs[0], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[0], expr0),
msat_make_equal(menv, x_xs[0], expr1),
msat_make_equal(menv, x_xs[0], expr2),
msat_make_equal(menv, x_xs[0], expr3),
msat_make_equal(menv, x_xs[0], expr4),
msat_make_equal(menv, x_xs[0], expr5),
msat_make_equal(menv, x_xs[0], expr6),
msat_make_equal(menv, x_xs[0], expr7),
msat_make_equal(menv, x_xs[0], expr8),
msat_make_equal(menv, x_xs[0], expr9),
msat_make_equal(menv, x_xs[0], expr10),
msat_make_equal(menv, x_xs[0], expr11),
msat_make_equal(menv, x_xs[0], expr12),
msat_make_equal(menv, x_xs[0], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_12_0)
expr1 = msat_make_plus(menv, xs[1], n_8_0)
expr2 = msat_make_plus(menv, xs[3], n_11_0)
expr3 = msat_make_plus(menv, xs[4], n_7_0)
expr4 = msat_make_plus(menv, xs[6], n_8_0)
expr5 = msat_make_plus(menv, xs[7], n_19_0)
expr6 = msat_make_plus(menv, xs[8], n_18_0)
expr7 = msat_make_plus(menv, xs[10], n_8_0)
expr8 = msat_make_plus(menv, xs[12], n_20_0)
expr9 = msat_make_plus(menv, xs[13], n_10_0)
expr10 = msat_make_plus(menv, xs[18], n_19_0)
expr11 = msat_make_plus(menv, xs[22], n_16_0)
expr12 = msat_make_plus(menv, xs[24], n_14_0)
expr13 = msat_make_plus(menv, xs[26], n_7_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[1], expr0),
msat_make_geq(menv, x_xs[1], expr1),
msat_make_geq(menv, x_xs[1], expr2),
msat_make_geq(menv, x_xs[1], expr3),
msat_make_geq(menv, x_xs[1], expr4),
msat_make_geq(menv, x_xs[1], expr5),
msat_make_geq(menv, x_xs[1], expr6),
msat_make_geq(menv, x_xs[1], expr7),
msat_make_geq(menv, x_xs[1], expr8),
msat_make_geq(menv, x_xs[1], expr9),
msat_make_geq(menv, x_xs[1], expr10),
msat_make_geq(menv, x_xs[1], expr11),
msat_make_geq(menv, x_xs[1], expr12),
msat_make_geq(menv, x_xs[1], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[1], expr0),
msat_make_equal(menv, x_xs[1], expr1),
msat_make_equal(menv, x_xs[1], expr2),
msat_make_equal(menv, x_xs[1], expr3),
msat_make_equal(menv, x_xs[1], expr4),
msat_make_equal(menv, x_xs[1], expr5),
msat_make_equal(menv, x_xs[1], expr6),
msat_make_equal(menv, x_xs[1], expr7),
msat_make_equal(menv, x_xs[1], expr8),
msat_make_equal(menv, x_xs[1], expr9),
msat_make_equal(menv, x_xs[1], expr10),
msat_make_equal(menv, x_xs[1], expr11),
msat_make_equal(menv, x_xs[1], expr12),
msat_make_equal(menv, x_xs[1], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_5_0)
expr1 = msat_make_plus(menv, xs[3], n_15_0)
expr2 = msat_make_plus(menv, xs[4], n_13_0)
expr3 = msat_make_plus(menv, xs[7], n_14_0)
expr4 = msat_make_plus(menv, xs[8], n_9_0)
expr5 = msat_make_plus(menv, xs[9], n_8_0)
expr6 = msat_make_plus(menv, xs[10], n_15_0)
expr7 = msat_make_plus(menv, xs[12], n_20_0)
expr8 = msat_make_plus(menv, xs[13], n_14_0)
expr9 = msat_make_plus(menv, xs[16], n_10_0)
expr10 = msat_make_plus(menv, xs[21], n_18_0)
expr11 = msat_make_plus(menv, xs[23], n_11_0)
expr12 = msat_make_plus(menv, xs[26], n_16_0)
expr13 = msat_make_plus(menv, xs[27], n_4_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[2], expr0),
msat_make_geq(menv, x_xs[2], expr1),
msat_make_geq(menv, x_xs[2], expr2),
msat_make_geq(menv, x_xs[2], expr3),
msat_make_geq(menv, x_xs[2], expr4),
msat_make_geq(menv, x_xs[2], expr5),
msat_make_geq(menv, x_xs[2], expr6),
msat_make_geq(menv, x_xs[2], expr7),
msat_make_geq(menv, x_xs[2], expr8),
msat_make_geq(menv, x_xs[2], expr9),
msat_make_geq(menv, x_xs[2], expr10),
msat_make_geq(menv, x_xs[2], expr11),
msat_make_geq(menv, x_xs[2], expr12),
msat_make_geq(menv, x_xs[2], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[2], expr0),
msat_make_equal(menv, x_xs[2], expr1),
msat_make_equal(menv, x_xs[2], expr2),
msat_make_equal(menv, x_xs[2], expr3),
msat_make_equal(menv, x_xs[2], expr4),
msat_make_equal(menv, x_xs[2], expr5),
msat_make_equal(menv, x_xs[2], expr6),
msat_make_equal(menv, x_xs[2], expr7),
msat_make_equal(menv, x_xs[2], expr8),
msat_make_equal(menv, x_xs[2], expr9),
msat_make_equal(menv, x_xs[2], expr10),
msat_make_equal(menv, x_xs[2], expr11),
msat_make_equal(menv, x_xs[2], expr12),
msat_make_equal(menv, x_xs[2], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_9_0)
expr1 = msat_make_plus(menv, xs[1], n_5_0)
expr2 = msat_make_plus(menv, xs[3], n_9_0)
expr3 = msat_make_plus(menv, xs[4], n_8_0)
expr4 = msat_make_plus(menv, xs[5], n_7_0)
expr5 = msat_make_plus(menv, xs[8], n_4_0)
expr6 = msat_make_plus(menv, xs[11], n_16_0)
expr7 = msat_make_plus(menv, xs[14], n_11_0)
expr8 = msat_make_plus(menv, xs[15], n_18_0)
expr9 = msat_make_plus(menv, xs[19], n_1_0)
expr10 = msat_make_plus(menv, xs[20], n_12_0)
expr11 = msat_make_plus(menv, xs[22], n_7_0)
expr12 = msat_make_plus(menv, xs[24], n_2_0)
expr13 = msat_make_plus(menv, xs[26], n_13_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[3], expr0),
msat_make_geq(menv, x_xs[3], expr1),
msat_make_geq(menv, x_xs[3], expr2),
msat_make_geq(menv, x_xs[3], expr3),
msat_make_geq(menv, x_xs[3], expr4),
msat_make_geq(menv, x_xs[3], expr5),
msat_make_geq(menv, x_xs[3], expr6),
msat_make_geq(menv, x_xs[3], expr7),
msat_make_geq(menv, x_xs[3], expr8),
msat_make_geq(menv, x_xs[3], expr9),
msat_make_geq(menv, x_xs[3], expr10),
msat_make_geq(menv, x_xs[3], expr11),
msat_make_geq(menv, x_xs[3], expr12),
msat_make_geq(menv, x_xs[3], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[3], expr0),
msat_make_equal(menv, x_xs[3], expr1),
msat_make_equal(menv, x_xs[3], expr2),
msat_make_equal(menv, x_xs[3], expr3),
msat_make_equal(menv, x_xs[3], expr4),
msat_make_equal(menv, x_xs[3], expr5),
msat_make_equal(menv, x_xs[3], expr6),
msat_make_equal(menv, x_xs[3], expr7),
msat_make_equal(menv, x_xs[3], expr8),
msat_make_equal(menv, x_xs[3], expr9),
msat_make_equal(menv, x_xs[3], expr10),
msat_make_equal(menv, x_xs[3], expr11),
msat_make_equal(menv, x_xs[3], expr12),
msat_make_equal(menv, x_xs[3], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[2], n_19_0)
expr1 = msat_make_plus(menv, xs[4], n_17_0)
expr2 = msat_make_plus(menv, xs[5], n_14_0)
expr3 = msat_make_plus(menv, xs[9], n_7_0)
expr4 = msat_make_plus(menv, xs[12], n_20_0)
expr5 = msat_make_plus(menv, xs[13], n_2_0)
expr6 = msat_make_plus(menv, xs[15], n_9_0)
expr7 = msat_make_plus(menv, xs[16], n_20_0)
expr8 = msat_make_plus(menv, xs[17], n_14_0)
expr9 = msat_make_plus(menv, xs[19], n_17_0)
expr10 = msat_make_plus(menv, xs[22], n_10_0)
expr11 = msat_make_plus(menv, xs[23], n_6_0)
expr12 = msat_make_plus(menv, xs[24], n_10_0)
expr13 = msat_make_plus(menv, xs[26], n_1_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[4], expr0),
msat_make_geq(menv, x_xs[4], expr1),
msat_make_geq(menv, x_xs[4], expr2),
msat_make_geq(menv, x_xs[4], expr3),
msat_make_geq(menv, x_xs[4], expr4),
msat_make_geq(menv, x_xs[4], expr5),
msat_make_geq(menv, x_xs[4], expr6),
msat_make_geq(menv, x_xs[4], expr7),
msat_make_geq(menv, x_xs[4], expr8),
msat_make_geq(menv, x_xs[4], expr9),
msat_make_geq(menv, x_xs[4], expr10),
msat_make_geq(menv, x_xs[4], expr11),
msat_make_geq(menv, x_xs[4], expr12),
msat_make_geq(menv, x_xs[4], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[4], expr0),
msat_make_equal(menv, x_xs[4], expr1),
msat_make_equal(menv, x_xs[4], expr2),
msat_make_equal(menv, x_xs[4], expr3),
msat_make_equal(menv, x_xs[4], expr4),
msat_make_equal(menv, x_xs[4], expr5),
msat_make_equal(menv, x_xs[4], expr6),
msat_make_equal(menv, x_xs[4], expr7),
msat_make_equal(menv, x_xs[4], expr8),
msat_make_equal(menv, x_xs[4], expr9),
msat_make_equal(menv, x_xs[4], expr10),
msat_make_equal(menv, x_xs[4], expr11),
msat_make_equal(menv, x_xs[4], expr12),
msat_make_equal(menv, x_xs[4], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[2], n_4_0)
expr1 = msat_make_plus(menv, xs[4], n_13_0)
expr2 = msat_make_plus(menv, xs[5], n_20_0)
expr3 = msat_make_plus(menv, xs[6], n_11_0)
expr4 = msat_make_plus(menv, xs[7], n_18_0)
expr5 = msat_make_plus(menv, xs[9], n_17_0)
expr6 = msat_make_plus(menv, xs[10], n_16_0)
expr7 = msat_make_plus(menv, xs[11], n_18_0)
expr8 = msat_make_plus(menv, xs[12], n_19_0)
expr9 = msat_make_plus(menv, xs[15], n_15_0)
expr10 = msat_make_plus(menv, xs[16], n_15_0)
expr11 = msat_make_plus(menv, xs[17], n_13_0)
expr12 = msat_make_plus(menv, xs[23], n_17_0)
expr13 = msat_make_plus(menv, xs[27], n_2_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[5], expr0),
msat_make_geq(menv, x_xs[5], expr1),
msat_make_geq(menv, x_xs[5], expr2),
msat_make_geq(menv, x_xs[5], expr3),
msat_make_geq(menv, x_xs[5], expr4),
msat_make_geq(menv, x_xs[5], expr5),
msat_make_geq(menv, x_xs[5], expr6),
msat_make_geq(menv, x_xs[5], expr7),
msat_make_geq(menv, x_xs[5], expr8),
msat_make_geq(menv, x_xs[5], expr9),
msat_make_geq(menv, x_xs[5], expr10),
msat_make_geq(menv, x_xs[5], expr11),
msat_make_geq(menv, x_xs[5], expr12),
msat_make_geq(menv, x_xs[5], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[5], expr0),
msat_make_equal(menv, x_xs[5], expr1),
msat_make_equal(menv, x_xs[5], expr2),
msat_make_equal(menv, x_xs[5], expr3),
msat_make_equal(menv, x_xs[5], expr4),
msat_make_equal(menv, x_xs[5], expr5),
msat_make_equal(menv, x_xs[5], expr6),
msat_make_equal(menv, x_xs[5], expr7),
msat_make_equal(menv, x_xs[5], expr8),
msat_make_equal(menv, x_xs[5], expr9),
msat_make_equal(menv, x_xs[5], expr10),
msat_make_equal(menv, x_xs[5], expr11),
msat_make_equal(menv, x_xs[5], expr12),
msat_make_equal(menv, x_xs[5], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[5], n_5_0)
expr1 = msat_make_plus(menv, xs[6], n_7_0)
expr2 = msat_make_plus(menv, xs[7], n_7_0)
expr3 = msat_make_plus(menv, xs[8], n_5_0)
expr4 = msat_make_plus(menv, xs[9], n_11_0)
expr5 = msat_make_plus(menv, xs[10], n_11_0)
expr6 = msat_make_plus(menv, xs[13], n_4_0)
expr7 = msat_make_plus(menv, xs[14], n_1_0)
expr8 = msat_make_plus(menv, xs[15], n_15_0)
expr9 = msat_make_plus(menv, xs[16], n_1_0)
expr10 = msat_make_plus(menv, xs[19], n_5_0)
expr11 = msat_make_plus(menv, xs[20], n_11_0)
expr12 = msat_make_plus(menv, xs[22], n_13_0)
expr13 = msat_make_plus(menv, xs[26], n_12_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[6], expr0),
msat_make_geq(menv, x_xs[6], expr1),
msat_make_geq(menv, x_xs[6], expr2),
msat_make_geq(menv, x_xs[6], expr3),
msat_make_geq(menv, x_xs[6], expr4),
msat_make_geq(menv, x_xs[6], expr5),
msat_make_geq(menv, x_xs[6], expr6),
msat_make_geq(menv, x_xs[6], expr7),
msat_make_geq(menv, x_xs[6], expr8),
msat_make_geq(menv, x_xs[6], expr9),
msat_make_geq(menv, x_xs[6], expr10),
msat_make_geq(menv, x_xs[6], expr11),
msat_make_geq(menv, x_xs[6], expr12),
msat_make_geq(menv, x_xs[6], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[6], expr0),
msat_make_equal(menv, x_xs[6], expr1),
msat_make_equal(menv, x_xs[6], expr2),
msat_make_equal(menv, x_xs[6], expr3),
msat_make_equal(menv, x_xs[6], expr4),
msat_make_equal(menv, x_xs[6], expr5),
msat_make_equal(menv, x_xs[6], expr6),
msat_make_equal(menv, x_xs[6], expr7),
msat_make_equal(menv, x_xs[6], expr8),
msat_make_equal(menv, x_xs[6], expr9),
msat_make_equal(menv, x_xs[6], expr10),
msat_make_equal(menv, x_xs[6], expr11),
msat_make_equal(menv, x_xs[6], expr12),
msat_make_equal(menv, x_xs[6], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[5], n_10_0)
expr1 = msat_make_plus(menv, xs[7], n_5_0)
expr2 = msat_make_plus(menv, xs[8], n_8_0)
expr3 = msat_make_plus(menv, xs[11], n_6_0)
expr4 = msat_make_plus(menv, xs[13], n_16_0)
expr5 = msat_make_plus(menv, xs[15], n_17_0)
expr6 = msat_make_plus(menv, xs[16], n_18_0)
expr7 = msat_make_plus(menv, xs[17], n_6_0)
expr8 = msat_make_plus(menv, xs[21], n_19_0)
expr9 = msat_make_plus(menv, xs[23], n_16_0)
expr10 = msat_make_plus(menv, xs[24], n_13_0)
expr11 = msat_make_plus(menv, xs[25], n_19_0)
expr12 = msat_make_plus(menv, xs[26], n_15_0)
expr13 = msat_make_plus(menv, xs[27], n_18_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[7], expr0),
msat_make_geq(menv, x_xs[7], expr1),
msat_make_geq(menv, x_xs[7], expr2),
msat_make_geq(menv, x_xs[7], expr3),
msat_make_geq(menv, x_xs[7], expr4),
msat_make_geq(menv, x_xs[7], expr5),
msat_make_geq(menv, x_xs[7], expr6),
msat_make_geq(menv, x_xs[7], expr7),
msat_make_geq(menv, x_xs[7], expr8),
msat_make_geq(menv, x_xs[7], expr9),
msat_make_geq(menv, x_xs[7], expr10),
msat_make_geq(menv, x_xs[7], expr11),
msat_make_geq(menv, x_xs[7], expr12),
msat_make_geq(menv, x_xs[7], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[7], expr0),
msat_make_equal(menv, x_xs[7], expr1),
msat_make_equal(menv, x_xs[7], expr2),
msat_make_equal(menv, x_xs[7], expr3),
msat_make_equal(menv, x_xs[7], expr4),
msat_make_equal(menv, x_xs[7], expr5),
msat_make_equal(menv, x_xs[7], expr6),
msat_make_equal(menv, x_xs[7], expr7),
msat_make_equal(menv, x_xs[7], expr8),
msat_make_equal(menv, x_xs[7], expr9),
msat_make_equal(menv, x_xs[7], expr10),
msat_make_equal(menv, x_xs[7], expr11),
msat_make_equal(menv, x_xs[7], expr12),
msat_make_equal(menv, x_xs[7], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[2], n_2_0)
expr1 = msat_make_plus(menv, xs[4], n_3_0)
expr2 = msat_make_plus(menv, xs[5], n_11_0)
expr3 = msat_make_plus(menv, xs[6], n_13_0)
expr4 = msat_make_plus(menv, xs[7], n_16_0)
expr5 = msat_make_plus(menv, xs[8], n_11_0)
expr6 = msat_make_plus(menv, xs[10], n_11_0)
expr7 = msat_make_plus(menv, xs[14], n_7_0)
expr8 = msat_make_plus(menv, xs[15], n_13_0)
expr9 = msat_make_plus(menv, xs[16], n_15_0)
expr10 = msat_make_plus(menv, xs[17], n_5_0)
expr11 = msat_make_plus(menv, xs[23], n_5_0)
expr12 = msat_make_plus(menv, xs[24], n_15_0)
expr13 = msat_make_plus(menv, xs[25], n_3_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[8], expr0),
msat_make_geq(menv, x_xs[8], expr1),
msat_make_geq(menv, x_xs[8], expr2),
msat_make_geq(menv, x_xs[8], expr3),
msat_make_geq(menv, x_xs[8], expr4),
msat_make_geq(menv, x_xs[8], expr5),
msat_make_geq(menv, x_xs[8], expr6),
msat_make_geq(menv, x_xs[8], expr7),
msat_make_geq(menv, x_xs[8], expr8),
msat_make_geq(menv, x_xs[8], expr9),
msat_make_geq(menv, x_xs[8], expr10),
msat_make_geq(menv, x_xs[8], expr11),
msat_make_geq(menv, x_xs[8], expr12),
msat_make_geq(menv, x_xs[8], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[8], expr0),
msat_make_equal(menv, x_xs[8], expr1),
msat_make_equal(menv, x_xs[8], expr2),
msat_make_equal(menv, x_xs[8], expr3),
msat_make_equal(menv, x_xs[8], expr4),
msat_make_equal(menv, x_xs[8], expr5),
msat_make_equal(menv, x_xs[8], expr6),
msat_make_equal(menv, x_xs[8], expr7),
msat_make_equal(menv, x_xs[8], expr8),
msat_make_equal(menv, x_xs[8], expr9),
msat_make_equal(menv, x_xs[8], expr10),
msat_make_equal(menv, x_xs[8], expr11),
msat_make_equal(menv, x_xs[8], expr12),
msat_make_equal(menv, x_xs[8], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_8_0)
expr1 = msat_make_plus(menv, xs[1], n_2_0)
expr2 = msat_make_plus(menv, xs[3], n_5_0)
expr3 = msat_make_plus(menv, xs[4], n_3_0)
expr4 = msat_make_plus(menv, xs[5], n_11_0)
expr5 = msat_make_plus(menv, xs[7], n_9_0)
expr6 = msat_make_plus(menv, xs[10], n_4_0)
expr7 = msat_make_plus(menv, xs[11], n_16_0)
expr8 = msat_make_plus(menv, xs[12], n_9_0)
expr9 = msat_make_plus(menv, xs[13], n_13_0)
expr10 = msat_make_plus(menv, xs[14], n_15_0)
expr11 = msat_make_plus(menv, xs[17], n_9_0)
expr12 = msat_make_plus(menv, xs[21], n_20_0)
expr13 = msat_make_plus(menv, xs[27], n_12_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[9], expr0),
msat_make_geq(menv, x_xs[9], expr1),
msat_make_geq(menv, x_xs[9], expr2),
msat_make_geq(menv, x_xs[9], expr3),
msat_make_geq(menv, x_xs[9], expr4),
msat_make_geq(menv, x_xs[9], expr5),
msat_make_geq(menv, x_xs[9], expr6),
msat_make_geq(menv, x_xs[9], expr7),
msat_make_geq(menv, x_xs[9], expr8),
msat_make_geq(menv, x_xs[9], expr9),
msat_make_geq(menv, x_xs[9], expr10),
msat_make_geq(menv, x_xs[9], expr11),
msat_make_geq(menv, x_xs[9], expr12),
msat_make_geq(menv, x_xs[9], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[9], expr0),
msat_make_equal(menv, x_xs[9], expr1),
msat_make_equal(menv, x_xs[9], expr2),
msat_make_equal(menv, x_xs[9], expr3),
msat_make_equal(menv, x_xs[9], expr4),
msat_make_equal(menv, x_xs[9], expr5),
msat_make_equal(menv, x_xs[9], expr6),
msat_make_equal(menv, x_xs[9], expr7),
msat_make_equal(menv, x_xs[9], expr8),
msat_make_equal(menv, x_xs[9], expr9),
msat_make_equal(menv, x_xs[9], expr10),
msat_make_equal(menv, x_xs[9], expr11),
msat_make_equal(menv, x_xs[9], expr12),
msat_make_equal(menv, x_xs[9], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[3], n_7_0)
expr1 = msat_make_plus(menv, xs[4], n_2_0)
expr2 = msat_make_plus(menv, xs[8], n_11_0)
expr3 = msat_make_plus(menv, xs[9], n_2_0)
expr4 = msat_make_plus(menv, xs[10], n_10_0)
expr5 = msat_make_plus(menv, xs[12], n_5_0)
expr6 = msat_make_plus(menv, xs[13], n_8_0)
expr7 = msat_make_plus(menv, xs[16], n_9_0)
expr8 = msat_make_plus(menv, xs[18], n_13_0)
expr9 = msat_make_plus(menv, xs[20], n_20_0)
expr10 = msat_make_plus(menv, xs[24], n_18_0)
expr11 = msat_make_plus(menv, xs[25], n_17_0)
expr12 = msat_make_plus(menv, xs[26], n_17_0)
expr13 = msat_make_plus(menv, xs[27], n_10_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[10], expr0),
msat_make_geq(menv, x_xs[10], expr1),
msat_make_geq(menv, x_xs[10], expr2),
msat_make_geq(menv, x_xs[10], expr3),
msat_make_geq(menv, x_xs[10], expr4),
msat_make_geq(menv, x_xs[10], expr5),
msat_make_geq(menv, x_xs[10], expr6),
msat_make_geq(menv, x_xs[10], expr7),
msat_make_geq(menv, x_xs[10], expr8),
msat_make_geq(menv, x_xs[10], expr9),
msat_make_geq(menv, x_xs[10], expr10),
msat_make_geq(menv, x_xs[10], expr11),
msat_make_geq(menv, x_xs[10], expr12),
msat_make_geq(menv, x_xs[10], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[10], expr0),
msat_make_equal(menv, x_xs[10], expr1),
msat_make_equal(menv, x_xs[10], expr2),
msat_make_equal(menv, x_xs[10], expr3),
msat_make_equal(menv, x_xs[10], expr4),
msat_make_equal(menv, x_xs[10], expr5),
msat_make_equal(menv, x_xs[10], expr6),
msat_make_equal(menv, x_xs[10], expr7),
msat_make_equal(menv, x_xs[10], expr8),
msat_make_equal(menv, x_xs[10], expr9),
msat_make_equal(menv, x_xs[10], expr10),
msat_make_equal(menv, x_xs[10], expr11),
msat_make_equal(menv, x_xs[10], expr12),
msat_make_equal(menv, x_xs[10], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_15_0)
expr1 = msat_make_plus(menv, xs[1], n_13_0)
expr2 = msat_make_plus(menv, xs[4], n_16_0)
expr3 = msat_make_plus(menv, xs[5], n_3_0)
expr4 = msat_make_plus(menv, xs[8], n_11_0)
expr5 = msat_make_plus(menv, xs[13], n_12_0)
expr6 = msat_make_plus(menv, xs[15], n_17_0)
expr7 = msat_make_plus(menv, xs[16], n_6_0)
expr8 = msat_make_plus(menv, xs[17], n_19_0)
expr9 = msat_make_plus(menv, xs[19], n_8_0)
expr10 = msat_make_plus(menv, xs[20], n_20_0)
expr11 = msat_make_plus(menv, xs[22], n_11_0)
expr12 = msat_make_plus(menv, xs[23], n_4_0)
expr13 = msat_make_plus(menv, xs[25], n_11_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[11], expr0),
msat_make_geq(menv, x_xs[11], expr1),
msat_make_geq(menv, x_xs[11], expr2),
msat_make_geq(menv, x_xs[11], expr3),
msat_make_geq(menv, x_xs[11], expr4),
msat_make_geq(menv, x_xs[11], expr5),
msat_make_geq(menv, x_xs[11], expr6),
msat_make_geq(menv, x_xs[11], expr7),
msat_make_geq(menv, x_xs[11], expr8),
msat_make_geq(menv, x_xs[11], expr9),
msat_make_geq(menv, x_xs[11], expr10),
msat_make_geq(menv, x_xs[11], expr11),
msat_make_geq(menv, x_xs[11], expr12),
msat_make_geq(menv, x_xs[11], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[11], expr0),
msat_make_equal(menv, x_xs[11], expr1),
msat_make_equal(menv, x_xs[11], expr2),
msat_make_equal(menv, x_xs[11], expr3),
msat_make_equal(menv, x_xs[11], expr4),
msat_make_equal(menv, x_xs[11], expr5),
msat_make_equal(menv, x_xs[11], expr6),
msat_make_equal(menv, x_xs[11], expr7),
msat_make_equal(menv, x_xs[11], expr8),
msat_make_equal(menv, x_xs[11], expr9),
msat_make_equal(menv, x_xs[11], expr10),
msat_make_equal(menv, x_xs[11], expr11),
msat_make_equal(menv, x_xs[11], expr12),
msat_make_equal(menv, x_xs[11], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_19_0)
expr1 = msat_make_plus(menv, xs[2], n_17_0)
expr2 = msat_make_plus(menv, xs[3], n_11_0)
expr3 = msat_make_plus(menv, xs[6], n_6_0)
expr4 = msat_make_plus(menv, xs[7], n_6_0)
expr5 = msat_make_plus(menv, xs[9], n_9_0)
expr6 = msat_make_plus(menv, xs[13], n_19_0)
expr7 = msat_make_plus(menv, xs[17], n_12_0)
expr8 = msat_make_plus(menv, xs[18], n_10_0)
expr9 = msat_make_plus(menv, xs[19], n_3_0)
expr10 = msat_make_plus(menv, xs[21], n_4_0)
expr11 = msat_make_plus(menv, xs[22], n_12_0)
expr12 = msat_make_plus(menv, xs[24], n_4_0)
expr13 = msat_make_plus(menv, xs[25], n_15_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[12], expr0),
msat_make_geq(menv, x_xs[12], expr1),
msat_make_geq(menv, x_xs[12], expr2),
msat_make_geq(menv, x_xs[12], expr3),
msat_make_geq(menv, x_xs[12], expr4),
msat_make_geq(menv, x_xs[12], expr5),
msat_make_geq(menv, x_xs[12], expr6),
msat_make_geq(menv, x_xs[12], expr7),
msat_make_geq(menv, x_xs[12], expr8),
msat_make_geq(menv, x_xs[12], expr9),
msat_make_geq(menv, x_xs[12], expr10),
msat_make_geq(menv, x_xs[12], expr11),
msat_make_geq(menv, x_xs[12], expr12),
msat_make_geq(menv, x_xs[12], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[12], expr0),
msat_make_equal(menv, x_xs[12], expr1),
msat_make_equal(menv, x_xs[12], expr2),
msat_make_equal(menv, x_xs[12], expr3),
msat_make_equal(menv, x_xs[12], expr4),
msat_make_equal(menv, x_xs[12], expr5),
msat_make_equal(menv, x_xs[12], expr6),
msat_make_equal(menv, x_xs[12], expr7),
msat_make_equal(menv, x_xs[12], expr8),
msat_make_equal(menv, x_xs[12], expr9),
msat_make_equal(menv, x_xs[12], expr10),
msat_make_equal(menv, x_xs[12], expr11),
msat_make_equal(menv, x_xs[12], expr12),
msat_make_equal(menv, x_xs[12], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_5_0)
expr1 = msat_make_plus(menv, xs[2], n_6_0)
expr2 = msat_make_plus(menv, xs[3], n_19_0)
expr3 = msat_make_plus(menv, xs[5], n_9_0)
expr4 = msat_make_plus(menv, xs[9], n_9_0)
expr5 = msat_make_plus(menv, xs[10], n_14_0)
expr6 = msat_make_plus(menv, xs[13], n_7_0)
expr7 = msat_make_plus(menv, xs[14], n_14_0)
expr8 = msat_make_plus(menv, xs[15], n_17_0)
expr9 = msat_make_plus(menv, xs[17], n_3_0)
expr10 = msat_make_plus(menv, xs[22], n_10_0)
expr11 = msat_make_plus(menv, xs[24], n_16_0)
expr12 = msat_make_plus(menv, xs[25], n_4_0)
expr13 = msat_make_plus(menv, xs[26], n_7_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[13], expr0),
msat_make_geq(menv, x_xs[13], expr1),
msat_make_geq(menv, x_xs[13], expr2),
msat_make_geq(menv, x_xs[13], expr3),
msat_make_geq(menv, x_xs[13], expr4),
msat_make_geq(menv, x_xs[13], expr5),
msat_make_geq(menv, x_xs[13], expr6),
msat_make_geq(menv, x_xs[13], expr7),
msat_make_geq(menv, x_xs[13], expr8),
msat_make_geq(menv, x_xs[13], expr9),
msat_make_geq(menv, x_xs[13], expr10),
msat_make_geq(menv, x_xs[13], expr11),
msat_make_geq(menv, x_xs[13], expr12),
msat_make_geq(menv, x_xs[13], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[13], expr0),
msat_make_equal(menv, x_xs[13], expr1),
msat_make_equal(menv, x_xs[13], expr2),
msat_make_equal(menv, x_xs[13], expr3),
msat_make_equal(menv, x_xs[13], expr4),
msat_make_equal(menv, x_xs[13], expr5),
msat_make_equal(menv, x_xs[13], expr6),
msat_make_equal(menv, x_xs[13], expr7),
msat_make_equal(menv, x_xs[13], expr8),
msat_make_equal(menv, x_xs[13], expr9),
msat_make_equal(menv, x_xs[13], expr10),
msat_make_equal(menv, x_xs[13], expr11),
msat_make_equal(menv, x_xs[13], expr12),
msat_make_equal(menv, x_xs[13], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_13_0)
expr1 = msat_make_plus(menv, xs[1], n_14_0)
expr2 = msat_make_plus(menv, xs[4], n_1_0)
expr3 = msat_make_plus(menv, xs[5], n_16_0)
expr4 = msat_make_plus(menv, xs[7], n_10_0)
expr5 = msat_make_plus(menv, xs[9], n_16_0)
expr6 = msat_make_plus(menv, xs[11], n_2_0)
expr7 = msat_make_plus(menv, xs[14], n_9_0)
expr8 = msat_make_plus(menv, xs[15], n_19_0)
expr9 = msat_make_plus(menv, xs[16], n_16_0)
expr10 = msat_make_plus(menv, xs[17], n_15_0)
expr11 = msat_make_plus(menv, xs[20], n_16_0)
expr12 = msat_make_plus(menv, xs[25], n_16_0)
expr13 = msat_make_plus(menv, xs[27], n_10_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[14], expr0),
msat_make_geq(menv, x_xs[14], expr1),
msat_make_geq(menv, x_xs[14], expr2),
msat_make_geq(menv, x_xs[14], expr3),
msat_make_geq(menv, x_xs[14], expr4),
msat_make_geq(menv, x_xs[14], expr5),
msat_make_geq(menv, x_xs[14], expr6),
msat_make_geq(menv, x_xs[14], expr7),
msat_make_geq(menv, x_xs[14], expr8),
msat_make_geq(menv, x_xs[14], expr9),
msat_make_geq(menv, x_xs[14], expr10),
msat_make_geq(menv, x_xs[14], expr11),
msat_make_geq(menv, x_xs[14], expr12),
msat_make_geq(menv, x_xs[14], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[14], expr0),
msat_make_equal(menv, x_xs[14], expr1),
msat_make_equal(menv, x_xs[14], expr2),
msat_make_equal(menv, x_xs[14], expr3),
msat_make_equal(menv, x_xs[14], expr4),
msat_make_equal(menv, x_xs[14], expr5),
msat_make_equal(menv, x_xs[14], expr6),
msat_make_equal(menv, x_xs[14], expr7),
msat_make_equal(menv, x_xs[14], expr8),
msat_make_equal(menv, x_xs[14], expr9),
msat_make_equal(menv, x_xs[14], expr10),
msat_make_equal(menv, x_xs[14], expr11),
msat_make_equal(menv, x_xs[14], expr12),
msat_make_equal(menv, x_xs[14], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_17_0)
expr1 = msat_make_plus(menv, xs[1], n_16_0)
expr2 = msat_make_plus(menv, xs[3], n_3_0)
expr3 = msat_make_plus(menv, xs[6], n_20_0)
expr4 = msat_make_plus(menv, xs[7], n_7_0)
expr5 = msat_make_plus(menv, xs[9], n_20_0)
expr6 = msat_make_plus(menv, xs[10], n_20_0)
expr7 = msat_make_plus(menv, xs[16], n_10_0)
expr8 = msat_make_plus(menv, xs[17], n_4_0)
expr9 = msat_make_plus(menv, xs[19], n_6_0)
expr10 = msat_make_plus(menv, xs[22], n_1_0)
expr11 = msat_make_plus(menv, xs[23], n_12_0)
expr12 = msat_make_plus(menv, xs[24], n_5_0)
expr13 = msat_make_plus(menv, xs[27], n_12_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[15], expr0),
msat_make_geq(menv, x_xs[15], expr1),
msat_make_geq(menv, x_xs[15], expr2),
msat_make_geq(menv, x_xs[15], expr3),
msat_make_geq(menv, x_xs[15], expr4),
msat_make_geq(menv, x_xs[15], expr5),
msat_make_geq(menv, x_xs[15], expr6),
msat_make_geq(menv, x_xs[15], expr7),
msat_make_geq(menv, x_xs[15], expr8),
msat_make_geq(menv, x_xs[15], expr9),
msat_make_geq(menv, x_xs[15], expr10),
msat_make_geq(menv, x_xs[15], expr11),
msat_make_geq(menv, x_xs[15], expr12),
msat_make_geq(menv, x_xs[15], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[15], expr0),
msat_make_equal(menv, x_xs[15], expr1),
msat_make_equal(menv, x_xs[15], expr2),
msat_make_equal(menv, x_xs[15], expr3),
msat_make_equal(menv, x_xs[15], expr4),
msat_make_equal(menv, x_xs[15], expr5),
msat_make_equal(menv, x_xs[15], expr6),
msat_make_equal(menv, x_xs[15], expr7),
msat_make_equal(menv, x_xs[15], expr8),
msat_make_equal(menv, x_xs[15], expr9),
msat_make_equal(menv, x_xs[15], expr10),
msat_make_equal(menv, x_xs[15], expr11),
msat_make_equal(menv, x_xs[15], expr12),
msat_make_equal(menv, x_xs[15], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_18_0)
expr1 = msat_make_plus(menv, xs[2], n_12_0)
expr2 = msat_make_plus(menv, xs[3], n_1_0)
expr3 = msat_make_plus(menv, xs[4], n_10_0)
expr4 = msat_make_plus(menv, xs[6], n_11_0)
expr5 = msat_make_plus(menv, xs[8], n_5_0)
expr6 = msat_make_plus(menv, xs[11], n_16_0)
expr7 = msat_make_plus(menv, xs[14], n_20_0)
expr8 = msat_make_plus(menv, xs[18], n_20_0)
expr9 = msat_make_plus(menv, xs[19], n_6_0)
expr10 = msat_make_plus(menv, xs[20], n_20_0)
expr11 = msat_make_plus(menv, xs[21], n_17_0)
expr12 = msat_make_plus(menv, xs[22], n_19_0)
expr13 = msat_make_plus(menv, xs[27], n_9_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[16], expr0),
msat_make_geq(menv, x_xs[16], expr1),
msat_make_geq(menv, x_xs[16], expr2),
msat_make_geq(menv, x_xs[16], expr3),
msat_make_geq(menv, x_xs[16], expr4),
msat_make_geq(menv, x_xs[16], expr5),
msat_make_geq(menv, x_xs[16], expr6),
msat_make_geq(menv, x_xs[16], expr7),
msat_make_geq(menv, x_xs[16], expr8),
msat_make_geq(menv, x_xs[16], expr9),
msat_make_geq(menv, x_xs[16], expr10),
msat_make_geq(menv, x_xs[16], expr11),
msat_make_geq(menv, x_xs[16], expr12),
msat_make_geq(menv, x_xs[16], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[16], expr0),
msat_make_equal(menv, x_xs[16], expr1),
msat_make_equal(menv, x_xs[16], expr2),
msat_make_equal(menv, x_xs[16], expr3),
msat_make_equal(menv, x_xs[16], expr4),
msat_make_equal(menv, x_xs[16], expr5),
msat_make_equal(menv, x_xs[16], expr6),
msat_make_equal(menv, x_xs[16], expr7),
msat_make_equal(menv, x_xs[16], expr8),
msat_make_equal(menv, x_xs[16], expr9),
msat_make_equal(menv, x_xs[16], expr10),
msat_make_equal(menv, x_xs[16], expr11),
msat_make_equal(menv, x_xs[16], expr12),
msat_make_equal(menv, x_xs[16], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_17_0)
expr1 = msat_make_plus(menv, xs[1], n_14_0)
expr2 = msat_make_plus(menv, xs[4], n_14_0)
expr3 = msat_make_plus(menv, xs[6], n_12_0)
expr4 = msat_make_plus(menv, xs[7], n_20_0)
expr5 = msat_make_plus(menv, xs[15], n_12_0)
expr6 = msat_make_plus(menv, xs[18], n_10_0)
expr7 = msat_make_plus(menv, xs[19], n_3_0)
expr8 = msat_make_plus(menv, xs[20], n_3_0)
expr9 = msat_make_plus(menv, xs[21], n_3_0)
expr10 = msat_make_plus(menv, xs[22], n_17_0)
expr11 = msat_make_plus(menv, xs[23], n_12_0)
expr12 = msat_make_plus(menv, xs[26], n_11_0)
expr13 = msat_make_plus(menv, xs[27], n_18_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[17], expr0),
msat_make_geq(menv, x_xs[17], expr1),
msat_make_geq(menv, x_xs[17], expr2),
msat_make_geq(menv, x_xs[17], expr3),
msat_make_geq(menv, x_xs[17], expr4),
msat_make_geq(menv, x_xs[17], expr5),
msat_make_geq(menv, x_xs[17], expr6),
msat_make_geq(menv, x_xs[17], expr7),
msat_make_geq(menv, x_xs[17], expr8),
msat_make_geq(menv, x_xs[17], expr9),
msat_make_geq(menv, x_xs[17], expr10),
msat_make_geq(menv, x_xs[17], expr11),
msat_make_geq(menv, x_xs[17], expr12),
msat_make_geq(menv, x_xs[17], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[17], expr0),
msat_make_equal(menv, x_xs[17], expr1),
msat_make_equal(menv, x_xs[17], expr2),
msat_make_equal(menv, x_xs[17], expr3),
msat_make_equal(menv, x_xs[17], expr4),
msat_make_equal(menv, x_xs[17], expr5),
msat_make_equal(menv, x_xs[17], expr6),
msat_make_equal(menv, x_xs[17], expr7),
msat_make_equal(menv, x_xs[17], expr8),
msat_make_equal(menv, x_xs[17], expr9),
msat_make_equal(menv, x_xs[17], expr10),
msat_make_equal(menv, x_xs[17], expr11),
msat_make_equal(menv, x_xs[17], expr12),
msat_make_equal(menv, x_xs[17], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_7_0)
expr1 = msat_make_plus(menv, xs[2], n_6_0)
expr2 = msat_make_plus(menv, xs[4], n_7_0)
expr3 = msat_make_plus(menv, xs[7], n_19_0)
expr4 = msat_make_plus(menv, xs[8], n_5_0)
expr5 = msat_make_plus(menv, xs[9], n_3_0)
expr6 = msat_make_plus(menv, xs[10], n_14_0)
expr7 = msat_make_plus(menv, xs[11], n_4_0)
expr8 = msat_make_plus(menv, xs[17], n_4_0)
expr9 = msat_make_plus(menv, xs[18], n_12_0)
expr10 = msat_make_plus(menv, xs[19], n_16_0)
expr11 = msat_make_plus(menv, xs[20], n_8_0)
expr12 = msat_make_plus(menv, xs[24], n_20_0)
expr13 = msat_make_plus(menv, xs[26], n_16_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[18], expr0),
msat_make_geq(menv, x_xs[18], expr1),
msat_make_geq(menv, x_xs[18], expr2),
msat_make_geq(menv, x_xs[18], expr3),
msat_make_geq(menv, x_xs[18], expr4),
msat_make_geq(menv, x_xs[18], expr5),
msat_make_geq(menv, x_xs[18], expr6),
msat_make_geq(menv, x_xs[18], expr7),
msat_make_geq(menv, x_xs[18], expr8),
msat_make_geq(menv, x_xs[18], expr9),
msat_make_geq(menv, x_xs[18], expr10),
msat_make_geq(menv, x_xs[18], expr11),
msat_make_geq(menv, x_xs[18], expr12),
msat_make_geq(menv, x_xs[18], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[18], expr0),
msat_make_equal(menv, x_xs[18], expr1),
msat_make_equal(menv, x_xs[18], expr2),
msat_make_equal(menv, x_xs[18], expr3),
msat_make_equal(menv, x_xs[18], expr4),
msat_make_equal(menv, x_xs[18], expr5),
msat_make_equal(menv, x_xs[18], expr6),
msat_make_equal(menv, x_xs[18], expr7),
msat_make_equal(menv, x_xs[18], expr8),
msat_make_equal(menv, x_xs[18], expr9),
msat_make_equal(menv, x_xs[18], expr10),
msat_make_equal(menv, x_xs[18], expr11),
msat_make_equal(menv, x_xs[18], expr12),
msat_make_equal(menv, x_xs[18], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_18_0)
expr1 = msat_make_plus(menv, xs[1], n_12_0)
expr2 = msat_make_plus(menv, xs[2], n_15_0)
expr3 = msat_make_plus(menv, xs[4], n_9_0)
expr4 = msat_make_plus(menv, xs[5], n_4_0)
expr5 = msat_make_plus(menv, xs[8], n_2_0)
expr6 = msat_make_plus(menv, xs[11], n_12_0)
expr7 = msat_make_plus(menv, xs[12], n_5_0)
expr8 = msat_make_plus(menv, xs[13], n_20_0)
expr9 = msat_make_plus(menv, xs[17], n_3_0)
expr10 = msat_make_plus(menv, xs[19], n_6_0)
expr11 = msat_make_plus(menv, xs[22], n_4_0)
expr12 = msat_make_plus(menv, xs[26], n_9_0)
expr13 = msat_make_plus(menv, xs[27], n_16_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[19], expr0),
msat_make_geq(menv, x_xs[19], expr1),
msat_make_geq(menv, x_xs[19], expr2),
msat_make_geq(menv, x_xs[19], expr3),
msat_make_geq(menv, x_xs[19], expr4),
msat_make_geq(menv, x_xs[19], expr5),
msat_make_geq(menv, x_xs[19], expr6),
msat_make_geq(menv, x_xs[19], expr7),
msat_make_geq(menv, x_xs[19], expr8),
msat_make_geq(menv, x_xs[19], expr9),
msat_make_geq(menv, x_xs[19], expr10),
msat_make_geq(menv, x_xs[19], expr11),
msat_make_geq(menv, x_xs[19], expr12),
msat_make_geq(menv, x_xs[19], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[19], expr0),
msat_make_equal(menv, x_xs[19], expr1),
msat_make_equal(menv, x_xs[19], expr2),
msat_make_equal(menv, x_xs[19], expr3),
msat_make_equal(menv, x_xs[19], expr4),
msat_make_equal(menv, x_xs[19], expr5),
msat_make_equal(menv, x_xs[19], expr6),
msat_make_equal(menv, x_xs[19], expr7),
msat_make_equal(menv, x_xs[19], expr8),
msat_make_equal(menv, x_xs[19], expr9),
msat_make_equal(menv, x_xs[19], expr10),
msat_make_equal(menv, x_xs[19], expr11),
msat_make_equal(menv, x_xs[19], expr12),
msat_make_equal(menv, x_xs[19], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_4_0)
expr1 = msat_make_plus(menv, xs[5], n_11_0)
expr2 = msat_make_plus(menv, xs[6], n_2_0)
expr3 = msat_make_plus(menv, xs[8], n_17_0)
expr4 = msat_make_plus(menv, xs[11], n_11_0)
expr5 = msat_make_plus(menv, xs[12], n_12_0)
expr6 = msat_make_plus(menv, xs[14], n_10_0)
expr7 = msat_make_plus(menv, xs[15], n_12_0)
expr8 = msat_make_plus(menv, xs[16], n_1_0)
expr9 = msat_make_plus(menv, xs[22], n_11_0)
expr10 = msat_make_plus(menv, xs[23], n_5_0)
expr11 = msat_make_plus(menv, xs[24], n_9_0)
expr12 = msat_make_plus(menv, xs[25], n_6_0)
expr13 = msat_make_plus(menv, xs[27], n_9_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[20], expr0),
msat_make_geq(menv, x_xs[20], expr1),
msat_make_geq(menv, x_xs[20], expr2),
msat_make_geq(menv, x_xs[20], expr3),
msat_make_geq(menv, x_xs[20], expr4),
msat_make_geq(menv, x_xs[20], expr5),
msat_make_geq(menv, x_xs[20], expr6),
msat_make_geq(menv, x_xs[20], expr7),
msat_make_geq(menv, x_xs[20], expr8),
msat_make_geq(menv, x_xs[20], expr9),
msat_make_geq(menv, x_xs[20], expr10),
msat_make_geq(menv, x_xs[20], expr11),
msat_make_geq(menv, x_xs[20], expr12),
msat_make_geq(menv, x_xs[20], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[20], expr0),
msat_make_equal(menv, x_xs[20], expr1),
msat_make_equal(menv, x_xs[20], expr2),
msat_make_equal(menv, x_xs[20], expr3),
msat_make_equal(menv, x_xs[20], expr4),
msat_make_equal(menv, x_xs[20], expr5),
msat_make_equal(menv, x_xs[20], expr6),
msat_make_equal(menv, x_xs[20], expr7),
msat_make_equal(menv, x_xs[20], expr8),
msat_make_equal(menv, x_xs[20], expr9),
msat_make_equal(menv, x_xs[20], expr10),
msat_make_equal(menv, x_xs[20], expr11),
msat_make_equal(menv, x_xs[20], expr12),
msat_make_equal(menv, x_xs[20], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_7_0)
expr1 = msat_make_plus(menv, xs[3], n_9_0)
expr2 = msat_make_plus(menv, xs[7], n_11_0)
expr3 = msat_make_plus(menv, xs[8], n_17_0)
expr4 = msat_make_plus(menv, xs[13], n_20_0)
expr5 = msat_make_plus(menv, xs[15], n_4_0)
expr6 = msat_make_plus(menv, xs[16], n_5_0)
expr7 = msat_make_plus(menv, xs[18], n_11_0)
expr8 = msat_make_plus(menv, xs[21], n_8_0)
expr9 = msat_make_plus(menv, xs[22], n_9_0)
expr10 = msat_make_plus(menv, xs[24], n_5_0)
expr11 = msat_make_plus(menv, xs[25], n_7_0)
expr12 = msat_make_plus(menv, xs[26], n_3_0)
expr13 = msat_make_plus(menv, xs[27], n_11_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[21], expr0),
msat_make_geq(menv, x_xs[21], expr1),
msat_make_geq(menv, x_xs[21], expr2),
msat_make_geq(menv, x_xs[21], expr3),
msat_make_geq(menv, x_xs[21], expr4),
msat_make_geq(menv, x_xs[21], expr5),
msat_make_geq(menv, x_xs[21], expr6),
msat_make_geq(menv, x_xs[21], expr7),
msat_make_geq(menv, x_xs[21], expr8),
msat_make_geq(menv, x_xs[21], expr9),
msat_make_geq(menv, x_xs[21], expr10),
msat_make_geq(menv, x_xs[21], expr11),
msat_make_geq(menv, x_xs[21], expr12),
msat_make_geq(menv, x_xs[21], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[21], expr0),
msat_make_equal(menv, x_xs[21], expr1),
msat_make_equal(menv, x_xs[21], expr2),
msat_make_equal(menv, x_xs[21], expr3),
msat_make_equal(menv, x_xs[21], expr4),
msat_make_equal(menv, x_xs[21], expr5),
msat_make_equal(menv, x_xs[21], expr6),
msat_make_equal(menv, x_xs[21], expr7),
msat_make_equal(menv, x_xs[21], expr8),
msat_make_equal(menv, x_xs[21], expr9),
msat_make_equal(menv, x_xs[21], expr10),
msat_make_equal(menv, x_xs[21], expr11),
msat_make_equal(menv, x_xs[21], expr12),
msat_make_equal(menv, x_xs[21], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_19_0)
expr1 = msat_make_plus(menv, xs[2], n_5_0)
expr2 = msat_make_plus(menv, xs[3], n_13_0)
expr3 = msat_make_plus(menv, xs[7], n_16_0)
expr4 = msat_make_plus(menv, xs[11], n_12_0)
expr5 = msat_make_plus(menv, xs[12], n_19_0)
expr6 = msat_make_plus(menv, xs[15], n_6_0)
expr7 = msat_make_plus(menv, xs[16], n_16_0)
expr8 = msat_make_plus(menv, xs[18], n_2_0)
expr9 = msat_make_plus(menv, xs[19], n_19_0)
expr10 = msat_make_plus(menv, xs[21], n_1_0)
expr11 = msat_make_plus(menv, xs[22], n_12_0)
expr12 = msat_make_plus(menv, xs[23], n_11_0)
expr13 = msat_make_plus(menv, xs[27], n_9_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[22], expr0),
msat_make_geq(menv, x_xs[22], expr1),
msat_make_geq(menv, x_xs[22], expr2),
msat_make_geq(menv, x_xs[22], expr3),
msat_make_geq(menv, x_xs[22], expr4),
msat_make_geq(menv, x_xs[22], expr5),
msat_make_geq(menv, x_xs[22], expr6),
msat_make_geq(menv, x_xs[22], expr7),
msat_make_geq(menv, x_xs[22], expr8),
msat_make_geq(menv, x_xs[22], expr9),
msat_make_geq(menv, x_xs[22], expr10),
msat_make_geq(menv, x_xs[22], expr11),
msat_make_geq(menv, x_xs[22], expr12),
msat_make_geq(menv, x_xs[22], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[22], expr0),
msat_make_equal(menv, x_xs[22], expr1),
msat_make_equal(menv, x_xs[22], expr2),
msat_make_equal(menv, x_xs[22], expr3),
msat_make_equal(menv, x_xs[22], expr4),
msat_make_equal(menv, x_xs[22], expr5),
msat_make_equal(menv, x_xs[22], expr6),
msat_make_equal(menv, x_xs[22], expr7),
msat_make_equal(menv, x_xs[22], expr8),
msat_make_equal(menv, x_xs[22], expr9),
msat_make_equal(menv, x_xs[22], expr10),
msat_make_equal(menv, x_xs[22], expr11),
msat_make_equal(menv, x_xs[22], expr12),
msat_make_equal(menv, x_xs[22], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_11_0)
expr1 = msat_make_plus(menv, xs[3], n_19_0)
expr2 = msat_make_plus(menv, xs[5], n_6_0)
expr3 = msat_make_plus(menv, xs[6], n_20_0)
expr4 = msat_make_plus(menv, xs[7], n_9_0)
expr5 = msat_make_plus(menv, xs[8], n_15_0)
expr6 = msat_make_plus(menv, xs[9], n_3_0)
expr7 = msat_make_plus(menv, xs[10], n_2_0)
expr8 = msat_make_plus(menv, xs[12], n_5_0)
expr9 = msat_make_plus(menv, xs[17], n_15_0)
expr10 = msat_make_plus(menv, xs[18], n_6_0)
expr11 = msat_make_plus(menv, xs[22], n_20_0)
expr12 = msat_make_plus(menv, xs[23], n_6_0)
expr13 = msat_make_plus(menv, xs[24], n_4_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[23], expr0),
msat_make_geq(menv, x_xs[23], expr1),
msat_make_geq(menv, x_xs[23], expr2),
msat_make_geq(menv, x_xs[23], expr3),
msat_make_geq(menv, x_xs[23], expr4),
msat_make_geq(menv, x_xs[23], expr5),
msat_make_geq(menv, x_xs[23], expr6),
msat_make_geq(menv, x_xs[23], expr7),
msat_make_geq(menv, x_xs[23], expr8),
msat_make_geq(menv, x_xs[23], expr9),
msat_make_geq(menv, x_xs[23], expr10),
msat_make_geq(menv, x_xs[23], expr11),
msat_make_geq(menv, x_xs[23], expr12),
msat_make_geq(menv, x_xs[23], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[23], expr0),
msat_make_equal(menv, x_xs[23], expr1),
msat_make_equal(menv, x_xs[23], expr2),
msat_make_equal(menv, x_xs[23], expr3),
msat_make_equal(menv, x_xs[23], expr4),
msat_make_equal(menv, x_xs[23], expr5),
msat_make_equal(menv, x_xs[23], expr6),
msat_make_equal(menv, x_xs[23], expr7),
msat_make_equal(menv, x_xs[23], expr8),
msat_make_equal(menv, x_xs[23], expr9),
msat_make_equal(menv, x_xs[23], expr10),
msat_make_equal(menv, x_xs[23], expr11),
msat_make_equal(menv, x_xs[23], expr12),
msat_make_equal(menv, x_xs[23], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[2], n_3_0)
expr1 = msat_make_plus(menv, xs[4], n_7_0)
expr2 = msat_make_plus(menv, xs[5], n_16_0)
expr3 = msat_make_plus(menv, xs[6], n_19_0)
expr4 = msat_make_plus(menv, xs[9], n_6_0)
expr5 = msat_make_plus(menv, xs[10], n_2_0)
expr6 = msat_make_plus(menv, xs[14], n_14_0)
expr7 = msat_make_plus(menv, xs[15], n_13_0)
expr8 = msat_make_plus(menv, xs[16], n_10_0)
expr9 = msat_make_plus(menv, xs[19], n_13_0)
expr10 = msat_make_plus(menv, xs[20], n_10_0)
expr11 = msat_make_plus(menv, xs[25], n_6_0)
expr12 = msat_make_plus(menv, xs[26], n_9_0)
expr13 = msat_make_plus(menv, xs[27], n_19_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[24], expr0),
msat_make_geq(menv, x_xs[24], expr1),
msat_make_geq(menv, x_xs[24], expr2),
msat_make_geq(menv, x_xs[24], expr3),
msat_make_geq(menv, x_xs[24], expr4),
msat_make_geq(menv, x_xs[24], expr5),
msat_make_geq(menv, x_xs[24], expr6),
msat_make_geq(menv, x_xs[24], expr7),
msat_make_geq(menv, x_xs[24], expr8),
msat_make_geq(menv, x_xs[24], expr9),
msat_make_geq(menv, x_xs[24], expr10),
msat_make_geq(menv, x_xs[24], expr11),
msat_make_geq(menv, x_xs[24], expr12),
msat_make_geq(menv, x_xs[24], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[24], expr0),
msat_make_equal(menv, x_xs[24], expr1),
msat_make_equal(menv, x_xs[24], expr2),
msat_make_equal(menv, x_xs[24], expr3),
msat_make_equal(menv, x_xs[24], expr4),
msat_make_equal(menv, x_xs[24], expr5),
msat_make_equal(menv, x_xs[24], expr6),
msat_make_equal(menv, x_xs[24], expr7),
msat_make_equal(menv, x_xs[24], expr8),
msat_make_equal(menv, x_xs[24], expr9),
msat_make_equal(menv, x_xs[24], expr10),
msat_make_equal(menv, x_xs[24], expr11),
msat_make_equal(menv, x_xs[24], expr12),
msat_make_equal(menv, x_xs[24], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_5_0)
expr1 = msat_make_plus(menv, xs[2], n_9_0)
expr2 = msat_make_plus(menv, xs[3], n_11_0)
expr3 = msat_make_plus(menv, xs[5], n_17_0)
expr4 = msat_make_plus(menv, xs[7], n_15_0)
expr5 = msat_make_plus(menv, xs[11], n_6_0)
expr6 = msat_make_plus(menv, xs[14], n_2_0)
expr7 = msat_make_plus(menv, xs[17], n_12_0)
expr8 = msat_make_plus(menv, xs[18], n_4_0)
expr9 = msat_make_plus(menv, xs[20], n_2_0)
expr10 = msat_make_plus(menv, xs[23], n_3_0)
expr11 = msat_make_plus(menv, xs[24], n_11_0)
expr12 = msat_make_plus(menv, xs[25], n_17_0)
expr13 = msat_make_plus(menv, xs[26], n_10_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[25], expr0),
msat_make_geq(menv, x_xs[25], expr1),
msat_make_geq(menv, x_xs[25], expr2),
msat_make_geq(menv, x_xs[25], expr3),
msat_make_geq(menv, x_xs[25], expr4),
msat_make_geq(menv, x_xs[25], expr5),
msat_make_geq(menv, x_xs[25], expr6),
msat_make_geq(menv, x_xs[25], expr7),
msat_make_geq(menv, x_xs[25], expr8),
msat_make_geq(menv, x_xs[25], expr9),
msat_make_geq(menv, x_xs[25], expr10),
msat_make_geq(menv, x_xs[25], expr11),
msat_make_geq(menv, x_xs[25], expr12),
msat_make_geq(menv, x_xs[25], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[25], expr0),
msat_make_equal(menv, x_xs[25], expr1),
msat_make_equal(menv, x_xs[25], expr2),
msat_make_equal(menv, x_xs[25], expr3),
msat_make_equal(menv, x_xs[25], expr4),
msat_make_equal(menv, x_xs[25], expr5),
msat_make_equal(menv, x_xs[25], expr6),
msat_make_equal(menv, x_xs[25], expr7),
msat_make_equal(menv, x_xs[25], expr8),
msat_make_equal(menv, x_xs[25], expr9),
msat_make_equal(menv, x_xs[25], expr10),
msat_make_equal(menv, x_xs[25], expr11),
msat_make_equal(menv, x_xs[25], expr12),
msat_make_equal(menv, x_xs[25], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_20_0)
expr1 = msat_make_plus(menv, xs[2], n_9_0)
expr2 = msat_make_plus(menv, xs[3], n_9_0)
expr3 = msat_make_plus(menv, xs[4], n_13_0)
expr4 = msat_make_plus(menv, xs[5], n_1_0)
expr5 = msat_make_plus(menv, xs[7], n_5_0)
expr6 = msat_make_plus(menv, xs[8], n_2_0)
expr7 = msat_make_plus(menv, xs[10], n_10_0)
expr8 = msat_make_plus(menv, xs[11], n_5_0)
expr9 = msat_make_plus(menv, xs[12], n_18_0)
expr10 = msat_make_plus(menv, xs[14], n_14_0)
expr11 = msat_make_plus(menv, xs[15], n_18_0)
expr12 = msat_make_plus(menv, xs[20], n_13_0)
expr13 = msat_make_plus(menv, xs[25], n_2_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[26], expr0),
msat_make_geq(menv, x_xs[26], expr1),
msat_make_geq(menv, x_xs[26], expr2),
msat_make_geq(menv, x_xs[26], expr3),
msat_make_geq(menv, x_xs[26], expr4),
msat_make_geq(menv, x_xs[26], expr5),
msat_make_geq(menv, x_xs[26], expr6),
msat_make_geq(menv, x_xs[26], expr7),
msat_make_geq(menv, x_xs[26], expr8),
msat_make_geq(menv, x_xs[26], expr9),
msat_make_geq(menv, x_xs[26], expr10),
msat_make_geq(menv, x_xs[26], expr11),
msat_make_geq(menv, x_xs[26], expr12),
msat_make_geq(menv, x_xs[26], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[26], expr0),
msat_make_equal(menv, x_xs[26], expr1),
msat_make_equal(menv, x_xs[26], expr2),
msat_make_equal(menv, x_xs[26], expr3),
msat_make_equal(menv, x_xs[26], expr4),
msat_make_equal(menv, x_xs[26], expr5),
msat_make_equal(menv, x_xs[26], expr6),
msat_make_equal(menv, x_xs[26], expr7),
msat_make_equal(menv, x_xs[26], expr8),
msat_make_equal(menv, x_xs[26], expr9),
msat_make_equal(menv, x_xs[26], expr10),
msat_make_equal(menv, x_xs[26], expr11),
msat_make_equal(menv, x_xs[26], expr12),
msat_make_equal(menv, x_xs[26], expr13),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_1_0)
expr1 = msat_make_plus(menv, xs[2], n_19_0)
expr2 = msat_make_plus(menv, xs[6], n_6_0)
expr3 = msat_make_plus(menv, xs[8], n_17_0)
expr4 = msat_make_plus(menv, xs[9], n_6_0)
expr5 = msat_make_plus(menv, xs[12], n_12_0)
expr6 = msat_make_plus(menv, xs[14], n_6_0)
expr7 = msat_make_plus(menv, xs[16], n_1_0)
expr8 = msat_make_plus(menv, xs[18], n_1_0)
expr9 = msat_make_plus(menv, xs[20], n_16_0)
expr10 = msat_make_plus(menv, xs[21], n_2_0)
expr11 = msat_make_plus(menv, xs[23], n_11_0)
expr12 = msat_make_plus(menv, xs[25], n_6_0)
expr13 = msat_make_plus(menv, xs[26], n_18_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[27], expr0),
msat_make_geq(menv, x_xs[27], expr1),
msat_make_geq(menv, x_xs[27], expr2),
msat_make_geq(menv, x_xs[27], expr3),
msat_make_geq(menv, x_xs[27], expr4),
msat_make_geq(menv, x_xs[27], expr5),
msat_make_geq(menv, x_xs[27], expr6),
msat_make_geq(menv, x_xs[27], expr7),
msat_make_geq(menv, x_xs[27], expr8),
msat_make_geq(menv, x_xs[27], expr9),
msat_make_geq(menv, x_xs[27], expr10),
msat_make_geq(menv, x_xs[27], expr11),
msat_make_geq(menv, x_xs[27], expr12),
msat_make_geq(menv, x_xs[27], expr13),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[27], expr0),
msat_make_equal(menv, x_xs[27], expr1),
msat_make_equal(menv, x_xs[27], expr2),
msat_make_equal(menv, x_xs[27], expr3),
msat_make_equal(menv, x_xs[27], expr4),
msat_make_equal(menv, x_xs[27], expr5),
msat_make_equal(menv, x_xs[27], expr6),
msat_make_equal(menv, x_xs[27], expr7),
msat_make_equal(menv, x_xs[27], expr8),
msat_make_equal(menv, x_xs[27], expr9),
msat_make_equal(menv, x_xs[27], expr10),
msat_make_equal(menv, x_xs[27], expr11),
msat_make_equal(menv, x_xs[27], expr12),
msat_make_equal(menv, x_xs[27], expr13),))
trans = msat_make_and(menv, trans, _t)
# ltl property: ((G (x_15 - x_4 > 5)) U (X (x_20 - x_6 >= 3)))
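    # Note (added commentary, not part of the generated benchmark): the encoder API
    # assumed here builds the formula bottom-up -- enc.make_G wraps the "globally"
    # operand, enc.make_X the "next" operand, and enc.make_U joins the two.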
    ltl = enc.make_U(
        enc.make_G(msat_make_gt(menv, msat_make_minus(menv, xs[15], xs[4]),
                                msat_make_number(menv, "5"))),
        enc.make_X(msat_make_geq(menv, msat_make_minus(menv, xs[20], xs[6]),
                                 msat_make_number(menv, "3"))))
return TermMap(curr2next), init, trans, ltl
| 56.63191
| 226
| 0.500686
|
b1318bcc5cc03ca9867089c64656df0df65920bd
| 811
|
py
|
Python
|
tests/compose/tea-tasks/prep_infuser.py
|
Dynatrace/alyeska
|
a1e08e105e9c7ae7f10852363f2e5dca5db9be0c
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2019-09-11T11:24:19.000Z
|
2019-10-01T15:25:04.000Z
|
tests/compose/tea-tasks/prep_infuser.py
|
Dynatrace/alyeska
|
a1e08e105e9c7ae7f10852363f2e5dca5db9be0c
|
[
"ECL-2.0",
"Apache-2.0"
] | 25
|
2019-09-11T12:12:12.000Z
|
2019-10-10T10:38:22.000Z
|
tests/compose/tea-tasks/prep_infuser.py
|
Dynatrace/alyeska
|
a1e08e105e9c7ae7f10852363f2e5dca5db9be0c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
## ---------------------------------------------------------------------------
## Copyright 2019 Dynatrace LLC
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
## ---------------------------------------------------------------------------
print("I am preparing the infuser...")
| 45.055556
| 78
| 0.589396
|
ab5de2090ac18e2ac606508e253904634aa85f8e
| 911
|
py
|
Python
|
utils.py
|
Atharva-Phatak/TESLEA
|
0bf3e8980d3c328e25849ba5f7029ec698b31c70
|
[
"Apache-2.0"
] | 1
|
2022-03-10T00:22:26.000Z
|
2022-03-10T00:22:26.000Z
|
utils.py
|
Atharva-Phatak/TESLEA
|
0bf3e8980d3c328e25849ba5f7029ec698b31c70
|
[
"Apache-2.0"
] | null | null | null |
utils.py
|
Atharva-Phatak/TESLEA
|
0bf3e8980d3c328e25849ba5f7029ec698b31c70
|
[
"Apache-2.0"
] | null | null | null |
import json
import torch
from nltk import word_tokenize
from string import punctuation
def save_json(data, fname):
with open(fname, "w") as f:
json.dump(data, f, indent=4)
def compute_mean(l, k):
return sum([o[k] for o in l]) / len(l)
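# Illustrative use of compute_mean (values assumed): averaging one key over a list
# of dicts, e.g. compute_mean([{"loss": 2.0}, {"loss": 4.0}], "loss") == 3.0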
def create_weight_vector(fname, exclude_tokens, num_weights):
# prepare weights vectors
weights = []
with open(fname) as f:
for line in filter(lambda l: len(l) > 0, f.readlines()):
index, weight = line.strip().split()
if int(index) not in exclude_tokens:
weights.append((int(index), float(weight)))
weights = [w for w in weights if w[1] < 0]
if num_weights > -1:
weights = weights[:num_weights]
# split ids and weights
ids = [x[0] for x in weights]
weights = torch.tensor([abs(x[1]) for x in weights])
return ids, weights
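# Illustrative usage of create_weight_vector (file name and layout assumed, not part
# of the original module): each line of `fname` holds "<token_index> <weight>";
# indices in `exclude_tokens` are skipped, only negative weights are kept, and their
# absolute values are returned as a tensor.
#   ids, weights = create_weight_vector("token_weights.txt", exclude_tokens={0, 1},
#                                       num_weights=100)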
| 25.305556
| 65
| 0.598244
|
14c60dbcbcca49a3c6e5b47f19fb7dc46597b034
| 676
|
py
|
Python
|
things/urls.py
|
franciscobarrerarodriguez/redzza
|
69dfc983a6e34a2882cc58df5ea4dd7f4bbc2578
|
[
"Apache-2.0"
] | null | null | null |
things/urls.py
|
franciscobarrerarodriguez/redzza
|
69dfc983a6e34a2882cc58df5ea4dd7f4bbc2578
|
[
"Apache-2.0"
] | null | null | null |
things/urls.py
|
franciscobarrerarodriguez/redzza
|
69dfc983a6e34a2882cc58df5ea4dd7f4bbc2578
|
[
"Apache-2.0"
] | 1
|
2020-02-21T13:04:44.000Z
|
2020-02-21T13:04:44.000Z
|
from rest_framework import routers
from .views import NoticeViewSet, CityNoticeViewSet, ProductViewSet, ColorViewSet, ServiceViewSet, ImageViewSet, VideoViewSet, CommentaryViewSet, ApiServicesViewSet
router = routers.DefaultRouter()
router.register(r'notices', NoticeViewSet)
# router.register(r'citiesNotice', CityNoticeViewSet)
# router.register(r'products', ProductViewSet)
# router.register(r'colors', ColorViewSet)
# router.register(r'services', ServiceViewSet)
router.register(r'images', ImageViewSet)
router.register(r'videos', VideoViewSet)
router.register(r'comments', CommentaryViewSet)
router.register(r'apiServices', ApiServicesViewSet, base_name='apiServices')
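# The registered routes are typically exposed through the project's URL
# configuration, e.g. (wiring assumed, not shown in this file):
#   from django.urls import include, path
#   urlpatterns = [path('api/', include(router.urls))]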
| 45.066667
| 164
| 0.823964
|
456aeae60f457b689aaa33fd04a60308c85100b8
| 2,716
|
py
|
Python
|
migrations/versions/58951df819e3_new_database.py
|
petermirithu/Pitch_web_App
|
21cd116dccfefd5bfca40ca2cf3df0b326d19adb
|
[
"MIT"
] | null | null | null |
migrations/versions/58951df819e3_new_database.py
|
petermirithu/Pitch_web_App
|
21cd116dccfefd5bfca40ca2cf3df0b326d19adb
|
[
"MIT"
] | null | null | null |
migrations/versions/58951df819e3_new_database.py
|
petermirithu/Pitch_web_App
|
21cd116dccfefd5bfca40ca2cf3df0b326d19adb
|
[
"MIT"
] | 1
|
2020-01-13T22:04:56.000Z
|
2020-01-13T22:04:56.000Z
|
"""new database
Revision ID: 58951df819e3
Revises:
Create Date: 2019-11-23 22:43:23.952067
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '58951df819e3'
down_revision = None
branch_labels = None
depends_on = None
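# This revision is applied with the standard Alembic CLI (or a wrapper such as
# Flask-Migrate, assuming one is configured for the project):
#   alembic upgrade head      # create the tables defined below
#   alembic downgrade base    # drop them again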
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('commenttable',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('pitch_id', sa.Integer(), nullable=True),
sa.Column('pitch_title', sa.String(length=100), nullable=True),
sa.Column('p_comment', sa.String(length=200), nullable=True),
sa.Column('post_com', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=20), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('usertable',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=20), nullable=True),
sa.Column('email', sa.String(length=30), nullable=True),
sa.Column('pass_word', sa.String(length=20), nullable=True),
sa.Column('bio', sa.String(length=100), nullable=True),
sa.Column('profile_pic_path', sa.String(), nullable=True),
sa.Column('post_user', sa.DateTime(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_usertable_email'), 'usertable', ['email'], unique=True)
op.create_index(op.f('ix_usertable_username'), 'usertable', ['username'], unique=False)
op.create_table('pitchtable',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('category', sa.String(length=20), nullable=True),
sa.Column('p_title', sa.String(length=100), nullable=True),
sa.Column('pitch_it', sa.String(length=255), nullable=True),
sa.Column('post', sa.DateTime(), nullable=True),
sa.Column('upvote', sa.Integer(), nullable=True),
sa.Column('downvote', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['usertable.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('pitchtable')
op.drop_index(op.f('ix_usertable_username'), table_name='usertable')
op.drop_index(op.f('ix_usertable_email'), table_name='usertable')
op.drop_table('usertable')
op.drop_table('roles')
op.drop_table('commenttable')
# ### end Alembic commands ###
| 37.722222
| 91
| 0.674521
|
5562bf5c624b6c39ea37e0172b5faa3dce95f90e
| 2,255
|
py
|
Python
|
network_controllers/dnac/device_list.py
|
usmcfiredog/netprog_basics
|
ac4e110390ca1f011880b161401da7bc755bc2b7
|
[
"MIT"
] | null | null | null |
network_controllers/dnac/device_list.py
|
usmcfiredog/netprog_basics
|
ac4e110390ca1f011880b161401da7bc755bc2b7
|
[
"MIT"
] | null | null | null |
network_controllers/dnac/device_list.py
|
usmcfiredog/netprog_basics
|
ac4e110390ca1f011880b161401da7bc755bc2b7
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
"""
Learning Series: Network Programmability Basics
Module: Network Controllers
Lesson: Program your own DNA with DNA Center APIs
Author: Hank Preston <hapresto@cisco.com>
example1.py
Illustrate the following concepts:
- Building DNA Center API Code
- Start from Postman Auto-generated code
- Multiple requests in one script
"""
__author__ = "Hank Preston"
__author_email__ = "hapresto@cisco.com"
__copyright__ = "Copyright (c) 2016 Cisco Systems, Inc."
__license__ = "MIT"
from device_info import dnac
import requests
import json
import urllib3
# Silence the insecure warning due to SSL Certificate
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
headers = {
'content-type': "application/json",
'x-auth-token': ""
}
def dnac_login(host, username, password):
"""
    Use the REST API to log into a DNA Center and retrieve an auth ticket
"""
url = "https://{}/api/system/v1/auth/token".format(host)
# Make Login request and return the response body
response = requests.request("POST", url,
auth=(username, password),
headers=headers, verify=False)
return response.json()["Token"]
def network_device_list(host, token):
"""
Use the REST API to retrieve the list of network devices
"""
url = "https://{}/api/v1/network-device".format(host)
headers["x-auth-token"] = token
# Make API request and return the response body
response = requests.request("GET", url, headers=headers, verify=False)
return response.json()["response"]
# Entry point for program
if __name__ == '__main__':
# Log into the DNA Center Controller to get Ticket
token = dnac_login(dnac["host"], dnac["username"], dnac["password"])
# Get the list of devices
devices = network_device_list(dnac["host"], token)
# Loop through the devices and print details
for device in devices:
print("{} in family {}".format(device["hostname"], device["family"]))
print(" Management IP: {}".format(device["managementIpAddress"]))
print(" Platform Type: {}".format(device["platformId"]))
print(" Software Version: {}".format(device["softwareVersion"]))
print("")
| 30.066667
| 77
| 0.678936
|
08076fd193259202e73a635adcca46ab8c01c898
| 313
|
py
|
Python
|
topCoder/srms/300s/srm305/div2/multi_read.py
|
gauravsingh58/algo
|
397859a53429e7a585e5f6964ad24146c6261326
|
[
"WTFPL"
] | 1
|
2020-09-30T19:53:08.000Z
|
2020-09-30T19:53:08.000Z
|
topCoder/srms/300s/srm305/div2/multi_read.py
|
gauravsingh58/algo
|
397859a53429e7a585e5f6964ad24146c6261326
|
[
"WTFPL"
] | null | null | null |
topCoder/srms/300s/srm305/div2/multi_read.py
|
gauravsingh58/algo
|
397859a53429e7a585e5f6964ad24146c6261326
|
[
"WTFPL"
] | 1
|
2020-10-15T09:10:57.000Z
|
2020-10-15T09:10:57.000Z
|
from itertools import groupby
from math import ceil
class MultiRead:
def minCycles(self, trace, procs):
c = 0
for k, g in groupby(trace):
if k == 'R':
c += int(ceil(float(len(list(g))) / procs))
else:
c += len(list(g))
return c
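# Worked example (added for illustration, not part of the original solution):
# consecutive 'R' reads are served `procs` at a time, every other event costs one
# cycle, so trace "RRRWR" with procs=2 takes ceil(3/2) + 1 + 1 = 4 cycles.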
| 24.076923
| 59
| 0.495208
|
55ca5c20b94de030678bff340ffa049c574332d7
| 3,810
|
py
|
Python
|
userbot/plugins/getmusic.py
|
Munnipopz/CatUserbot
|
b0e54241aad6b4778b99807c4f78c922ef7befa0
|
[
"MIT"
] | 1
|
2020-07-18T07:42:58.000Z
|
2020-07-18T07:42:58.000Z
|
userbot/plugins/getmusic.py
|
praveen368/CatUserbot
|
4b0cd970551ffaf86b9fdd5da584c1b3882821ff
|
[
"MIT"
] | null | null | null |
userbot/plugins/getmusic.py
|
praveen368/CatUserbot
|
4b0cd970551ffaf86b9fdd5da584c1b3882821ff
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
from telethon import events
import subprocess
from telethon.errors import MessageEmptyError, MessageTooLongError, MessageNotModifiedError
import io
import asyncio
from userbot.utils import admin_cmd
import glob
import os
from userbot import CMD_HELP, ALIVE_NAME, catdef
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from telethon.tl.types import DocumentAttributeVideo
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "@Sur_vivor"
@borg.on(admin_cmd(pattern="song(?: |$)(.*)"))
async def _(event):
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
reply = await event.get_reply_message()
if event.pattern_match.group(1):
query = event.pattern_match.group(1)
await event.edit("wi8..! I am finding your song....")
elif reply.message:
query = reply.message
await event.edit("wi8..! I am finding your song....")
else:
await event.edit("`What I am Supposed to find `")
return
catdef.catmusic(str(query),"320k")
l = glob.glob("*.mp3")
if l:
await event.edit("yeah..! i found something wi8..🥰")
else:
        await event.edit(f"Sorry..! i can't find anything with `{query}`")
        return
loa = l[0]
await borg.send_file(
event.chat_id,
loa,
force_document=True,
allow_cache=False,
caption=f"`Song`: {query}\n`Uploaded by`: {DEFAULTUSER}",
reply_to=reply_to_id
)
await event.delete()
os.system("rm -rf *.mp3")
subprocess.check_output("rm -rf *.mp3",shell=True)
@borg.on(admin_cmd(pattern="videosong(?: |$)(.*)"))
async def _(event):
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
reply = await event.get_reply_message()
if event.pattern_match.group(1):
query = event.pattern_match.group(1)
await event.edit("wi8..! I am finding your videosong....")
elif reply.message:
query = reply.message
await event.edit("wi8..! I am finding your videosong....")
else:
await event.edit("What I am Supposed to find")
return
catdef.catmusicvideo(query)
l = glob.glob(("*.mp4")) + glob.glob(("*.mkv")) + glob.glob(("*.webm"))
if l:
await event.edit("yeah..! i found something wi8..🥰")
else:
        await event.edit(f"Sorry..! i can't find anything with `{query}`")
        return
loa = l[0]
metadata = extractMetadata(createParser(loa))
duration = 0
width = 0
height = 0
if metadata.has("duration"):
duration = metadata.get("duration").seconds
if metadata.has("width"):
width = metadata.get("width")
if metadata.has("height"):
height = metadata.get("height")
await borg.send_file(
event.chat_id,
loa,
force_document=True,
allow_cache=False,
caption=f"`Song`: {query}\n`Uploaded by`: {DEFAULTUSER}",
supports_streaming=True,
reply_to=reply_to_id,
attributes=[DocumentAttributeVideo(
duration=duration,
w=width,
h=height,
round_message=False,
supports_streaming=True,
)],
)
await event.delete()
os.system("rm -rf *.mkv")
os.system("rm -rf *.mp4")
os.system("rm -rf *.webm")
CMD_HELP.update({"getmusic":
"`.song` query or `.song` reply to song name :\
\nUSAGE:finds the song you entered in query and sends it"
})
| 34.636364
| 91
| 0.590026
|
68a129ccf788249e2e6d80ad2be9666f6030bf16
| 38
|
py
|
Python
|
amlpp/architect/__init__.py
|
Asirg/papds
|
57ce01898ed670b67537218fb4652809de71fa75
|
[
"MIT"
] | 1
|
2021-12-06T13:28:27.000Z
|
2021-12-06T13:28:27.000Z
|
amlpp/architect/__init__.py
|
Asirg/papds
|
57ce01898ed670b67537218fb4652809de71fa75
|
[
"MIT"
] | null | null | null |
amlpp/architect/__init__.py
|
Asirg/papds
|
57ce01898ed670b67537218fb4652809de71fa75
|
[
"MIT"
] | 1
|
2022-02-09T11:43:04.000Z
|
2022-02-09T11:43:04.000Z
|
from .experimenter import Experimenter
| 38
| 38
| 0.894737
|
7120c04f47b9fc147824a48f1a8bdfe7f9f54a53
| 1,659
|
py
|
Python
|
sdk/keyvault/azure-keyvault-secrets/tests/secrets_test_case.py
|
GabrielHobold/azure-sdk-for-python
|
7248645bcb0d590eafdae6ffc9d25ec688a0ff68
|
[
"MIT"
] | null | null | null |
sdk/keyvault/azure-keyvault-secrets/tests/secrets_test_case.py
|
GabrielHobold/azure-sdk-for-python
|
7248645bcb0d590eafdae6ffc9d25ec688a0ff68
|
[
"MIT"
] | null | null | null |
sdk/keyvault/azure-keyvault-secrets/tests/secrets_test_case.py
|
GabrielHobold/azure-sdk-for-python
|
7248645bcb0d590eafdae6ffc9d25ec688a0ff68
|
[
"MIT"
] | null | null | null |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import time
import os
from devtools_testutils import AzureMgmtTestCase
class KeyVaultTestCase(AzureMgmtTestCase):
def setUp(self):
self.list_test_size = 7
super(KeyVaultTestCase, self).setUp()
def tearDown(self):
super(KeyVaultTestCase, self).tearDown()
if self.is_live:
dirname = os.path.dirname(__file__)
seed_filename = os.path.join(dirname, "seed.txt")
with open(seed_filename, 'w') as f:
f.write(os.environ['RUN_IDENTIFIER'])
def _poll_until_no_exception(self, fn, expected_exception, max_retries=20, retry_delay=3):
"""polling helper for live tests because some operations take an unpredictable amount of time to complete"""
for i in range(max_retries):
try:
return fn()
except expected_exception:
if i == max_retries - 1:
raise
if self.is_live:
time.sleep(retry_delay)
def _poll_until_exception(self, fn, expected_exception, max_retries=20, retry_delay=3):
"""polling helper for live tests because some operations take an unpredictable amount of time to complete"""
for _ in range(max_retries):
try:
fn()
if self.is_live:
time.sleep(retry_delay)
except expected_exception:
return
        self.fail(f"expected exception {expected_exception} was not raised")
| 34.5625
| 116
| 0.588306
|
73ebc6fab682a3fb8dcb5ac1570e903c7b493a54
| 3,609
|
py
|
Python
|
source/deepsecurity/models/heap_rights.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-10-30T16:40:09.000Z
|
2021-10-30T16:40:09.000Z
|
source/deepsecurity/models/heap_rights.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-07-28T20:19:03.000Z
|
2021-07-28T20:19:03.000Z
|
source/deepsecurity/models/heap_rights.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-10-30T16:40:02.000Z
|
2021-10-30T16:40:02.000Z
|
# coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class HeapRights(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'can_view_heap': 'bool'
}
attribute_map = {
'can_view_heap': 'canViewHeap'
}
def __init__(self, can_view_heap=None): # noqa: E501
"""HeapRights - a model defined in Swagger""" # noqa: E501
self._can_view_heap = None
self.discriminator = None
if can_view_heap is not None:
self.can_view_heap = can_view_heap
@property
def can_view_heap(self):
"""Gets the can_view_heap of this HeapRights. # noqa: E501
Right to view the heap. # noqa: E501
:return: The can_view_heap of this HeapRights. # noqa: E501
:rtype: bool
"""
return self._can_view_heap
@can_view_heap.setter
def can_view_heap(self, can_view_heap):
"""Sets the can_view_heap of this HeapRights.
Right to view the heap. # noqa: E501
:param can_view_heap: The can_view_heap of this HeapRights. # noqa: E501
:type: bool
"""
self._can_view_heap = can_view_heap
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(HeapRights, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HeapRights):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
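# A minimal, hedged usage sketch (not part of the generated model): construct the
# model, read it back through to_dict(), and compare two equal instances.
if __name__ == "__main__":
    rights = HeapRights(can_view_heap=True)
    print(rights.to_dict())                          # expected: {'can_view_heap': True}
    print(rights == HeapRights(can_view_heap=True))  # expected: True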
| 30.327731
| 311
| 0.564699
|
cb516dfd010de68200ea6e84de518d4f69c3b658
| 4,035
|
py
|
Python
|
nemo_tools/text_denormalization/taggers/time.py
|
vadam5/NeMo
|
3c5db09539293c3c19a6bb7437011f91261119af
|
[
"Apache-2.0"
] | null | null | null |
nemo_tools/text_denormalization/taggers/time.py
|
vadam5/NeMo
|
3c5db09539293c3c19a6bb7437011f91261119af
|
[
"Apache-2.0"
] | null | null | null |
nemo_tools/text_denormalization/taggers/time.py
|
vadam5/NeMo
|
3c5db09539293c3c19a6bb7437011f91261119af
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_tools.text_denormalization.data_loader_utils import get_abs_path
from nemo_tools.text_denormalization.graph_utils import (
GraphFst,
convert_space,
delete_extra_space,
delete_space,
insert_space,
)
from nemo_tools.text_denormalization.taggers.cardinal import CardinalFst
from nemo_tools.text_denormalization.utils import num_to_word
from pynini.lib import pynutil
class TimeFst(GraphFst):
"""
Finite state transducer for classifying time
e.g. twelve thirty -> time { hours: "12" minutes: "30" }
e.g. twelve past one -> time { minutes: "12" hours: "1" }
e.g. two o clock a m -> time { hours: "2" suffix: "a.m." }
"""
def __init__(self):
super().__init__(name="time", kind="classify")
# hours, minutes, seconds, suffix, zone, style, speak_period
suffix_graph = pynini.string_file(get_abs_path("data/time_suffix.tsv"))
time_zone_graph = pynini.invert(pynini.string_file(get_abs_path("data/time_zone.tsv")))
# only used for < 1000 thousand -> 0 weight
cardinal = pynutil.add_weight(CardinalFst().graph_no_exception, weight=-0.7)
labels_hour = [num_to_word(x) for x in range(0, 24)]
labels_minute_single = [num_to_word(x) for x in range(1, 10)]
labels_minute_double = [num_to_word(x) for x in range(10, 60)]
graph_hour = pynini.union(*labels_hour) @ cardinal
graph_minute_single = pynini.union(*labels_minute_single) @ cardinal
graph_minute_double = pynini.union(*labels_minute_double) @ cardinal
graph_minute_verbose = pynini.cross("half", "30") | pynini.cross("quarter", "15")
oclock = pynini.cross(pynini.union("o' clock", "o clock", "o'clock", "oclock"), "")
final_graph_hour = pynutil.insert("hours: \"") + graph_hour + pynutil.insert("\"")
final_graph_minute = (
pynutil.insert("minutes: \"")
+ (
pynutil.insert("00")
| oclock + pynutil.insert("00")
| pynutil.delete("o") + delete_space + graph_minute_single
| graph_minute_double
)
+ pynutil.insert("\"")
)
final_suffix = pynutil.insert("suffix: \"") + convert_space(suffix_graph) + pynutil.insert("\"")
final_suffix_optional = pynini.closure(delete_space + insert_space + final_suffix, 0, 1)
final_time_zone_optional = pynini.closure(
delete_space
+ insert_space
+ pynutil.insert("zone: \"")
+ convert_space(time_zone_graph)
+ pynutil.insert("\""),
0,
1,
)
# five o' clock
# two o eight, two thiry five (am/pm)
# two pm/am
graph_hm = final_graph_hour + delete_extra_space + final_graph_minute
# 10 past four, quarter past four, half past four
graph_mh = (
pynutil.insert("minutes: \"")
+ pynini.union(graph_minute_single, graph_minute_double, graph_minute_verbose)
+ pynutil.insert("\"")
+ delete_space
+ pynutil.delete("past")
+ delete_extra_space
+ final_graph_hour
)
final_graph = ((graph_hm | graph_mh) + final_suffix_optional + final_time_zone_optional).optimize()
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
| 40.35
| 107
| 0.64461
|
93a74fd307d04ab0be4467bd3441e31c3f642a57
| 267
|
py
|
Python
|
yolodex/templatetags/yolodex.py
|
correctiv/django-yolodex
|
e30dff95153c312119acad3c35ef7fcca0fab076
|
[
"MIT"
] | 5
|
2015-06-22T20:15:49.000Z
|
2016-08-17T13:19:41.000Z
|
yolodex/templatetags/yolodex.py
|
correctiv/django-yolodex
|
e30dff95153c312119acad3c35ef7fcca0fab076
|
[
"MIT"
] | null | null | null |
yolodex/templatetags/yolodex.py
|
correctiv/django-yolodex
|
e30dff95153c312119acad3c35ef7fcca0fab076
|
[
"MIT"
] | null | null | null |
from django import template
register = template.Library()
@register.simple_tag
def verbify(edge, subject):
return edge.render_with_subject(subject, link_object=True)
@register.assignment_tag
def dictKeyLookup(the_dict, key):
return the_dict.get(key, '')
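# A hedged usage note (not part of the original tag library): in a Django template
# these tags would typically be loaded and invoked roughly as follows; the variable
# names are illustrative only.
# {% load yolodex %}
# {% verbify edge subject %}
# {% dictKeyLookup the_dict "some-key" as value %}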
| 19.071429
| 62
| 0.771536
|
44e1b0760b12cd3de45ac0aa93aee0a3fd3c6036
| 12,685
|
py
|
Python
|
pylib/gyp/win_tool.py
|
omegaphora/external_chromium_org_tools_gyp
|
d59685f6e5928ab145487c3c25b011e686fb7b10
|
[
"BSD-3-Clause"
] | 33
|
2015-01-21T09:50:21.000Z
|
2022-02-12T15:18:25.000Z
|
deps/gyp/pylib/gyp/win_tool.py
|
free1978/mapbox-gl-native
|
2a50fccd24e762d0de5a53bac358e5ddfea8d213
|
[
"BSD-2-Clause"
] | 5
|
2016-09-28T11:37:41.000Z
|
2022-02-05T11:08:44.000Z
|
deps/gyp/pylib/gyp/win_tool.py
|
free1978/mapbox-gl-native
|
2a50fccd24e762d0de5a53bac358e5ddfea8d213
|
[
"BSD-2-Clause"
] | 8
|
2015-06-08T15:57:25.000Z
|
2019-05-15T08:52:58.000Z
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
import os
import re
import shutil
import subprocess
import stat
import string
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# A regex matching an argument corresponding to the output filename passed to
# link.exe.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
def main(args):
executor = WinTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class WinTool(object):
"""This class performs all the Windows tooling steps. The methods can either
be executed directly, or dispatched from an argument list."""
def _UseSeparateMspdbsrv(self, env, args):
"""Allows to use a unique instance of mspdbsrv.exe per linker instead of a
shared one."""
if len(args) < 1:
raise Exception("Not enough arguments")
if args[0] != 'link.exe':
return
# Use the output filename passed to the linker to generate an endpoint name
# for mspdbsrv.exe.
endpoint_name = None
for arg in args:
m = _LINK_EXE_OUT_ARG.match(arg)
if m:
endpoint_name = re.sub(r'\W+', '',
'%s_%d' % (m.group('out'), os.getpid()))
break
if endpoint_name is None:
return
# Adds the appropriate environment variable. This will be read by link.exe
# to know which instance of mspdbsrv.exe it should connect to (if it's
# not set then the default endpoint is used).
env['_MSPDBSRV_ENDPOINT_'] = endpoint_name
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like recursive-mirror to RecursiveMirror."""
return name_string.title().replace('-', '')
def _GetEnv(self, arch):
"""Gets the saved environment from a file for a given architecture."""
# The environment is saved as an "environment block" (see CreateProcess
# and msvs_emulation for details). We convert to a dict here.
# Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
pairs = open(arch).read()[:-2].split('\0')
kvs = [item.split('=', 1) for item in pairs]
return dict(kvs)
def ExecStamp(self, path):
"""Simple stamp command."""
open(path, 'w').close()
def ExecRecursiveMirror(self, source, dest):
"""Emulation of rm -rf out && cp -af in out."""
if os.path.exists(dest):
if os.path.isdir(dest):
def _on_error(fn, path, excinfo):
# The operation failed, possibly because the file is set to
# read-only. If that's why, make it writable and try the op again.
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWRITE)
fn(path)
shutil.rmtree(dest, onerror=_on_error)
else:
if not os.access(dest, os.W_OK):
# Attempt to make the file writable before deleting it.
os.chmod(dest, stat.S_IWRITE)
os.unlink(dest)
if os.path.isdir(source):
shutil.copytree(source, dest)
else:
shutil.copy2(source, dest)
def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
"""Filter diagnostic output from link that looks like:
' Creating library ui.dll.lib and object ui.dll.exp'
This happens when there are exports from the dll or exe.
"""
env = self._GetEnv(arch)
if use_separate_mspdbsrv == 'True':
self._UseSeparateMspdbsrv(env, args)
link = subprocess.Popen(args,
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, _ = link.communicate()
for line in out.splitlines():
if not line.startswith(' Creating library '):
print line
return link.returncode
def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname,
mt, rc, intermediate_manifest, *manifests):
"""A wrapper for handling creating a manifest resource and then executing
a link command."""
# The 'normal' way to do manifests is to have link generate a manifest
# based on gathering dependencies from the object files, then merge that
# manifest with other manifests supplied as sources, convert the merged
# manifest to a resource, and then *relink*, including the compiled
# version of the manifest resource. This breaks incremental linking, and
# is generally overly complicated. Instead, we merge all the manifests
# provided (along with one that includes what would normally be in the
# linker-generated one, see msvs_emulation.py), and include that into the
# first and only link. We still tell link to generate a manifest, but we
# only use that to assert that our simpler process did not miss anything.
variables = {
'python': sys.executable,
'arch': arch,
'out': out,
'ldcmd': ldcmd,
'resname': resname,
'mt': mt,
'rc': rc,
'intermediate_manifest': intermediate_manifest,
'manifests': ' '.join(manifests),
}
add_to_ld = ''
if manifests:
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(manifests)s -out:%(out)s.manifest' % variables)
if embed_manifest == 'True':
subprocess.check_call(
'%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest'
' %(out)s.manifest.rc %(resname)s' % variables)
subprocess.check_call(
'%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s '
'%(out)s.manifest.rc' % variables)
add_to_ld = ' %(out)s.manifest.res' % variables
subprocess.check_call(ldcmd + add_to_ld)
# Run mt.exe on the theoretically complete manifest we generated, merging
# it with the one the linker generated to confirm that the linker
# generated one does not add anything. This is strictly unnecessary for
# correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not
# used in a #pragma comment.
if manifests:
# Merge the intermediate one with ours to .assert.manifest, then check
# that .assert.manifest is identical to ours.
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(out)s.manifest %(intermediate_manifest)s '
'-out:%(out)s.assert.manifest' % variables)
assert_manifest = '%(out)s.assert.manifest' % variables
our_manifest = '%(out)s.manifest' % variables
# Load and normalize the manifests. mt.exe sometimes removes whitespace,
# and sometimes doesn't unfortunately.
with open(our_manifest, 'rb') as our_f:
with open(assert_manifest, 'rb') as assert_f:
our_data = our_f.read().translate(None, string.whitespace)
assert_data = assert_f.read().translate(None, string.whitespace)
if our_data != assert_data:
os.unlink(out)
def dump(filename):
sys.stderr.write('%s\n-----\n' % filename)
with open(filename, 'rb') as f:
sys.stderr.write(f.read() + '\n-----\n')
dump(intermediate_manifest)
dump(our_manifest)
dump(assert_manifest)
sys.stderr.write(
'Linker generated manifest "%s" added to final manifest "%s" '
'(result in "%s"). '
'Were /MANIFEST switches used in #pragma statements? ' % (
intermediate_manifest, our_manifest, assert_manifest))
return 1
def ExecManifestWrapper(self, arch, *args):
"""Run manifest tool with environment set. Strip out undesirable warning
(some XML blocks are recognized by the OS loader, but not the manifest
tool)."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if line and 'manifest authoring warning 81010002' not in line:
print line
return popen.returncode
def ExecManifestToRc(self, arch, *args):
"""Creates a resource file pointing a SxS assembly manifest.
|args| is tuple containing path to resource file, path to manifest file
and resource name which can be "1" (for executables) or "2" (for DLLs)."""
manifest_path, resource_path, resource_name = args
with open(resource_path, 'wb') as output:
output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % (
resource_name,
os.path.abspath(manifest_path).replace('\\', '/')))
def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
*flags):
"""Filter noisy filenames output from MIDL compile step that isn't
quietable via command line flags.
"""
args = ['midl', '/nologo'] + list(flags) + [
'/out', outdir,
'/tlb', tlb,
'/h', h,
'/dlldata', dlldata,
'/iid', iid,
'/proxy', proxy,
idl]
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
# Filter junk out of stdout, and write filtered versions. Output we want
# to filter is pairs of lines that look like this:
# Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
# objidl.idl
lines = out.splitlines()
prefixes = ('Processing ', '64 bit Processing ')
processing = set(os.path.basename(x)
for x in lines if x.startswith(prefixes))
for line in lines:
if not line.startswith(prefixes) and line not in processing:
print line
return popen.returncode
def ExecAsmWrapper(self, arch, *args):
"""Filter logo banner from invocations of asm.exe."""
env = self._GetEnv(arch)
# MSVS doesn't assemble x64 asm files.
if arch == 'environment.x64':
return 0
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Copyright (C) Microsoft Corporation') and
not line.startswith('Microsoft (R) Macro Assembler') and
not line.startswith(' Assembling: ') and
line):
print line
return popen.returncode
def ExecRcWrapper(self, arch, *args):
"""Filter logo banner from invocations of rc.exe. Older versions of RC
don't support the /nologo flag."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
not line.startswith('Copyright (C) Microsoft Corporation') and
line):
print line
return popen.returncode
def ExecActionWrapper(self, arch, rspfile, *dir):
"""Runs an action command line from a response file using the environment
for |arch|. If |dir| is supplied, use that as the working directory."""
env = self._GetEnv(arch)
# TODO(scottmg): This is a temporary hack to get some specific variables
# through to actions that are set after gyp-time. http://crbug.com/333738.
for k, v in os.environ.iteritems():
if k not in env:
env[k] = v
args = open(rspfile).read()
dir = dir[0] if dir else None
return subprocess.call(args, shell=True, env=env, cwd=dir)
def ExecClCompile(self, project_dir, selected_files):
"""Executed by msvs-ninja projects when the 'ClCompile' target is used to
build selected C/C++ files."""
project_dir = os.path.relpath(project_dir, BASE_DIR)
selected_files = selected_files.split(';')
ninja_targets = [os.path.join(project_dir, filename) + '^^'
for filename in selected_files]
cmd = ['ninja.exe']
cmd.extend(ninja_targets)
return subprocess.call(cmd, shell=True, cwd=BASE_DIR)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 40.142405
| 80
| 0.645802
|
8f4d1b87d5d14eb8ce299312a0ac968ff8402060
| 2,242
|
py
|
Python
|
detection/pixel_link/util/str_.py
|
HLIG/HUAWEI_OCR2019
|
1070d6291072e0223c2624f686766d0f3065e9c6
|
[
"MIT"
] | 54
|
2019-04-17T07:55:44.000Z
|
2021-06-02T06:00:04.000Z
|
detection/pixel_link/util/str_.py
|
HLIG/HUAWEI_OCR2019
|
1070d6291072e0223c2624f686766d0f3065e9c6
|
[
"MIT"
] | 5
|
2019-04-24T03:22:50.000Z
|
2021-08-18T13:12:38.000Z
|
detection/pixel_link/util/str_.py
|
HLIG/HUAWEI_OCR2019
|
1070d6291072e0223c2624f686766d0f3065e9c6
|
[
"MIT"
] | 28
|
2019-04-17T11:30:58.000Z
|
2021-12-09T13:37:02.000Z
|
# encoding = utf-8
def int_array_to_str(arr):
"""turn an int array to a str"""
return "".join(map(chr, arr))
def join(arr, splitter=','):
temp = []
for e in arr:
temp.append(e)
temp.append(splitter)
temp.pop()
return "".join(temp)
def is_str(s):
return type(s) == str
def to_lowercase(s):
return str.lower(s)
def to_uppercase(s):
return str.upper(s)
def ends_with(s, suffix, ignore_case = False):
"""
suffix: str, list, or tuple
"""
if is_str(suffix):
suffix = [suffix]
suffix = list(suffix)
if ignore_case:
for idx, suf in enumerate(suffix):
suffix[idx] = to_lowercase(suf)
s = to_lowercase(s)
suffix = tuple(suffix)
return s.endswith(suffix)
def starts_with(s, prefix, ignore_case = False):
"""
prefix: str, list, or tuple
"""
if is_str(prefix):
prefix = [prefix]
prefix = list(prefix)
if ignore_case:
for idx, pre in enumerate(prefix):
prefix[idx] = to_lowercase(pre)
s = to_lowercase(s)
prefix = tuple(prefix)
return s.startswith(prefix)
def contains(s, target, ignore_case = False):
if ignore_case:
s = to_lowercase(s)
target = to_lowercase(target)
return s.find(target) >= 0
def index_of(s, target):
return s.find(target)
def replace_all(s, old, new, reg = False):
if reg:
import re
targets = re.findall(old, s)
for t in targets:
s = s.replace(t, new)
else:
s = s.replace(old, new)
return s
def remove_all(s, sub):
return replace_all(s, sub, '')
def split(s, splitter, reg = False):
if not reg:
return s.split(splitter)
import re
return re.split(splitter, s)
def remove_invisible(s):
s = replace_all(s, ' ', '')
s = replace_all(s, '\n', '')
s = replace_all(s, '\t', '')
s = replace_all(s, '\r', '')
s = replace_all(s, '\xef\xbb\xbf', '')
return s
def find_all(s, pattern):
import re
return re.findall(pattern, s)
def is_none_or_empty(s):
if s is None:
return True
    return len(s) == 0
def to_json(obj):
import ujson
return ujson.dumps(obj)
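# A minimal, hedged usage sketch (not part of the original module) for the
# case-insensitive prefix/suffix helpers and the whitespace stripper above.
if __name__ == "__main__":
    print(ends_with("report.PDF", ".pdf", ignore_case=True))              # True
    print(starts_with("Hello world", ["hi", "hello"], ignore_case=True))  # True
    print(remove_invisible(" a b\tc\r\n"))                                # "abc"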
| 22.646465
| 48
| 0.574487
|
00e3adb215284d36b2efa692cfcb9130d3d34ae2
| 997
|
py
|
Python
|
mouth_open_algorithm_test.py
|
baranee-18/Mouth_Open_or_Close_Detection
|
d5c49cf6c707d46a36904d951b5b1cd2a15f8ac2
|
[
"MIT"
] | null | null | null |
mouth_open_algorithm_test.py
|
baranee-18/Mouth_Open_or_Close_Detection
|
d5c49cf6c707d46a36904d951b5b1cd2a15f8ac2
|
[
"MIT"
] | null | null | null |
mouth_open_algorithm_test.py
|
baranee-18/Mouth_Open_or_Close_Detection
|
d5c49cf6c707d46a36904d951b5b1cd2a15f8ac2
|
[
"MIT"
] | null | null | null |
from mouth_open_algorithm import get_lip_height, get_mouth_height, check_mouth_open
# obama open mouth
top_lip = [(181, 359), (192, 339), (211, 332), (225, 336), (243, 333), (271, 342), (291, 364), (282, 363), (242, 346), (225, 347), (211, 345), (188, 358)]
bottom_lip = [(291, 364), (270, 389), (243, 401), (223, 403), (207, 399), (190, 383), (181, 359), (188, 358), (210, 377), (225, 381), (243, 380), (282, 363)]
# close mouth
# top_lip = [(151, 127), (157, 126), (163, 126), (168, 127), (172, 127), (178, 127), (185, 129), (182, 129), (172, 130), (167, 130), (163, 129), (153, 127)]
# bottom_lip = [(185, 129), (177, 133), (171, 135), (166, 135), (161, 134), (156, 132), (151, 127), (153, 127), (162, 129), (167, 130), (171, 130), (182, 129)]
print('top_lip height: %.2f' % get_lip_height(top_lip))
print('bottom_lip height: %.2f' % get_lip_height(bottom_lip))
print('mouth height: %.2f' % get_mouth_height(top_lip,bottom_lip))
print('Is mouth open:', check_mouth_open(top_lip,bottom_lip) )
| 71.214286
| 159
| 0.611836
|
11cd8f35dc56c98b64a415cc1b5b3412bbe11274
| 2,392
|
py
|
Python
|
leaf/core/wrapper.py
|
guiqiqi/leaf
|
79e34f4b8fba8c6fd208b5a3049103dca2064ab5
|
[
"Apache-2.0"
] | 119
|
2020-01-30T04:25:03.000Z
|
2022-03-27T07:15:45.000Z
|
leaf/core/wrapper.py
|
guiqiqi/leaf
|
79e34f4b8fba8c6fd208b5a3049103dca2064ab5
|
[
"Apache-2.0"
] | 8
|
2020-02-02T05:49:47.000Z
|
2021-01-25T03:31:09.000Z
|
leaf/core/wrapper.py
|
guiqiqi/leaf
|
79e34f4b8fba8c6fd208b5a3049103dca2064ab5
|
[
"Apache-2.0"
] | 11
|
2020-01-31T15:07:11.000Z
|
2021-03-24T03:47:48.000Z
|
"""Leaf decorator function library"""
import time
import queue
import threading
from typing import Callable, NoReturn
def thread(function: Callable) -> object:
    """Run the given task in a new thread"""
    def params(*args, **kwargs):
        """Accept the task function's arguments"""
        # Execute the function in a thread
        def process(*args, **kwargs):
            """Wrapper around the actual work"""
            function(*args, **kwargs)
        _thread = threading.Thread(
            target=process, args=args, kwargs=kwargs)
        _thread.setDaemon(True)
        _thread.start()
    return params
def timer(function: Callable) -> object:
    """Timing decorator - print the elapsed time after execution"""
    def wrapper(*arg, **kwargs):
        """Argument receiver"""
        # Time the function call
        start = time.time()
        result = function(*arg, **kwargs)
        end = time.time()
        # Display the elapsed time
        used = (end - start) * 1000
        print("-> elapsed time: %.2f ms" % used)
        return result
    return wrapper
def timelimit(limited: float) -> object:
    """
    Limit the execution time of a function:
    1. Create two threads - a timer thread and a worker thread
    2. Synchronize the work state through a threading.Lock
    3. Once the lock is released, check whether the work has finished
    *Note: this approach raises TimeoutError after the timeout
    *Note: it does not affect the worker thread - the worker cannot be stopped from outside
    """
    def wrapper(function: Callable):
        """Function wrapper"""
        # Initialize the lock and the result queue
        result = queue.Queue(maxsize=1)
        mutex = threading.Lock()
        mutex.acquire()
        def _timer_work() -> NoReturn:
            """Release the lock when the timer expires"""
            mutex.release()
        def params(*args, **kwargs):
            """Argument receiver"""
            def _worker_work(*args, **kwargs):
                """Worker thread running the task"""
                result.put(function(*args, **kwargs))
                # Check the lock and try to release it
                # pylint: disable=no-member
                if mutex.locked():
                    mutex.release()
            # Set up the timer and the worker thread
            _timer = threading.Timer(limited, _timer_work)
            _worker = threading.Thread(
                target=_worker_work, args=args, kwargs=kwargs)
            _worker.setDaemon(True)
            _worker.start()
            _timer.start()
            # After acquiring the lock, check the task status
            if mutex.acquire():
                _timer.cancel()
                # If the task has finished - return the result
                if not result.empty():
                    return result.get()
                # If the task has not finished - raise a timeout
                raise TimeoutError
            return result.get_nowait()
        return params
    return wrapper
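# A minimal, hedged usage sketch (not part of the original module): decorating a
# fast and a deliberately slow function to show the normal return path and the
# TimeoutError path of `timelimit`. The limits and sleep time below are made up.
if __name__ == "__main__":
    @timelimit(0.5)
    def quick():
        return "done"
    print(quick())  # expected: "done"
    @timelimit(0.2)
    def slow():
        time.sleep(1)
        return "too late"
    try:
        slow()
    except TimeoutError:
        print("slow() timed out as expected")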
| 23.45098
| 62
| 0.512124
|
374ffe0da032e1e1df0eb46a4505a552eb7a7761
| 16,953
|
py
|
Python
|
crslab/system/tgredial.py
|
Zilize/CRSLab
|
fb357d0dfb7d2cf7b67b892d98e52032a31ca564
|
[
"MIT"
] | null | null | null |
crslab/system/tgredial.py
|
Zilize/CRSLab
|
fb357d0dfb7d2cf7b67b892d98e52032a31ca564
|
[
"MIT"
] | null | null | null |
crslab/system/tgredial.py
|
Zilize/CRSLab
|
fb357d0dfb7d2cf7b67b892d98e52032a31ca564
|
[
"MIT"
] | null | null | null |
# @Time : 2020/12/9
# @Author : Yuanhang Zhou
# @Email : sdzyh002@gmail.com
# UPDATE:
# @Time : 2021/1/3
# @Author : Xiaolei Wang
# @Email : wxl1999@foxmail.com
import os
import torch
from loguru import logger
from math import floor
from crslab.dataset import dataset_language_map
from crslab.evaluator.metrics.base import AverageMetric
from crslab.evaluator.metrics.gen import PPLMetric
from crslab.system.base import BaseSystem
from crslab.system.utils.functions import ind2txt
from crslab.utils import ModelType
class TGReDialSystem(BaseSystem):
    """This is the system for the TGReDial model"""
def __init__(self, opt, train_dataloader, valid_dataloader, test_dataloader, vocab, side_data, restore=False,
interaction=False, debug=False, tensorboard=False):
"""
Args:
opt (dict): Indicating the hyper parameters.
train_dataloader (BaseDataLoader): Indicating the train supervised of corresponding dataset.
valid_dataloader (BaseDataLoader): Indicating the valid supervised of corresponding dataset.
test_dataloader (BaseDataLoader): Indicating the test supervised of corresponding dataset.
vocab (dict): Indicating the vocabulary.
side_data (dict): Indicating the side data.
restore (bool, optional): Indicating if we store system after training. Defaults to False.
interaction (bool, optional): Indicating if we interact with system. Defaults to False.
debug (bool, optional): Indicating if we train in debug mode. Defaults to False.
            tensorboard (bool, optional): Indicating if we monitor the training performance in tensorboard. Defaults to False.
"""
super(TGReDialSystem, self).__init__(opt, train_dataloader, valid_dataloader,
test_dataloader, vocab, side_data, restore, interaction, debug,
tensorboard)
if hasattr(self, 'conv_model'):
self.ind2tok = vocab['conv']['ind2tok']
self.end_token_idx = vocab['conv']['end']
if hasattr(self, 'rec_model'):
self.item_ids = side_data['rec']['item_entity_ids']
self.id2entity = vocab['rec']['id2entity']
if hasattr(self, 'rec_model'):
self.rec_optim_opt = self.opt['rec']
self.rec_epoch = self.rec_optim_opt['epoch']
self.rec_batch_size = self.rec_optim_opt['batch_size']
if hasattr(self, 'conv_model'):
self.conv_optim_opt = self.opt['conv']
self.conv_epoch = self.conv_optim_opt['epoch']
self.conv_batch_size = self.conv_optim_opt['batch_size']
if self.conv_optim_opt.get('lr_scheduler', None) and 'Transformers' in self.conv_optim_opt['lr_scheduler'][
'name']:
batch_num = 0
for _ in self.train_dataloader['conv'].get_conv_data(batch_size=self.conv_batch_size, shuffle=False):
batch_num += 1
conv_training_steps = self.conv_epoch * floor(batch_num / self.conv_optim_opt.get('update_freq', 1))
self.conv_optim_opt['lr_scheduler']['training_steps'] = conv_training_steps
if hasattr(self, 'policy_model'):
self.policy_optim_opt = self.opt['policy']
self.policy_epoch = self.policy_optim_opt['epoch']
self.policy_batch_size = self.policy_optim_opt['batch_size']
self.language = dataset_language_map[self.opt['dataset']]
def _set_model_type(self) -> ModelType:
return ModelType.GENERATION
def rec_evaluate(self, rec_predict, item_label):
rec_predict = rec_predict.cpu()
rec_predict = rec_predict[:, self.item_ids]
_, rec_ranks = torch.topk(rec_predict, 50, dim=-1)
rec_ranks = rec_ranks.tolist()
item_label = item_label.tolist()
for rec_rank, item in zip(rec_ranks, item_label):
item = self.item_ids.index(item)
self.evaluator.rec_evaluate(rec_rank, item)
def policy_evaluate(self, rec_predict, movie_label):
rec_predict = rec_predict.cpu()
_, rec_ranks = torch.topk(rec_predict, 50, dim=-1)
rec_ranks = rec_ranks.tolist()
movie_label = movie_label.tolist()
for rec_rank, movie in zip(rec_ranks, movie_label):
self.evaluator.rec_evaluate(rec_rank, movie)
def conv_evaluate(self, prediction, response):
"""
Args:
prediction: torch.LongTensor, shape=(bs, response_truncate-1)
response: torch.LongTensor, shape=(bs, response_truncate)
the first token in response is <|endoftext|>, it is not in prediction
"""
prediction = prediction.tolist()
response = response.tolist()
for p, r in zip(prediction, response):
p_str = ind2txt(p, self.ind2tok, self.end_token_idx)
r_str = ind2txt(r[1:], self.ind2tok, self.end_token_idx)
self.evaluator.gen_evaluate(p_str, [r_str])
def step(self, batch, stage, mode):
"""
stage: ['policy', 'rec', 'conv']
        mode: ['train', 'val', 'test']
"""
batch = [ele.to(self.device) for ele in batch]
if stage == 'policy':
if mode == 'train':
self.policy_model.train()
else:
self.policy_model.eval()
policy_loss, policy_predict = self.policy_model.forward(batch, mode)
if mode == "train" and policy_loss is not None:
policy_loss = policy_loss.sum()
self.backward(policy_loss)
else:
self.policy_evaluate(policy_predict, batch[-1])
if isinstance(policy_loss, torch.Tensor):
policy_loss = policy_loss.item()
self.evaluator.optim_metrics.add("policy_loss",
AverageMetric(policy_loss))
elif stage == 'rec':
if mode == 'train':
self.rec_model.train()
else:
self.rec_model.eval()
rec_loss, rec_predict = self.rec_model.forward(batch, mode)
rec_loss = rec_loss.sum()
if mode == "train":
self.backward(rec_loss)
else:
self.rec_evaluate(rec_predict, batch[-1])
rec_loss = rec_loss.item()
self.evaluator.optim_metrics.add("rec_loss",
AverageMetric(rec_loss))
elif stage == "conv":
if mode != "test":
# train + valid: need to compute ppl
gen_loss, pred = self.conv_model.forward(batch, mode)
gen_loss = gen_loss.sum()
if mode == 'train':
self.backward(gen_loss)
else:
self.conv_evaluate(pred, batch[-1])
gen_loss = gen_loss.item()
self.evaluator.optim_metrics.add("gen_loss",
AverageMetric(gen_loss))
self.evaluator.gen_metrics.add("ppl", PPLMetric(gen_loss))
else:
# generate response in conv_model.step
pred = self.conv_model.forward(batch, mode)
self.conv_evaluate(pred, batch[-1])
else:
raise
def train_recommender(self):
if hasattr(self.rec_model, 'bert'):
if os.environ["CUDA_VISIBLE_DEVICES"] == '-1':
bert_param = list(self.rec_model.bert.named_parameters())
else:
bert_param = list(self.rec_model.module.bert.named_parameters())
bert_param_name = ['bert.' + n for n, p in bert_param]
else:
bert_param = []
bert_param_name = []
other_param = [
name_param for name_param in self.rec_model.named_parameters()
if name_param[0] not in bert_param_name
]
params = [{'params': [p for n, p in bert_param], 'lr': self.rec_optim_opt['lr_bert']},
{'params': [p for n, p in other_param]}]
self.init_optim(self.rec_optim_opt, params)
for epoch in range(self.rec_epoch):
self.evaluator.reset_metrics()
logger.info(f'[Recommendation epoch {str(epoch)}]')
for batch in self.train_dataloader['rec'].get_rec_data(self.rec_batch_size,
shuffle=True):
self.step(batch, stage='rec', mode='train')
self.evaluator.report(epoch=epoch, mode='train')
# val
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.valid_dataloader['rec'].get_rec_data(
self.rec_batch_size, shuffle=False):
self.step(batch, stage='rec', mode='val')
self.evaluator.report(epoch=epoch, mode='val')
# early stop
metric = self.evaluator.rec_metrics['hit@1'] + self.evaluator.rec_metrics['hit@50']
if self.early_stop(metric):
break
# test
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.test_dataloader['rec'].get_rec_data(self.rec_batch_size,
shuffle=False):
self.step(batch, stage='rec', mode='test')
self.evaluator.report(mode='test')
def train_conversation(self):
self.init_optim(self.conv_optim_opt, self.conv_model.parameters())
for epoch in range(self.conv_epoch):
self.evaluator.reset_metrics()
logger.info(f'[Conversation epoch {str(epoch)}]')
for batch in self.train_dataloader['conv'].get_conv_data(
batch_size=self.conv_batch_size, shuffle=True):
self.step(batch, stage='conv', mode='train')
self.evaluator.report(epoch=epoch, mode='train')
# val
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.valid_dataloader['conv'].get_conv_data(
batch_size=self.conv_batch_size, shuffle=False):
self.step(batch, stage='conv', mode='val')
self.evaluator.report(epoch=epoch, mode='val')
# early stop
metric = self.evaluator.gen_metrics['ppl']
if self.early_stop(metric):
break
# test
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.test_dataloader['conv'].get_conv_data(
batch_size=self.conv_batch_size, shuffle=False):
self.step(batch, stage='conv', mode='test')
self.evaluator.report(mode='test')
def train_policy(self):
policy_params = list(self.policy_model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
params = [{
'params': [
p for n, p in policy_params
if not any(nd in n for nd in no_decay)
],
'weight_decay':
self.policy_optim_opt['weight_decay']
}, {
'params': [
p for n, p in policy_params
if any(nd in n for nd in no_decay)
],
}]
self.init_optim(self.policy_optim_opt, params)
for epoch in range(self.policy_epoch):
self.evaluator.reset_metrics()
logger.info(f'[Policy epoch {str(epoch)}]')
# change the shuffle to True
for batch in self.train_dataloader['policy'].get_policy_data(
self.policy_batch_size, shuffle=True):
self.step(batch, stage='policy', mode='train')
self.evaluator.report(epoch=epoch, mode='train')
# val
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.valid_dataloader['policy'].get_policy_data(
self.policy_batch_size, shuffle=False):
self.step(batch, stage='policy', mode='val')
self.evaluator.report(epoch=epoch, mode='val')
# early stop
metric = self.evaluator.rec_metrics['hit@1'] + self.evaluator.rec_metrics['hit@50']
if self.early_stop(metric):
break
# test
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.test_dataloader['policy'].get_policy_data(
self.policy_batch_size, shuffle=False):
self.step(batch, stage='policy', mode='test')
self.evaluator.report(mode='test')
def fit(self):
if hasattr(self, 'rec_model'):
self.train_recommender()
if hasattr(self, 'policy_model'):
self.train_policy()
if hasattr(self, 'conv_model'):
self.train_conversation()
def interact(self):
self.init_interact()
input_text = self.get_input(self.language)
while not self.finished:
# rec
if hasattr(self, 'rec_model'):
rec_input = self.process_input(input_text, 'rec')
scores = self.rec_model.forward(rec_input, 'infer')
scores = scores.cpu()[0]
scores = scores[self.item_ids]
_, rank = torch.topk(scores, 10, dim=-1)
item_ids = []
for r in rank.tolist():
item_ids.append(self.item_ids[r])
first_item_id = item_ids[:1]
self.update_context('rec', entity_ids=first_item_id, item_ids=first_item_id)
print(f"[Recommend]:")
for item_id in item_ids:
if item_id in self.id2entity:
print(self.id2entity[item_id])
# conv
if hasattr(self, 'conv_model'):
conv_input = self.process_input(input_text, 'conv')
preds = self.conv_model.forward(conv_input, 'infer').tolist()[0]
p_str = ind2txt(preds, self.ind2tok, self.end_token_idx)
token_ids, entity_ids, movie_ids, word_ids = self.convert_to_id(p_str, 'conv')
self.update_context('conv', token_ids, entity_ids, movie_ids, word_ids)
print(f"[Response]:\n{p_str}")
# input
input_text = self.get_input(self.language)
def process_input(self, input_text, stage):
token_ids, entity_ids, movie_ids, word_ids = self.convert_to_id(input_text, stage)
self.update_context(stage, token_ids, entity_ids, movie_ids, word_ids)
data = {'role': 'Seeker', 'context_tokens': self.context[stage]['context_tokens'],
'context_entities': self.context[stage]['context_entities'],
'context_words': self.context[stage]['context_words'],
'context_items': self.context[stage]['context_items'],
'user_profile': self.context[stage]['user_profile'],
'interaction_history': self.context[stage]['interaction_history']}
dataloader = get_dataloader(self.opt, data, self.vocab[stage])
if stage == 'rec':
data = dataloader.rec_interact(data)
elif stage == 'conv':
data = dataloader.conv_interact(data)
data = [ele.to(self.device) if isinstance(ele, torch.Tensor) else ele for ele in data]
return data
def convert_to_id(self, text, stage):
if self.language == 'zh':
tokens = self.tokenize(text, 'pkuseg')
elif self.language == 'en':
tokens = self.tokenize(text, 'nltk')
else:
raise
entities = self.link(tokens, self.side_data[stage]['entity_kg']['entity'])
words = self.link(tokens, self.side_data[stage]['word_kg']['entity'])
if self.opt['tokenize'][stage] in ('gpt2', 'bert'):
language = dataset_language_map[self.opt['dataset']]
path = os.path.join(self.opt.pretrain_path, self.opt['tokenize'][stage], language)
tokens = self.tokenize(text, 'bert', path)
token_ids = [self.vocab[stage]['tok2ind'].get(token, self.vocab[stage]['unk']) for token in tokens]
entity_ids = [self.vocab[stage]['entity2id'][entity] for entity in entities if
entity in self.vocab[stage]['entity2id']]
movie_ids = [entity_id for entity_id in entity_ids if entity_id in self.item_ids]
word_ids = [self.vocab[stage]['word2id'][word] for word in words if word in self.vocab[stage]['word2id']]
return token_ids, entity_ids, movie_ids, word_ids
| 45.450402
| 126
| 0.578246
|
fe5692224d8b9f8511c98a72df0ec05949186726
| 39,316
|
py
|
Python
|
venv/Lib/site-packages/pandas/tests/indexes/datetimes/test_constructors.py
|
itsAbdulKhadar/Machine-Learning-with-Streamlit
|
c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3
|
[
"MIT"
] | 9
|
2019-05-29T23:50:28.000Z
|
2021-01-29T20:51:05.000Z
|
venv/Lib/site-packages/pandas/tests/indexes/datetimes/test_constructors.py
|
itsAbdulKhadar/Machine-Learning-with-Streamlit
|
c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3
|
[
"MIT"
] | 37
|
2020-10-20T08:30:53.000Z
|
2020-12-22T13:15:45.000Z
|
venv/Lib/site-packages/pandas/tests/indexes/datetimes/test_constructors.py
|
itsAbdulKhadar/Machine-Learning-with-Streamlit
|
c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3
|
[
"MIT"
] | 5
|
2021-01-19T14:06:34.000Z
|
2022-02-25T15:57:16.000Z
|
from datetime import datetime, timedelta, timezone
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import DatetimeIndex, Index, Timestamp, date_range, offsets, to_datetime
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray, period_array
class TestDatetimeIndex:
@pytest.mark.parametrize("dt_cls", [DatetimeIndex, DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = (
"Inferred frequency None from passed values does not conform "
"to passed frequency D"
)
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp("2011-01-01")], freq="D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp("2011-01-01").value], freq="D")
# TODO: better place for tests shared by DTI/TDI?
@pytest.mark.parametrize(
"index",
[
pd.date_range("2016-01-01", periods=5, tz="US/Pacific"),
pd.timedelta_range("1 Day", periods=5),
],
)
def test_shallow_copy_inherits_array_freq(self, index):
        # If we pass a DTA/TDA to shallow_copy and don't specify a freq,
# we should inherit the array's freq, not our own.
array = index._data
arr = array[[0, 3, 2, 4, 1]]
assert arr.freq is None
result = index._shallow_copy(arr)
assert result.freq is None
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, "2015-01-01", "1999-04-06 15:14:13", "2015-01-01"], tz="US/Eastern"
)
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(["2016Q1", "2016Q2"], freq="Q")
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_raises(self):
        # GH#23675 deprecated, enforced in GH#29794
data = np.array([0], dtype="m8[ns]")
msg = r"timedelta64\[ns\] cannot be converted to datetime64"
with pytest.raises(TypeError, match=msg):
DatetimeIndex(data)
with pytest.raises(TypeError, match=msg):
to_datetime(data)
with pytest.raises(TypeError, match=msg):
DatetimeIndex(pd.TimedeltaIndex(data))
with pytest.raises(TypeError, match=msg):
to_datetime(pd.TimedeltaIndex(data))
def test_constructor_from_sparse_array(self):
# https://github.com/pandas-dev/pandas/issues/35843
values = [
Timestamp("2012-05-01T01:00:00.000000"),
Timestamp("2016-05-01T01:00:00.000000"),
]
arr = pd.arrays.SparseArray(values)
result = Index(arr)
expected = DatetimeIndex(values)
tm.assert_index_equal(result, expected)
def test_construction_caching(self):
df = pd.DataFrame(
{
"dt": pd.date_range("20130101", periods=3),
"dttz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"dt_with_null": [
pd.Timestamp("20130101"),
pd.NaT,
pd.Timestamp("20130103"),
],
"dtns": pd.date_range("20130101", periods=3, freq="ns"),
}
)
assert df.dttz.dtype.tz.zone == "US/Eastern"
@pytest.mark.parametrize(
"kwargs",
[{"tz": "dtype.tz"}, {"dtype": "dtype"}, {"dtype": "dtype", "tz": "dtype.tz"}],
)
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range("20130101", periods=5, freq="H", tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize(
"kwargs",
[{"tz": "dtype.tz"}, {"dtype": "dtype"}, {"dtype": "dtype", "tz": "dtype.tz"}],
)
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range("20130101", periods=5, freq="H", tz=tz)
i = i._with_freq(None)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if "tz" in kwargs:
result = DatetimeIndex(i.asi8, tz="UTC").tz_convert(kwargs["tz"])
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz="UTC")
expected = i.tz_localize(None).tz_localize("UTC")
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
msg = "cannot supply both a tz and a dtype with a tz"
with pytest.raises(ValueError, match=msg):
DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype, tz="US/Pacific")
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp("2011-01-01"), Timestamp("2011-01-02")], name="idx")
exp = DatetimeIndex(
[Timestamp("2011-01-01"), Timestamp("2011-01-02")], name="idx"
)
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index(
[
Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),
Timestamp("2011-01-02 10:00", tz="Asia/Tokyo"),
],
name="idx",
)
exp = DatetimeIndex(
[Timestamp("2011-01-01 10:00"), Timestamp("2011-01-02 10:00")],
tz="Asia/Tokyo",
name="idx",
)
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index(
[
Timestamp("2011-01-01 10:00", tz="US/Eastern"),
Timestamp("2011-08-01 10:00", tz="US/Eastern"),
],
name="idx",
)
exp = DatetimeIndex(
[Timestamp("2011-01-01 10:00"), Timestamp("2011-08-01 10:00")],
tz="US/Eastern",
name="idx",
)
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz="US/Eastern"),
],
name="idx",
)
exp = Index(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz="US/Eastern"),
],
dtype="object",
name="idx",
)
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index(
[
Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),
Timestamp("2011-01-02 10:00", tz="US/Eastern"),
],
name="idx",
)
exp = Index(
[
Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),
Timestamp("2011-01-02 10:00", tz="US/Eastern"),
],
dtype="object",
name="idx",
)
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp("2011-01-01")], name="idx")
exp = DatetimeIndex([Timestamp("2011-01-01")], name="idx")
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index([Timestamp("2011-01-01 10:00", tz="Asia/Tokyo")], name="idx")
exp = DatetimeIndex(
[Timestamp("2011-01-01 10:00")], tz="Asia/Tokyo", name="idx"
)
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index(
[pd.NaT, Timestamp("2011-01-01"), pd.NaT, Timestamp("2011-01-02")],
name="idx",
)
exp = DatetimeIndex(
[pd.NaT, Timestamp("2011-01-01"), pd.NaT, Timestamp("2011-01-02")],
name="idx",
)
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index(
[
pd.NaT,
Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),
pd.NaT,
Timestamp("2011-01-02 10:00", tz="Asia/Tokyo"),
],
name="idx",
)
exp = DatetimeIndex(
[
pd.NaT,
Timestamp("2011-01-01 10:00"),
pd.NaT,
Timestamp("2011-01-02 10:00"),
],
tz="Asia/Tokyo",
name="idx",
)
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index(
[
Timestamp("2011-01-01 10:00", tz="US/Eastern"),
pd.NaT,
Timestamp("2011-08-01 10:00", tz="US/Eastern"),
],
name="idx",
)
exp = DatetimeIndex(
[Timestamp("2011-01-01 10:00"), pd.NaT, Timestamp("2011-08-01 10:00")],
tz="US/Eastern",
name="idx",
)
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index(
[
pd.NaT,
Timestamp("2011-01-01 10:00"),
pd.NaT,
Timestamp("2011-01-02 10:00", tz="US/Eastern"),
],
name="idx",
)
exp = Index(
[
pd.NaT,
Timestamp("2011-01-01 10:00"),
pd.NaT,
Timestamp("2011-01-02 10:00", tz="US/Eastern"),
],
dtype="object",
name="idx",
)
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index(
[
pd.NaT,
Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),
pd.NaT,
Timestamp("2011-01-02 10:00", tz="US/Eastern"),
],
name="idx",
)
exp = Index(
[
pd.NaT,
Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),
pd.NaT,
Timestamp("2011-01-02 10:00", tz="US/Eastern"),
],
dtype="object",
name="idx",
)
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# all NaT
result = Index([pd.NaT, pd.NaT], name="idx")
exp = DatetimeIndex([pd.NaT, pd.NaT], name="idx")
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz="Asia/Tokyo", name="idx")
exp = DatetimeIndex([pd.NaT, pd.NaT], tz="Asia/Tokyo", name="idx")
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp("2011-01-01"), Timestamp("2011-01-02")], name="idx"
)
exp = DatetimeIndex(
[Timestamp("2011-01-01"), Timestamp("2011-01-02")], name="idx"
)
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex
result = DatetimeIndex(
[
Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),
Timestamp("2011-01-02 10:00", tz="Asia/Tokyo"),
],
name="idx",
)
exp = DatetimeIndex(
[Timestamp("2011-01-01 10:00"), Timestamp("2011-01-02 10:00")],
tz="Asia/Tokyo",
name="idx",
)
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex(
[
Timestamp("2011-01-01 10:00", tz="US/Eastern"),
Timestamp("2011-08-01 10:00", tz="US/Eastern"),
],
name="idx",
)
exp = DatetimeIndex(
[Timestamp("2011-01-01 10:00"), Timestamp("2011-08-01 10:00")],
tz="US/Eastern",
name="idx",
)
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# tz mismatch affecting to tz-aware raises TypeError/ValueError
msg = "cannot be converted to datetime64"
with pytest.raises(ValueError, match=msg):
DatetimeIndex(
[
Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),
Timestamp("2011-01-02 10:00", tz="US/Eastern"),
],
name="idx",
)
with pytest.raises(ValueError, match=msg):
DatetimeIndex(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz="US/Eastern"),
],
tz="Asia/Tokyo",
name="idx",
)
with pytest.raises(ValueError, match=msg):
DatetimeIndex(
[
Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),
Timestamp("2011-01-02 10:00", tz="US/Eastern"),
],
tz="US/Eastern",
name="idx",
)
with pytest.raises(ValueError, match=msg):
# passing tz should results in DatetimeIndex, then mismatch raises
# TypeError
Index(
[
pd.NaT,
Timestamp("2011-01-01 10:00"),
pd.NaT,
Timestamp("2011-01-02 10:00", tz="US/Eastern"),
],
tz="Asia/Tokyo",
name="idx",
)
def test_construction_base_constructor(self):
arr = [pd.Timestamp("2011-01-01"), pd.NaT, pd.Timestamp("2011-01-03")]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp("2011-01-03")]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [
datetime(3000, 1, 1),
datetime(4000, 1, 1),
datetime(5000, 1, 1),
datetime(6000, 1, 1),
]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_construction_with_ndarray(self):
# GH 5152
dates = [datetime(2013, 10, 7), datetime(2013, 10, 8), datetime(2013, 10, 9)]
data = DatetimeIndex(dates, freq=pd.offsets.BDay()).values
result = DatetimeIndex(data, freq=pd.offsets.BDay())
expected = DatetimeIndex(["2013-10-07", "2013-10-08", "2013-10-09"], freq="B")
tm.assert_index_equal(result, expected)
def test_integer_values_and_tz_interpreted_as_utc(self):
# GH-24559
val = np.datetime64("2000-01-01 00:00:00", "ns")
values = np.array([val.view("i8")])
result = DatetimeIndex(values).tz_localize("US/Central")
expected = pd.DatetimeIndex(["2000-01-01T00:00:00"], tz="US/Central")
tm.assert_index_equal(result, expected)
# but UTC is *not* deprecated.
with tm.assert_produces_warning(None):
result = DatetimeIndex(values, tz="UTC")
        expected = pd.DatetimeIndex(["2000-01-01T00:00:00"], tz="UTC")
        tm.assert_index_equal(result, expected)
def test_constructor_coverage(self):
rng = date_range("1/1/2000", periods=10.5)
exp = date_range("1/1/2000", periods=10)
tm.assert_index_equal(rng, exp)
msg = "periods must be a number, got foo"
with pytest.raises(TypeError, match=msg):
date_range(start="1/1/2000", periods="foo", freq="D")
msg = "DatetimeIndex\\(\\) must be called with a collection"
with pytest.raises(TypeError, match=msg):
DatetimeIndex("1/1/2000")
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex(
[datetime(2000, 1, 1) + timedelta(i) for i in range(10)]
)
tm.assert_index_equal(result, expected)
# NumPy string array
strings = np.array(["2000-01-01", "2000-01-02", "2000-01-03"])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype("O"))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# string with NaT
strings = np.array(["2000-01-01", "2000-01-02", "NaT"])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype("O"))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# non-conforming
msg = (
"Inferred frequency None from passed values does not conform "
"to passed frequency D"
)
with pytest.raises(ValueError, match=msg):
DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-04"], freq="D")
msg = (
"Of the four parameters: start, end, periods, and freq, exactly "
"three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start="2011-01-01", freq="b")
with pytest.raises(ValueError, match=msg):
date_range(end="2011-01-01", freq="B")
with pytest.raises(ValueError, match=msg):
date_range(periods=10, freq="D")
@pytest.mark.parametrize("freq", ["AS", "W-SUN"])
def test_constructor_datetime64_tzformat(self, freq):
# see GH#6572: ISO 8601 format results in pytz.FixedOffset
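        # (For reference, restating the pytz API: FixedOffset takes minutes, so
        #  "-05:00" corresponds to pytz.FixedOffset(-300) and "+09:00" to
        #  pytz.FixedOffset(540).)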
idx = date_range(
"2013-01-01T00:00:00-05:00", "2016-01-01T23:59:59-05:00", freq=freq
)
expected = date_range(
"2013-01-01T00:00:00",
"2016-01-01T23:59:59",
freq=freq,
tz=pytz.FixedOffset(-300),
)
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range(
"2013-01-01T00:00:00", "2016-01-01T23:59:59", freq=freq, tz="America/Lima"
)
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range(
"2013-01-01T00:00:00+09:00", "2016-01-01T23:59:59+09:00", freq=freq
)
expected = date_range(
"2013-01-01T00:00:00",
"2016-01-01T23:59:59",
freq=freq,
tz=pytz.FixedOffset(540),
)
tm.assert_index_equal(idx, expected)
expected_i8 = date_range(
"2013-01-01T00:00:00", "2016-01-01T23:59:59", freq=freq, tz="Asia/Tokyo"
)
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
# Non ISO 8601 format results in dateutil.tz.tzoffset
idx = date_range("2013/1/1 0:00:00-5:00", "2016/1/1 23:59:59-5:00", freq=freq)
expected = date_range(
"2013-01-01T00:00:00",
"2016-01-01T23:59:59",
freq=freq,
tz=pytz.FixedOffset(-300),
)
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range(
"2013-01-01T00:00:00", "2016-01-01T23:59:59", freq=freq, tz="America/Lima"
)
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range("2013/1/1 0:00:00+9:00", "2016/1/1 23:59:59+09:00", freq=freq)
expected = date_range(
"2013-01-01T00:00:00",
"2016-01-01T23:59:59",
freq=freq,
tz=pytz.FixedOffset(540),
)
tm.assert_index_equal(idx, expected)
expected_i8 = date_range(
"2013-01-01T00:00:00", "2016-01-01T23:59:59", freq=freq, tz="Asia/Tokyo"
)
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = DatetimeIndex(
["2013-01-01", "2013-01-02"], dtype="datetime64[ns, US/Eastern]"
)
expected = DatetimeIndex(["2013-01-01", "2013-01-02"]).tz_localize("US/Eastern")
tm.assert_index_equal(idx, expected)
idx = DatetimeIndex(["2013-01-01", "2013-01-02"], tz="US/Eastern")
tm.assert_index_equal(idx, expected)
        # if we already have a tz and it's not the same, then raise
idx = DatetimeIndex(
["2013-01-01", "2013-01-02"], dtype="datetime64[ns, US/Eastern]"
)
msg = (
"cannot supply both a tz and a timezone-naive dtype "
r"\(i\.e\. datetime64\[ns\]\)"
)
with pytest.raises(ValueError, match=msg):
DatetimeIndex(idx, dtype="datetime64[ns]")
# this is effectively trying to convert tz's
msg = "data is already tz-aware US/Eastern, unable to set specified tz: CET"
with pytest.raises(TypeError, match=msg):
DatetimeIndex(idx, dtype="datetime64[ns, CET]")
msg = "cannot supply both a tz and a dtype with a tz"
with pytest.raises(ValueError, match=msg):
DatetimeIndex(idx, tz="CET", dtype="datetime64[ns, US/Eastern]")
result = DatetimeIndex(idx, dtype="datetime64[ns, US/Eastern]")
tm.assert_index_equal(idx, result)
@pytest.mark.parametrize("dtype", [object, np.int32, np.int64])
def test_constructor_invalid_dtype_raises(self, dtype):
# GH 23986
msg = "Unexpected value for 'dtype'"
with pytest.raises(ValueError, match=msg):
DatetimeIndex([1, 2], dtype=dtype)
def test_constructor_name(self):
idx = date_range(start="2000-01-01", periods=1, freq="A", name="TEST")
assert idx.name == "TEST"
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
assert idx.nanosecond[0] == t1.nanosecond
def test_disallow_setting_tz(self):
# GH 3746
dti = DatetimeIndex(["2010"], tz="UTC")
msg = "Cannot directly set timezone"
with pytest.raises(AttributeError, match=msg):
dti.tz = pytz.timezone("US/Pacific")
@pytest.mark.parametrize(
"tz",
[
None,
"America/Los_Angeles",
pytz.timezone("America/Los_Angeles"),
Timestamp("2000", tz="America/Los_Angeles").tz,
],
)
def test_constructor_start_end_with_tz(self, tz):
# GH 18595
start = Timestamp("2013-01-01 06:00:00", tz="America/Los_Angeles")
end = Timestamp("2013-01-02 06:00:00", tz="America/Los_Angeles")
result = date_range(freq="D", start=start, end=end, tz=tz)
expected = DatetimeIndex(
["2013-01-01 06:00:00", "2013-01-02 06:00:00"],
tz="America/Los_Angeles",
freq="D",
)
tm.assert_index_equal(result, expected)
# Especially assert that the timezone is consistent for pytz
assert pytz.timezone("America/Los_Angeles") is result.tz
@pytest.mark.parametrize("tz", ["US/Pacific", "US/Eastern", "Asia/Tokyo"])
def test_constructor_with_non_normalized_pytz(self, tz):
# GH 18595
non_norm_tz = Timestamp("2010", tz=tz).tz
result = DatetimeIndex(["2010"], tz=non_norm_tz)
assert pytz.timezone(tz) is result.tz
def test_constructor_timestamp_near_dst(self):
# GH 20854
ts = [
Timestamp("2016-10-30 03:00:00+0300", tz="Europe/Helsinki"),
Timestamp("2016-10-30 03:00:00+0200", tz="Europe/Helsinki"),
]
result = DatetimeIndex(ts)
expected = DatetimeIndex([ts[0].to_pydatetime(), ts[1].to_pydatetime()])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass", [Index, DatetimeIndex])
@pytest.mark.parametrize("box", [np.array, partial(np.array, dtype=object), list])
@pytest.mark.parametrize(
"tz, dtype",
[("US/Pacific", "datetime64[ns, US/Pacific]"), (None, "datetime64[ns]")],
)
def test_constructor_with_int_tz(self, klass, box, tz, dtype):
# GH 20997, 20964
ts = Timestamp("2018-01-01", tz=tz)
result = klass(box([ts.value]), dtype=dtype)
expected = klass([ts])
assert result == expected
def test_construction_int_rountrip(self, tz_naive_fixture):
# GH 12619, GH#24559
tz = tz_naive_fixture
result = 1293858000000000000
expected = DatetimeIndex([result], tz=tz).asi8[0]
assert result == expected
def test_construction_from_replaced_timestamps_with_dst(self):
# GH 18785
index = pd.date_range(
pd.Timestamp(2000, 1, 1),
pd.Timestamp(2005, 1, 1),
freq="MS",
tz="Australia/Melbourne",
)
test = pd.DataFrame({"data": range(len(index))}, index=index)
test = test.resample("Y").mean()
result = pd.DatetimeIndex([x.replace(month=6, day=1) for x in test.index])
expected = pd.DatetimeIndex(
[
"2000-06-01 00:00:00",
"2001-06-01 00:00:00",
"2002-06-01 00:00:00",
"2003-06-01 00:00:00",
"2004-06-01 00:00:00",
"2005-06-01 00:00:00",
],
tz="Australia/Melbourne",
)
tm.assert_index_equal(result, expected)
def test_construction_with_tz_and_tz_aware_dti(self):
# GH 23579
dti = date_range("2016-01-01", periods=3, tz="US/Central")
msg = "data is already tz-aware US/Central, unable to set specified tz"
with pytest.raises(TypeError, match=msg):
DatetimeIndex(dti, tz="Asia/Tokyo")
def test_construction_with_nat_and_tzlocal(self):
tz = dateutil.tz.tzlocal()
result = DatetimeIndex(["2018", "NaT"], tz=tz)
expected = DatetimeIndex([Timestamp("2018", tz=tz), pd.NaT])
tm.assert_index_equal(result, expected)
def test_constructor_no_precision_raises(self):
# GH-24753, GH-24739
msg = "with no precision is not allowed"
with pytest.raises(ValueError, match=msg):
pd.DatetimeIndex(["2000"], dtype="datetime64")
with pytest.raises(ValueError, match=msg):
pd.Index(["2000"], dtype="datetime64")
def test_constructor_wrong_precision_raises(self):
msg = "Unexpected value for 'dtype': 'datetime64\\[us\\]'"
with pytest.raises(ValueError, match=msg):
pd.DatetimeIndex(["2000"], dtype="datetime64[us]")
def test_index_constructor_with_numpy_object_array_and_timestamp_tz_with_nan(self):
# GH 27011
result = Index(np.array([Timestamp("2019", tz="UTC"), np.nan], dtype=object))
expected = DatetimeIndex([Timestamp("2019", tz="UTC"), pd.NaT])
tm.assert_index_equal(result, expected)
class TestTimeSeries:
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range("1/1/2000", "1/2/2000", freq="5min")
rng2 = DatetimeIndex(rng)
assert rng.freq == rng2.freq
def test_explicit_none_freq(self):
# Explicitly passing freq=None is respected
rng = date_range("1/1/2000", "1/2/2000", freq="5min")
result = DatetimeIndex(rng, freq=None)
assert result.freq is None
result = DatetimeIndex(rng._data, freq=None)
assert result.freq is None
def test_dti_constructor_years_only(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 6961
rng1 = date_range("2014", "2015", freq="M", tz=tz)
expected1 = date_range("2014-01-31", "2014-12-31", freq="M", tz=tz)
rng2 = date_range("2014", "2015", freq="MS", tz=tz)
expected2 = date_range("2014-01-01", "2015-01-01", freq="MS", tz=tz)
rng3 = date_range("2014", "2020", freq="A", tz=tz)
expected3 = date_range("2014-12-31", "2019-12-31", freq="A", tz=tz)
rng4 = date_range("2014", "2020", freq="AS", tz=tz)
expected4 = date_range("2014-01-01", "2020-01-01", freq="AS", tz=tz)
for rng, expected in [
(rng1, expected1),
(rng2, expected2),
(rng3, expected3),
(rng4, expected4),
]:
tm.assert_index_equal(rng, expected)
def test_dti_constructor_small_int(self, any_int_dtype):
# see gh-13721
exp = DatetimeIndex(
[
"1970-01-01 00:00:00.00000000",
"1970-01-01 00:00:00.00000001",
"1970-01-01 00:00:00.00000002",
]
)
arr = np.array([0, 10, 20], dtype=any_int_dtype)
tm.assert_index_equal(DatetimeIndex(arr), exp)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(["1-1-2000 00:00:01"])
assert rng[0].second == 1
def test_is_(self):
dti = date_range(start="1/1/2005", end="12/1/2005", freq="M")
assert dti.is_(dti)
assert dti.is_(dti.view())
assert not dti.is_(dti.copy())
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view("M8[D]")
idx = Index(arr)
assert (idx.values == conversion.ensure_datetime64ns(arr)).all()
def test_constructor_int64_nocopy(self):
# GH#1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
assert (index.asi8[50:100] == -1).all()
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
assert (index.asi8[50:100] != -1).all()
@pytest.mark.parametrize(
"freq", ["M", "Q", "A", "D", "B", "BH", "T", "S", "L", "U", "H", "N", "C"]
)
def test_from_freq_recreate_from_data(self, freq):
org = date_range(start="2001/02/01 09:00", freq=freq, periods=1)
idx = DatetimeIndex(org, freq=freq)
tm.assert_index_equal(idx, org)
org = date_range(
start="2001/02/01 09:00", freq=freq, tz="US/Pacific", periods=1
)
idx = DatetimeIndex(org, freq=freq, tz="US/Pacific")
tm.assert_index_equal(idx, org)
def test_datetimeindex_constructor_misc(self):
arr = ["1/1/2005", "1/2/2005", "Jn 3, 2005", "2005-01-04"]
msg = r"(\(')?Unknown string format(:', 'Jn 3, 2005'\))?"
with pytest.raises(ValueError, match=msg):
DatetimeIndex(arr)
arr = ["1/1/2005", "1/2/2005", "1/3/2005", "2005-01-04"]
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), "1/2/2005", "1/3/2005", "2005-01-04"]
idx2 = DatetimeIndex(arr)
arr = [Timestamp(datetime(2005, 1, 1)), "1/2/2005", "1/3/2005", "2005-01-04"]
idx3 = DatetimeIndex(arr)
arr = np.array(["1/1/2005", "1/2/2005", "1/3/2005", "2005-01-04"], dtype="O")
idx4 = DatetimeIndex(arr)
arr = to_datetime(["1/1/2005", "1/2/2005", "1/3/2005", "2005-01-04"])
idx5 = DatetimeIndex(arr)
arr = to_datetime(["1/1/2005", "1/2/2005", "Jan 3, 2005", "2005-01-04"])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(["12/05/2007", "25/01/2008"], dayfirst=True)
idx8 = DatetimeIndex(
["2007/05/12", "2008/01/25"], dayfirst=False, yearfirst=True
)
tm.assert_index_equal(idx7, idx8)
for other in [idx2, idx3, idx4, idx5, idx6]:
assert (idx1.values == other.values).all()
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = date_range(start=sdate, freq="1B", periods=20)
assert len(idx) == 20
assert idx[0] == sdate + 0 * offsets.BDay()
assert idx.freq == "B"
idx1 = date_range(start=sdate, end=edate, freq="W-SUN")
idx2 = date_range(start=sdate, end=edate, freq=offsets.Week(weekday=6))
assert len(idx1) == len(idx2)
assert idx1.freq == idx2.freq
idx1 = date_range(start=sdate, end=edate, freq="QS")
idx2 = date_range(
start=sdate, end=edate, freq=offsets.QuarterBegin(startingMonth=1)
)
assert len(idx1) == len(idx2)
assert idx1.freq == idx2.freq
idx1 = date_range(start=sdate, end=edate, freq="BQ")
idx2 = date_range(
start=sdate, end=edate, freq=offsets.BQuarterEnd(startingMonth=12)
)
assert len(idx1) == len(idx2)
assert idx1.freq == idx2.freq
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range("1/1/2000", "3/1/2000")
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
tm.assert_numpy_array_equal(idx.values, expected.values)
def test_date_range_tuple_freq_raises(self):
# GH#34703
edate = datetime(2000, 1, 1)
with pytest.raises(TypeError, match="pass as a string instead"):
date_range(end=edate, freq=("D", 5), periods=20)
def test_timestamp_constructor_invalid_fold_raise():
# Test for #25057
# Valid fold values are only [None, 0, 1]
msg = "Valid values for the fold argument are None, 0, or 1."
with pytest.raises(ValueError, match=msg):
Timestamp(123, fold=2)
def test_timestamp_constructor_pytz_fold_raise():
# Test for #25057
# pytz doesn't support fold. Check that we raise
# if fold is passed with pytz
msg = "pytz timezones do not support fold. Please use dateutil timezones."
tz = pytz.timezone("Europe/London")
with pytest.raises(ValueError, match=msg):
Timestamp(datetime(2019, 10, 27, 0, 30, 0, 0), tz=tz, fold=0)
@pytest.mark.parametrize("fold", [0, 1])
@pytest.mark.parametrize(
"ts_input",
[
1572136200000000000,
1572136200000000000.0,
np.datetime64(1572136200000000000, "ns"),
"2019-10-27 01:30:00+01:00",
datetime(2019, 10, 27, 0, 30, 0, 0, tzinfo=timezone.utc),
],
)
def test_timestamp_constructor_fold_conflict(ts_input, fold):
# Test for #25057
# Check that we raise on fold conflict
msg = (
"Cannot pass fold with possibly unambiguous input: int, float, "
"numpy.datetime64, str, or timezone-aware datetime-like. "
"Pass naive datetime-like or build Timestamp from components."
)
with pytest.raises(ValueError, match=msg):
Timestamp(ts_input=ts_input, fold=fold)
@pytest.mark.parametrize("tz", ["dateutil/Europe/London", None])
@pytest.mark.parametrize("fold", [0, 1])
def test_timestamp_constructor_retain_fold(tz, fold):
# Test for #25057
# Check that we retain fold
ts = pd.Timestamp(year=2019, month=10, day=27, hour=1, minute=30, tz=tz, fold=fold)
result = ts.fold
expected = fold
assert result == expected
@pytest.mark.parametrize("tz", ["dateutil/Europe/London"])
@pytest.mark.parametrize(
"ts_input,fold_out",
[
(1572136200000000000, 0),
(1572139800000000000, 1),
("2019-10-27 01:30:00+01:00", 0),
("2019-10-27 01:30:00+00:00", 1),
(datetime(2019, 10, 27, 1, 30, 0, 0, fold=0), 0),
(datetime(2019, 10, 27, 1, 30, 0, 0, fold=1), 1),
],
)
def test_timestamp_constructor_infer_fold_from_value(tz, ts_input, fold_out):
# Test for #25057
# Check that we infer fold correctly based on timestamps since utc
# or strings
ts = pd.Timestamp(ts_input, tz=tz)
result = ts.fold
expected = fold_out
assert result == expected
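# For context, restating what the parametrization above encodes (assumption:
# dateutil Europe/London, where clocks go back on 2019-10-27): the wall time
# 01:30 occurs twice, so fold=0 resolves to the first occurrence (UTC+01:00)
# and fold=1 to the second (UTC+00:00).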
@pytest.mark.parametrize("tz", ["dateutil/Europe/London"])
@pytest.mark.parametrize(
"ts_input,fold,value_out",
[
(datetime(2019, 10, 27, 1, 30, 0, 0), 0, 1572136200000000000),
(datetime(2019, 10, 27, 1, 30, 0, 0), 1, 1572139800000000000),
],
)
def test_timestamp_constructor_adjust_value_for_fold(tz, ts_input, fold, value_out):
# Test for #25057
# Check that we adjust value for fold correctly
# based on timestamps since utc
ts = pd.Timestamp(ts_input, tz=tz, fold=fold)
result = ts.value
expected = value_out
assert result == expected
| 36.235945
| 88
| 0.572362
|
d5119b1cad74d8e7de7e482296a961169ad1a750
| 4,923
|
py
|
Python
|
setup.py
|
parrotpock/graphite-web
|
e33a2735fc174c00c5a413922aa41f7432d5b6f4
|
[
"Apache-2.0"
] | 4,281
|
2015-01-01T12:35:03.000Z
|
2022-03-31T20:06:59.000Z
|
setup.py
|
parrotpock/graphite-web
|
e33a2735fc174c00c5a413922aa41f7432d5b6f4
|
[
"Apache-2.0"
] | 1,809
|
2015-01-01T21:16:36.000Z
|
2022-03-31T21:25:13.000Z
|
setup.py
|
parrotpock/graphite-web
|
e33a2735fc174c00c5a413922aa41f7432d5b6f4
|
[
"Apache-2.0"
] | 970
|
2015-01-02T19:49:21.000Z
|
2022-03-27T09:48:44.000Z
|
#!/usr/bin/env python
from __future__ import with_statement
import os
try:
from ConfigParser import ConfigParser, DuplicateSectionError # Python 2
except ImportError:
from configparser import ConfigParser, DuplicateSectionError # Python 3
from glob import glob
from collections import defaultdict
# io.StringIO is strictly unicode only. Python 2 StringIO.StringIO accepts
# bytes, so we'll conveniently ignore decoding and reencoding the file there.
try:
from StringIO import StringIO # Python 2
except ImportError:
from io import StringIO # Python 3
# Graphite historically has an install prefix set in setup.cfg. Being in a
# configuration file, it's not easy to override it or unset it (for installing
# graphite in a virtualenv for instance).
# The prefix is now set by ``setup.py`` and *unset* if an environment variable
# named ``GRAPHITE_NO_PREFIX`` is present.
# While ``setup.cfg`` doesn't contain the prefix anymore, the *unset* step is
# required for installations from a source tarball because running
# ``python setup.py sdist`` will re-add the prefix to the tarball's
# ``setup.cfg``.
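# For example (an assumption based on the environment check below, not a
# documented command): installing into a virtualenv without the /opt/graphite
# prefix can be done with
#   GRAPHITE_NO_PREFIX=True python setup.py install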
with open('setup.cfg', 'r') as f:
orig_setup_cfg = f.read()
cf = ConfigParser()
cf.readfp(StringIO(orig_setup_cfg), 'setup.cfg')
if os.environ.get('GRAPHITE_NO_PREFIX') or os.environ.get('READTHEDOCS'):
cf.remove_section('install')
else:
try:
cf.add_section('install')
except DuplicateSectionError:
pass
if not cf.has_option('install', 'prefix'):
cf.set('install', 'prefix', '/opt/graphite')
if not cf.has_option('install', 'install-lib'):
cf.set('install', 'install-lib', '%(prefix)s/webapp')
with open('setup.cfg', 'w') as f:
cf.write(f)
if os.environ.get('USE_SETUPTOOLS'):
from setuptools import setup
setup_kwargs = dict(zip_safe=0)
else:
from distutils.core import setup
setup_kwargs = dict()
storage_dirs = []
for subdir in ('whisper/dummy.txt', 'ceres/dummy.txt', 'rrd/dummy.txt', 'log/dummy.txt', 'log/webapp/dummy.txt'):
storage_dirs.append( ('storage/%s' % subdir, []) )
webapp_content = defaultdict(list)
for root, dirs, files in os.walk('webapp/content'):
for filename in files:
filepath = os.path.join(root, filename)
webapp_content[root].append(filepath)
conf_files = [ ('conf', glob('conf/*.example')) ]
examples = [ ('examples', glob('examples/example-*')) ]
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
try:
setup(
name='graphite-web',
version='1.2.0',
url='http://graphiteapp.org/',
author='Chris Davis',
author_email='chrismd@gmail.com',
license='Apache Software License 2.0',
description='Enterprise scalable realtime graphing',
long_description=read('README.md'),
long_description_content_type='text/markdown',
package_dir={'' : 'webapp'},
packages=[
'graphite',
'graphite.account',
'graphite.account.migrations',
'graphite.browser',
'graphite.composer',
'graphite.dashboard',
'graphite.dashboard.migrations',
'graphite.events',
'graphite.events.migrations',
'graphite.finders',
'graphite.functions',
'graphite.functions.custom',
'graphite.metrics',
'graphite.readers',
'graphite.render',
'graphite.tags',
'graphite.tags.migrations',
'graphite.url_shortener',
'graphite.url_shortener.migrations',
'graphite.version',
'graphite.whitelist',
'graphite.worker_pool',
],
package_data={'graphite' :
['templates/*', 'local_settings.py.example']},
scripts=glob('bin/*'),
data_files=list(webapp_content.items()) + storage_dirs + conf_files + examples,
install_requires=['Django>=1.8,<3.1', 'django-tagging==0.4.3', 'pytz',
'pyparsing', 'cairocffi', 'urllib3',
'scandir;python_version<"3.5"', 'six'],
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
**setup_kwargs
)
finally:
with open('setup.cfg', 'w') as f:
f.write(orig_setup_cfg)
| 34.1875
| 113
| 0.640057
|
a81a5639bb7e2ba562ae04badad3743f39023905
| 3,030
|
py
|
Python
|
fscognitive/core/cognitive/person_group_cognitive.py
|
anhhoangiot/people_recognition_pi
|
92ceaebdef775a42023760360689d473662cb361
|
[
"MIT"
] | null | null | null |
fscognitive/core/cognitive/person_group_cognitive.py
|
anhhoangiot/people_recognition_pi
|
92ceaebdef775a42023760360689d473662cb361
|
[
"MIT"
] | null | null | null |
fscognitive/core/cognitive/person_group_cognitive.py
|
anhhoangiot/people_recognition_pi
|
92ceaebdef775a42023760360689d473662cb361
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-10-08
# @Author : Anh Hoang (anhhoang.work.mail@gmail.com)
# @Project : FSCognitive
# @Version : 1.0
from cognitive import Cognitive
from commons import EventLogger
logger = EventLogger.logger()
class PersonGroupCognitive(Cognitive):
"""Intermidiate module works as an interface between group model and MS service
Attributes:
group (PersonGroup): group object which
initialized instance of this class
"""
def __init__(self, group):
super(PersonGroupCognitive, self).__init__()
self.group = group
def identify(self, faces):
"""Identify a group of people from captured faces
Args:
            faces (Array): list of captured face ids
                returned from the MS service
Returns:
Array: list of people identified from faces
"""
logger.log('Identifying...')
candidates = []
try:
response = self.api.face.identify(faces, self.group.id)
people = self.dictionarize(response)
candidates = []
for person in people:
candidate = person['candidates'][0]['personId']
candidates.append(candidate)
except self.api.CognitiveFaceException as exception:
logger.log(exception)
finally:
return candidates
def save(self):
"""Save a new person group in MS service"""
logger.log('Saving person group...')
# Only create group if it does not exist
if self.isExisted() is False:
try:
self.api.person_group.create(self.group.id, self.group.name)
logger.log(
'Created person group with name %s' % self.group.name
)
return True
except self.api.CognitiveFaceException as exception:
logger.log(exception)
return False
return True
def isExisted(self):
"""Check if group is existed or not"""
try:
self.api.person_group.get(self.group.id)
return True
except self.api.CognitiveFaceException as exception:
logger.log(exception)
return False
def train(self):
"""Enqueue a group to be trained"""
logger.log('Enqueue group training task...')
result = self.api.person_group.train(self.group.id)
self.processResponse(result, self.print_response)
def trainingStatus(self):
"""Get training status"""
logger.log('Fetching training status...')
result = self.api.person_group.get_status(self.group.id)
return self.processResponse(result, self.print_response)
def processResponse(self, response, callback=None):
response = self.dictionarize(response)
if callback:
callback(response)
return response
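# Minimal usage sketch (assumption: `PersonGroup` is the group model that owns
# an `id` and a `name`, and the face ids come from a prior Face API detect
# call; the names below are hypothetical):
#   group = PersonGroup(id='team-group', name='Team')
#   cognitive = PersonGroupCognitive(group)
#   if cognitive.save():
#       cognitive.train()
#       people = cognitive.identify(['face-id-1', 'face-id-2'])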
| 32.234043
| 83
| 0.584158
|
df90adeb328d5a73a4eb8347bff5c91c1dc9d039
| 1,482
|
py
|
Python
|
core/migrations/0001_initial.py
|
Kigamekun/ChatDjango
|
fd9af822cb523362378b175ded33e8dc07b22711
|
[
"MIT"
] | null | null | null |
core/migrations/0001_initial.py
|
Kigamekun/ChatDjango
|
fd9af822cb523362378b175ded33e8dc07b22711
|
[
"MIT"
] | null | null | null |
core/migrations/0001_initial.py
|
Kigamekun/ChatDjango
|
fd9af822cb523362378b175ded33e8dc07b22711
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-04-11 10:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.CharField(max_length=1200)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('is_read', models.BooleanField(default=False)),
('receiver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='receiver', to=settings.AUTH_USER_MODEL)),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sender', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('timestamp',),
},
),
]
| 38
| 147
| 0.615385
|
b34fae31d3de932429e762bdb38d9b0b50def337
| 112
|
py
|
Python
|
Laelia/settings/staging.py
|
arantesdv/LaeliaAppProject
|
93fca5393cb8406694903d9adde02067480c792e
|
[
"MIT"
] | null | null | null |
Laelia/settings/staging.py
|
arantesdv/LaeliaAppProject
|
93fca5393cb8406694903d9adde02067480c792e
|
[
"MIT"
] | null | null | null |
Laelia/settings/staging.py
|
arantesdv/LaeliaAppProject
|
93fca5393cb8406694903d9adde02067480c792e
|
[
"MIT"
] | null | null | null |
from ._base import *
DEBUG=False
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
| 12.444444
| 63
| 0.732143
|
9597a02c03ba7f4d7b994ed7f668a22c96b48abf
| 284
|
py
|
Python
|
Curso_Python_3_UDEMY/funcao/gerador_html_v1.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
Curso_Python_3_UDEMY/funcao/gerador_html_v1.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
Curso_Python_3_UDEMY/funcao/gerador_html_v1.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
def tag_bloco(texto, clase='success'):
return f'<div class="{clase}">{texto}</div>'
if __name__ == '__main__':
    # Tests (assertions)
assert tag_bloco('Incluído com sucesso!') == \
'<div class="success">Incluído com sucesso!</div>'
print(tag_bloco('bloco'))
| 25.818182
| 58
| 0.630282
|
af9857dd344873df059d7923b373f797b7b951d8
| 9,816
|
py
|
Python
|
backend/kale/tests/assets/kfp_dsl/pipeline_parameters.py
|
brness/kale
|
d90310dbebc765c68915df0cf832a7a5d1ec1551
|
[
"Apache-2.0"
] | 502
|
2019-07-18T16:19:16.000Z
|
2022-03-30T19:45:31.000Z
|
backend/kale/tests/assets/kfp_dsl/pipeline_parameters.py
|
brness/kale
|
d90310dbebc765c68915df0cf832a7a5d1ec1551
|
[
"Apache-2.0"
] | 189
|
2019-09-22T10:54:02.000Z
|
2022-03-28T13:46:31.000Z
|
backend/kale/tests/assets/kfp_dsl/pipeline_parameters.py
|
brness/kale
|
d90310dbebc765c68915df0cf832a7a5d1ec1551
|
[
"Apache-2.0"
] | 111
|
2019-09-25T20:28:47.000Z
|
2022-03-24T01:31:46.000Z
|
import json
import kfp.dsl as _kfp_dsl
import kfp.components as _kfp_components
from collections import OrderedDict
from kubernetes import client as k8s_client
def step1():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.common import rokutils as _kale_rokutils
_kale_mlmdutils.call("link_input_rok_artifacts")
_kale_rokutils.snapshot_pipeline_step(
"test",
"step1",
"",
before=True)
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_marshal([], ['data'], _kale_pipeline_parameters, "/marshal")
def step1():
return 10
step1()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_rok_snapshot_task = _kale_rokutils.snapshot_pipeline_step(
"test",
"step1",
"",
before=False)
_kale_mlmdutils.call("submit_output_rok_artifact", _rok_snapshot_task)
_kale_mlmdutils.call("mark_execution_complete")
def step3(b: str):
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.common import rokutils as _kale_rokutils
_kale_mlmdutils.call("link_input_rok_artifacts")
_kale_rokutils.snapshot_pipeline_step(
"test",
"step3",
"",
before=True)
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {"b": b}
@_kale_marshal(['b', 'data'], [], _kale_pipeline_parameters, "/marshal")
def step3(st, st2):
print(st)
step3()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_rok_snapshot_task = _kale_rokutils.snapshot_pipeline_step(
"test",
"step3",
"",
before=False)
_kale_mlmdutils.call("submit_output_rok_artifact", _rok_snapshot_task)
_kale_mlmdutils.call("mark_execution_complete")
def step2(a: int, c: int):
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.common import rokutils as _kale_rokutils
_kale_mlmdutils.call("link_input_rok_artifacts")
_kale_rokutils.snapshot_pipeline_step(
"test",
"step2",
"",
before=True)
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {"a": a, "c": c}
@_kale_marshal(['c', 'a', 'data'], ['res'], _kale_pipeline_parameters, "/marshal")
def step2(var1, var2, data):
print(var1 + var2)
return 'Test'
step2()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_rok_snapshot_task = _kale_rokutils.snapshot_pipeline_step(
"test",
"step2",
"",
before=False)
_kale_mlmdutils.call("submit_output_rok_artifact", _rok_snapshot_task)
_kale_mlmdutils.call("mark_execution_complete")
def final_auto_snapshot():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_marshal([], [], _kale_pipeline_parameters, "/marshal")
def _no_op():
pass
_no_op()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
from kale.common import rokutils as _kale_rokutils
_kale_mlmdutils.call("link_input_rok_artifacts")
_rok_snapshot_task = _kale_rokutils.snapshot_pipeline_step(
"test",
"final_auto_snapshot",
"",
before=False)
_kale_mlmdutils.call("submit_output_rok_artifact", _rok_snapshot_task)
_kale_mlmdutils.call("mark_execution_complete")
_kale_step1_op = _kfp_components.func_to_container_op(step1)
_kale_step3_op = _kfp_components.func_to_container_op(step3)
_kale_step2_op = _kfp_components.func_to_container_op(step2)
_kale_final_auto_snapshot_op = _kfp_components.func_to_container_op(
final_auto_snapshot)
@_kfp_dsl.pipeline(
name='test',
description=''
)
def auto_generated_pipeline(a='1', b='Some string', c='5'):
_kale_pvolumes_dict = OrderedDict()
_kale_volume_step_names = []
_kale_volume_name_parameters = []
_kale_marshal_vop = _kfp_dsl.VolumeOp(
name="kale-marshal-volume",
resource_name="kale-marshal-pvc",
modes=['ReadWriteMany'],
size="1Gi"
)
_kale_volume_step_names.append(_kale_marshal_vop.name)
_kale_volume_name_parameters.append(
_kale_marshal_vop.outputs["name"].full_name)
_kale_pvolumes_dict['/marshal'] = _kale_marshal_vop.volume
_kale_volume_step_names.sort()
_kale_volume_name_parameters.sort()
_kale_step1_task = _kale_step1_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after()
_kale_step1_task.container.working_dir = "/test"
_kale_step1_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_output_artifacts.update(
{'mlpipeline-ui-metadata': '/tmp/mlpipeline-ui-metadata.json'})
_kale_step1_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step1_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step1_task.dependent_names +
_kale_volume_step_names)
_kale_step1_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step1_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
_kale_step3_task = _kale_step3_op(b)\
.add_pvolumes(_kale_pvolumes_dict)\
.after(_kale_step1_task)
_kale_step3_task.container.working_dir = "/test"
_kale_step3_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_output_artifacts.update(
{'mlpipeline-ui-metadata': '/tmp/mlpipeline-ui-metadata.json'})
_kale_step3_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step3_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step3_task.dependent_names +
_kale_volume_step_names)
_kale_step3_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step3_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
_kale_step2_task = _kale_step2_op(a, c)\
.add_pvolumes(_kale_pvolumes_dict)\
.after(_kale_step1_task)
_kale_step2_task.container.working_dir = "/test"
_kale_step2_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_output_artifacts.update(
{'mlpipeline-ui-metadata': '/tmp/mlpipeline-ui-metadata.json'})
_kale_step2_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step2_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step2_task.dependent_names +
_kale_volume_step_names)
_kale_step2_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step2_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
_kale_final_auto_snapshot_task = _kale_final_auto_snapshot_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after(_kale_step3_task, _kale_step2_task)
_kale_final_auto_snapshot_task.container.working_dir = "/test"
_kale_final_auto_snapshot_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_output_artifacts.update(
{'mlpipeline-ui-metadata': '/tmp/mlpipeline-ui-metadata.json'})
_kale_final_auto_snapshot_task.output_artifact_paths.update(
_kale_output_artifacts)
_kale_final_auto_snapshot_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_final_auto_snapshot_task.dependent_names +
_kale_volume_step_names)
_kale_final_auto_snapshot_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_final_auto_snapshot_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
if __name__ == "__main__":
pipeline_func = auto_generated_pipeline
pipeline_filename = pipeline_func.__name__ + '.pipeline.tar.gz'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
# Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment('test')
# Submit a pipeline run
from kale.common import kfputils
pipeline_id, version_id = kfputils.upload_pipeline(
pipeline_filename, "test")
run_result = kfputils.run_pipeline(
experiment_name=experiment.name, pipeline_id=pipeline_id, version_id=version_id)
| 34.083333
| 88
| 0.724837
|
fa3f65d68e80492c7cbf1e39a9f05ec53222d8e2
| 9,240
|
py
|
Python
|
spyder/plugins/history/widgets.py
|
ok97465/spyder
|
e92e0fc963d597ec0e7ad447eca865ed8090d576
|
[
"MIT"
] | null | null | null |
spyder/plugins/history/widgets.py
|
ok97465/spyder
|
e92e0fc963d597ec0e7ad447eca865ed8090d576
|
[
"MIT"
] | 1
|
2020-11-02T21:11:19.000Z
|
2020-11-02T21:11:19.000Z
|
spyder/plugins/history/widgets.py
|
steff456/spyder
|
e92e0fc963d597ec0e7ad447eca865ed8090d576
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""History Widget."""
# Standard library imports
import os.path as osp
import re
import sys
# Third party imports
from qtpy.QtCore import Signal, Slot
from qtpy.QtWidgets import QVBoxLayout, QWidget
# Local imports
from spyder.api.config.decorators import on_conf_change
from spyder.api.translations import get_translation
from spyder.api.widgets.main_widget import PluginMainWidget
from spyder.py3compat import is_text_string, to_text_string
from spyder.utils import encoding
from spyder.utils.sourcecode import normalize_eols
from spyder.widgets.findreplace import FindReplace
from spyder.widgets.simplecodeeditor import SimpleCodeEditor
from spyder.widgets.tabs import Tabs
# Localization
_ = get_translation('spyder')
# --- Constants
# ----------------------------------------------------------------------------
# Maximum number of lines to show
MAX_LINES = 1000
class HistoryWidgetActions:
# Triggers
MaximumHistoryEntries = 'maximum_history_entries_action'
# Toggles
ToggleWrap = 'toggle_wrap_action'
ToggleLineNumbers = 'toggle_line_numbers_action'
class HistoryWidgetOptionsMenuSections:
Main = 'main_section'
# --- Widgets
# ----------------------------------------------------------------------------
class HistoryWidget(PluginMainWidget):
"""
History plugin main widget.
"""
# Signals
sig_focus_changed = Signal()
"""
This signal is emitted when the focus of the code editor storing history
changes.
"""
def __init__(self, name, plugin, parent):
super().__init__(name, plugin, parent)
# Attributes
self.editors = []
self.filenames = []
self.tabwidget = None
self.dockviewer = None
self.wrap_action = None
self.linenumbers_action = None
self.editors = []
self.filenames = []
self.font = None
# Widgets
self.tabwidget = Tabs(self)
self.find_widget = FindReplace(self)
# Setup
self.find_widget.hide()
# Layout
layout = QVBoxLayout()
# TODO: Move this to the tab container directly
if sys.platform == 'darwin':
tab_container = QWidget(self)
tab_container.setObjectName('tab-container')
tab_layout = QVBoxLayout(tab_container)
tab_layout.setContentsMargins(0, 0, 0, 0)
tab_layout.addWidget(self.tabwidget)
layout.addWidget(tab_container)
else:
layout.addWidget(self.tabwidget)
layout.addWidget(self.find_widget)
self.setLayout(layout)
# Signals
self.tabwidget.currentChanged.connect(self.refresh)
self.tabwidget.move_data.connect(self.move_tab)
# --- PluginMainWidget API
# ------------------------------------------------------------------------
def get_title(self):
return _('History')
def get_focus_widget(self):
return self.tabwidget.currentWidget()
def setup(self):
# Actions
self.wrap_action = self.create_action(
HistoryWidgetActions.ToggleWrap,
text=_("Wrap lines"),
toggled=True,
initial=self.get_conf('wrap'),
option='wrap'
)
self.linenumbers_action = self.create_action(
HistoryWidgetActions.ToggleLineNumbers,
text=_("Show line numbers"),
toggled=True,
initial=self.get_conf('line_numbers'),
option='line_numbers'
)
# Menu
menu = self.get_options_menu()
for item in [self.wrap_action, self.linenumbers_action]:
self.add_item_to_menu(
item,
menu=menu,
section=HistoryWidgetOptionsMenuSections.Main,
)
def update_actions(self):
pass
@on_conf_change(option='wrap')
def on_wrap_update(self, value):
for editor in self.editors:
editor.toggle_wrap_mode(value)
@on_conf_change(option='line_numbers')
def on_line_numbers_update(self, value):
for editor in self.editors:
editor.toggle_line_numbers(value)
@on_conf_change(option='selected', section='appearance')
def on_color_scheme_change(self, value):
for editor in self.editors:
editor.set_font(self.font)
# --- Public API
# ------------------------------------------------------------------------
def update_font(self, font, color_scheme):
"""
Update font of the code editor.
Parameters
----------
font: QFont
Font object.
color_scheme: str
Name of the color scheme to use.
"""
self.color_scheme = color_scheme
self.font = font
for editor in self.editors:
editor.set_font(font)
editor.set_color_scheme(color_scheme)
def move_tab(self, index_from, index_to):
"""
Move tab.
Parameters
----------
index_from: int
Move tab from this index.
index_to: int
Move tab to this index.
Notes
-----
Tabs themselves have already been moved by the history.tabwidget.
"""
filename = self.filenames.pop(index_from)
editor = self.editors.pop(index_from)
self.filenames.insert(index_to, filename)
self.editors.insert(index_to, editor)
def get_filename_text(self, filename):
"""
        Read and return content from filename, trimmed to the last MAX_LINES lines.
Parameters
----------
filename: str
The file path to read.
Returns
-------
str
Content of the filename.
"""
# Avoid a possible error when reading the history file
try:
text, _ = encoding.read(filename)
except (IOError, OSError):
text = "# Previous history could not be read from disk, sorry\n\n"
text = normalize_eols(text)
linebreaks = [m.start() for m in re.finditer('\n', text)]
if len(linebreaks) > MAX_LINES:
text = text[linebreaks[-MAX_LINES - 1] + 1:]
# Avoid an error when trying to write the trimmed text to disk.
# See spyder-ide/spyder#9093.
try:
encoding.write(text, filename)
except (IOError, OSError):
pass
return text
def add_history(self, filename):
"""
Create a history tab for `filename`.
Parameters
----------
filename: str
History filename.
"""
filename = encoding.to_unicode_from_fs(filename)
if filename in self.filenames:
return
# Widgets
editor = SimpleCodeEditor(self)
# Setup
language = 'py' if osp.splitext(filename)[1] == '.py' else 'bat'
editor.setup_editor(
linenumbers=self.get_conf('line_numbers'),
language=language,
color_scheme=self.get_conf('selected', section='appearance'),
font=self.font,
wrap=self.get_conf('wrap'),
)
editor.setReadOnly(True)
editor.set_text(self.get_filename_text(filename))
editor.set_cursor_position('eof')
self.find_widget.set_editor(editor)
index = self.tabwidget.addTab(editor, osp.basename(filename))
self.filenames.append(filename)
self.editors.append(editor)
self.tabwidget.setCurrentIndex(index)
self.tabwidget.setTabToolTip(index, filename)
# Signals
editor.sig_focus_changed.connect(lambda: self.sig_focus_changed.emit())
@Slot(str, str)
def append_to_history(self, filename, command):
"""
Append command to history tab.
Parameters
----------
filename: str
History file.
command: str
Command to append to history file.
"""
if not is_text_string(filename): # filename is a QString
filename = to_text_string(filename.toUtf8(), 'utf-8')
index = self.filenames.index(filename)
command = to_text_string(command)
self.editors[index].append(command)
if self.get_conf('go_to_eof'):
self.editors[index].set_cursor_position('eof')
self.tabwidget.setCurrentIndex(index)
def refresh(self):
"""Refresh widget and update find widget on current editor."""
if self.tabwidget.count():
editor = self.tabwidget.currentWidget()
else:
editor = None
self.find_widget.set_editor(editor)
def test():
"""Run history widget."""
from spyder.utils.qthelpers import qapplication
from unittest.mock import MagicMock
plugin_mock = MagicMock()
plugin_mock.CONF_SECTION = 'historylog'
app = qapplication(test_time=8)
widget = HistoryWidget('historylog', plugin_mock, None)
widget._setup()
widget.setup()
widget.show()
sys.exit(app.exec_())
if __name__ == '__main__':
test()
| 28.343558
| 79
| 0.591883
|
d0ec1a39118326cad6dc809b9b325f96d6a52275
| 4,644
|
py
|
Python
|
proto1/card_format.py
|
Ravenshard/AI_Dominion
|
8def66aa8575ebc7c46d02f4797a50f603f64630
|
[
"Apache-2.0"
] | 1
|
2019-11-18T03:34:55.000Z
|
2019-11-18T03:34:55.000Z
|
proto1/card_format.py
|
Ravenshard/AI_Dominion
|
8def66aa8575ebc7c46d02f4797a50f603f64630
|
[
"Apache-2.0"
] | null | null | null |
proto1/card_format.py
|
Ravenshard/AI_Dominion
|
8def66aa8575ebc7c46d02f4797a50f603f64630
|
[
"Apache-2.0"
] | null | null | null |
class card():
"""docstring for ."""
def __init__(self, name, type, cost, coins=0, vp=0):
'''
Parameters:
name: string
type: LIST OF STRINGS
cost: int
coins: int
vp: int
Returns:
None
'''
self.name = name
self.type = type
self.cost = cost
self.coins = coins
self.vp = vp
return
def __str__(self):
'''When you print card, return card name'''
return self.getName()
def getName(self): return self.name
def getType(self): return self.type
def getCost(self): return self.cost
def getCoins(self): return self.coins
def getVp(self): return self.vp
def isTreasure(self): return "treasure" in self.type
def isVictory(self): return "victory" in self.type
def isCurse(self): return "curse" in self.type
def kingdomCards():
''' Return a list of all cards in the kingdom
'''
kingdom = list()
kingdom.append(card("curse", ["curse"], 0, coins=0, vp=-1))
kingdom.append(card("estate", ["victory"], 2, coins=0, vp=1))
kingdom.append(card("duchy", ["victory"], 5, coins=0, vp=3))
kingdom.append(card("province", ["victory"], 8, coins=0, vp=6))
kingdom.append(card("copper", ["treasure"], 0, coins=1, vp=0))
kingdom.append(card("silver", ["treasure"], 3, coins=2, vp=0))
kingdom.append(card("gold", ["treasure"], 6, coins=3, vp=0))
return kingdom
def kingdomCardValues(kingdom):
kingdomAmounts = dict()
for card in kingdom:
if card.getName() == "curse": kingdomAmounts[card.getName()] = 10
elif card.getName() == "estate": kingdomAmounts[card.getName()] = 8
elif card.getName() == "duchy": kingdomAmounts[card.getName()] = 8
elif card.getName() == "province": kingdomAmounts[card.getName()] = 8
elif card.getName() == "copper": kingdomAmounts[card.getName()] = 46
elif card.getName() == "silver": kingdomAmounts[card.getName()] = 40
elif card.getName() == "gold": kingdomAmounts[card.getName()] = 30
return kingdomAmounts
def startingCards():
''' Return the cards the bot starts with
'''
deck = list()
    for _ in range(7): deck.append(card("copper", ["treasure"], 0, coins=1, vp=0))
    for _ in range(3): deck.append(card("estate", ["victory"], 2, coins=0, vp=1))
return deck
def allDeckCards(hand, deck, discard, play):
''' Start to get all the cards the bot owns into a single list
'''
content = list()
areas = [hand, deck, discard, play]
for area in areas:
for card in area: content.append(card.getName())
return deckContent(content)
def deckContent(deck):
    ''' Create a tuple of (name, count) pairs for every card in the kingdom
        e.g. (... ('copper', 7) ...) indicates there are 7 coppers in the deck
        THIS IS THE STATE OF THE BOT
'''
tmpSupplyCards = kingdomCards()
supplyCards = list()
for card in tmpSupplyCards: supplyCards.append( (card.getName(), 0) )
for card in deck:
for index in range(len(supplyCards)):
if card in supplyCards[index]:
count = supplyCards[index][1]
supplyCards[index] = (card, count+1)
# supplyCard[1] += 1
break
# print(supplyCards)
supplyCards = tuple(supplyCards)
return supplyCards
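# Worked example (a quick sanity check of deckContent, assuming the starting
# deck of 7 coppers and 3 estates):
#   deckContent([c.getName() for c in startingCards()])
#   -> (('curse', 0), ('estate', 3), ('duchy', 0), ('province', 0),
#       ('copper', 7), ('silver', 0), ('gold', 0))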
def newCard(deck, name):
if name == "curse":
deck.append(card("curse", ["curse"], 0, coins=0, vp=-1))
elif name == "estate":
deck.append(card("estate", ["victory"], 2, coins=0, vp=1))
elif name == "duchy":
deck.append(card("duchy", ["victory"], 5, coins=0, vp=3))
elif name == "province":
deck.append(card("province", ["victory"], 8, coins=0, vp=6))
elif name == "copper":
deck.append(card("copper", ["treasure"], 0, coins=1, vp=0))
elif name == "silver":
deck.append(card("silver", ["treasure"], 3, coins=2, vp=0))
elif name == "gold":
deck.append(card("gold", ["treasure"], 6, coins=3, vp=0))
elif name == "none":
pass
return deck
def testCards():
x = card("silver", ["treasure"], 3, coins=2)
# assertEqual(x.getName(), "silver", 'pass')
# print("name: {} \t type: {} \t cost: {} \t coins: {} \t vp: {}".format(
# x.getName(), x.getType(), x.getCost(), x.getCoins(), x.getVp()))
# print("is {} a treasure? {}".format(x.getName(), x.isTreasure()))
# print("is {} a victory? {}".format(x.getName(), x.isVictory()))
return
def main():
testCards()
return
if __name__ == '__main__':
main()
| 33.410072
| 80
| 0.579242
|
1cc6cc696f2928a38a858bac02b06eea43847d3c
| 1,114
|
py
|
Python
|
pinakes/main/migrations/0050_remove_notificationtype_main_notificationtype_n_type_unique_and_more.py
|
Alex-Izquierdo/pinakes
|
dfeb855662b47d29a6e976e87fd7c090a262cf3f
|
[
"Apache-2.0"
] | 2
|
2022-03-17T18:53:58.000Z
|
2022-03-17T22:04:22.000Z
|
pinakes/main/migrations/0050_remove_notificationtype_main_notificationtype_n_type_unique_and_more.py
|
Alex-Izquierdo/pinakes
|
dfeb855662b47d29a6e976e87fd7c090a262cf3f
|
[
"Apache-2.0"
] | 9
|
2022-03-18T08:22:57.000Z
|
2022-03-30T17:14:49.000Z
|
pinakes/main/migrations/0050_remove_notificationtype_main_notificationtype_n_type_unique_and_more.py
|
Alex-Izquierdo/pinakes
|
dfeb855662b47d29a6e976e87fd7c090a262cf3f
|
[
"Apache-2.0"
] | 7
|
2022-03-17T22:03:08.000Z
|
2022-03-28T21:28:34.000Z
|
# Generated by Django 4.0.2 on 2022-05-12 20:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("main", "0049_notificationsetting_encryption"),
]
operations = [
migrations.RemoveConstraint(
model_name="notificationtype",
name="main_notificationtype_n_type_unique",
),
migrations.AlterField(
model_name="notificationtype",
name="n_type",
field=models.CharField(
help_text="Name of the notification type",
max_length=128,
unique=True,
),
),
migrations.AlterField(
model_name="orderitem",
name="name",
field=models.CharField(
help_text="Name of the portfolio item or order process",
max_length=512,
),
),
migrations.AlterField(
model_name="portfolio",
name="name",
field=models.CharField(help_text="Portfolio name", max_length=255),
),
]
| 27.85
| 79
| 0.550269
|
5e47b5c3e6a9d4239ba74be2f46b8a089744f198
| 13,907
|
py
|
Python
|
lasagne/tests/layers/test_normalization.py
|
huangshunliang/Lasagne_h
|
359ea1b9f12678c3523c0cb100f646528d49df9e
|
[
"MIT"
] | 2
|
2021-09-22T18:39:08.000Z
|
2021-11-17T10:39:57.000Z
|
lasagne/tests/layers/test_normalization.py
|
huangshunliang/Lasagne_h
|
359ea1b9f12678c3523c0cb100f646528d49df9e
|
[
"MIT"
] | 1
|
2021-03-20T04:42:05.000Z
|
2021-03-20T04:42:05.000Z
|
lasagne/tests/layers/test_normalization.py
|
huangshunliang/Lasagne_h
|
359ea1b9f12678c3523c0cb100f646528d49df9e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
The :func:`ground_truth_normalizer()`, :func:`ground_truth_normalize_row` and
:class:`TestLocalResponseNormalization2DLayer` implementations contain code
from `pylearn2 <http://github.com/lisa-lab/pylearn2>`_, which is covered
by the following license:
Copyright (c) 2011--2014, Université de Montréal
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from mock import Mock
import numpy as np
import pytest
import theano
def ground_truth_normalizer(c01b, k, n, alpha, beta):
out = np.zeros(c01b.shape)
for r in range(out.shape[1]):
for c in range(out.shape[2]):
for x in range(out.shape[3]):
out[:, r, c, x] = ground_truth_normalize_row(
row=c01b[:, r, c, x],
k=k, n=n, alpha=alpha, beta=beta)
return out
def ground_truth_normalize_row(row, k, n, alpha, beta):
assert row.ndim == 1
out = np.zeros(row.shape)
for i in range(row.shape[0]):
s = k
tot = 0
for j in range(max(0, i-n//2), min(row.shape[0], i+n//2+1)):
tot += 1
sq = row[j] ** 2.
assert sq > 0.
assert s >= k
assert alpha > 0.
s += alpha * sq
assert s >= k
assert tot <= n
assert s >= k
s = s ** beta
out[i] = row[i] / s
return out
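# Restated for reference, this is exactly what the loop above computes for each
# channel i of a single (row, col, batch) position:
#   out[i] = row[i] / (k + alpha * sum(row[j]**2 for j in window(i))) ** beta
# where window(i) spans [max(0, i - n//2), min(len(row), i + n//2 + 1)).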
class TestLocalResponseNormalization2DLayer:
@pytest.fixture
def rng(self):
return np.random.RandomState([2013, 2])
@pytest.fixture
def input_data(self, rng):
channels = 15
rows = 3
cols = 4
batch_size = 2
shape = (batch_size, channels, rows, cols)
return rng.randn(*shape).astype(theano.config.floatX)
@pytest.fixture
def input_layer(self, input_data):
from lasagne.layers.input import InputLayer
shape = list(input_data.shape)
shape[0] = None
return InputLayer(shape)
@pytest.fixture
def layer(self, input_layer):
from lasagne.layers.normalization import\
LocalResponseNormalization2DLayer
layer = LocalResponseNormalization2DLayer(input_layer,
alpha=1.5,
k=2,
beta=0.75,
n=5)
return layer
def test_get_params(self, layer):
assert layer.get_params() == []
def test_get_output_shape_for(self, layer):
assert layer.get_output_shape_for((1, 2, 3, 4)) == (1, 2, 3, 4)
def test_even_n_fails(self, input_layer):
from lasagne.layers.normalization import\
LocalResponseNormalization2DLayer
with pytest.raises(NotImplementedError):
LocalResponseNormalization2DLayer(input_layer, n=4)
def test_normalization(self, input_data, input_layer, layer):
from lasagne.layers import get_output
X = input_layer.input_var
lrn = theano.function([X], get_output(layer, X))
out = lrn(input_data)
# ground_truth_normalizer assumes c01b
input_data_c01b = input_data.transpose([1, 2, 3, 0])
ground_out = ground_truth_normalizer(input_data_c01b,
n=layer.n, k=layer.k,
alpha=layer.alpha,
beta=layer.beta)
ground_out = np.transpose(ground_out, [3, 0, 1, 2])
assert out.shape == ground_out.shape
assert np.allclose(out, ground_out)
class TestBatchNormLayer:
@pytest.fixture(params=(False, True), ids=('plain', 'dnn'))
def BatchNormLayer(self, request):
dnn = request.param
if not dnn:
from lasagne.layers.normalization import BatchNormLayer
elif dnn:
try:
from lasagne.layers.dnn import (
BatchNormDNNLayer as BatchNormLayer)
except ImportError:
pytest.skip("cuDNN batch norm not available")
return BatchNormLayer
@pytest.fixture
def init_unique(self):
# initializer for a tensor of unique values
return lambda shape: np.arange(np.prod(shape)).reshape(shape)
def test_init(self, BatchNormLayer, init_unique):
input_shape = (2, 3, 4)
# default: normalize over all but second axis
beta = BatchNormLayer(input_shape, beta=init_unique).beta
assert np.allclose(beta.get_value(), init_unique((3,)))
# normalize over first axis only
beta = BatchNormLayer(input_shape, beta=init_unique, axes=0).beta
assert np.allclose(beta.get_value(), init_unique((3, 4)))
# normalize over second and third axis
try:
beta = BatchNormLayer(
input_shape, beta=init_unique, axes=(1, 2)).beta
assert np.allclose(beta.get_value(), init_unique((2,)))
except ValueError as exc:
assert "BatchNormDNNLayer only supports" in exc.args[0]
@pytest.mark.parametrize('update_averages', [None, True, False])
@pytest.mark.parametrize('use_averages', [None, True, False])
@pytest.mark.parametrize('deterministic', [True, False])
def test_get_output_for(self, BatchNormLayer, deterministic, use_averages,
update_averages):
input_shape = (20, 30, 40)
# random input tensor, beta, gamma, mean, inv_std and alpha
input = (np.random.randn(*input_shape).astype(theano.config.floatX) +
np.random.randn(1, 30, 1).astype(theano.config.floatX))
beta = np.random.randn(30).astype(theano.config.floatX)
gamma = np.random.randn(30).astype(theano.config.floatX)
mean = np.random.randn(30).astype(theano.config.floatX)
inv_std = np.random.rand(30).astype(theano.config.floatX)
alpha = np.random.rand()
# create layer (with default axes: normalize over all but second axis)
layer = BatchNormLayer(input_shape, beta=beta, gamma=gamma, mean=mean,
inv_std=inv_std, alpha=alpha)
# call get_output_for()
kwargs = {'deterministic': deterministic}
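# When not overridden via kwargs, the layer is expected to use its stored
# averages only in deterministic mode and to update them only during training;
# the defaults below mirror that behaviour for the expected-result computation.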
if use_averages is not None:
kwargs['batch_norm_use_averages'] = use_averages
else:
use_averages = deterministic
if update_averages is not None:
kwargs['batch_norm_update_averages'] = update_averages
else:
update_averages = not deterministic
result = layer.get_output_for(theano.tensor.constant(input),
**kwargs).eval()
# compute expected results and expected updated parameters
input_mean = input.mean(axis=(0, 2))
input_inv_std = 1 / np.sqrt(input.var(axis=(0, 2)) + layer.epsilon)
if use_averages:
use_mean, use_inv_std = mean, inv_std
else:
use_mean, use_inv_std = input_mean, input_inv_std
bcast = (np.newaxis, slice(None), np.newaxis)
exp_result = (input - use_mean[bcast]) * use_inv_std[bcast]
exp_result = exp_result * gamma[bcast] + beta[bcast]
if update_averages:
new_mean = (1 - alpha) * mean + alpha * input_mean
new_inv_std = (1 - alpha) * inv_std + alpha * input_inv_std
else:
new_mean, new_inv_std = mean, inv_std
# compare expected results to actual results
tol = {'atol': 1e-5, 'rtol': 1e-6}
assert np.allclose(layer.mean.get_value(), new_mean, **tol)
assert np.allclose(layer.inv_std.get_value(), new_inv_std, **tol)
assert np.allclose(result, exp_result, **tol)
def test_undefined_shape(self, BatchNormLayer):
# should work:
BatchNormLayer((64, 2, None), axes=(0, 2))
# should not work:
with pytest.raises(ValueError) as exc:
BatchNormLayer((64, None, 3), axes=(0, 2))
assert 'needs specified input sizes' in exc.value.args[0]
def test_skip_linear_transform(self, BatchNormLayer):
input_shape = (20, 30, 40)
# random input tensor, beta, gamma
input = (np.random.randn(*input_shape).astype(theano.config.floatX) +
np.random.randn(1, 30, 1).astype(theano.config.floatX))
beta = np.random.randn(30).astype(theano.config.floatX)
gamma = np.random.randn(30).astype(theano.config.floatX)
# create layers without beta or gamma
layer1 = BatchNormLayer(input_shape, beta=None, gamma=gamma)
layer2 = BatchNormLayer(input_shape, beta=beta, gamma=None)
# check that one parameter is missing
assert len(layer1.get_params()) == 3
assert len(layer2.get_params()) == 3
# call get_output_for()
result1 = layer1.get_output_for(theano.tensor.constant(input),
deterministic=False).eval()
result2 = layer2.get_output_for(theano.tensor.constant(input),
deterministic=False).eval()
# compute expected results and expected updated parameters
mean = input.mean(axis=(0, 2))
std = np.sqrt(input.var(axis=(0, 2)) + layer1.epsilon)
exp_result = (input - mean[None, :, None]) / std[None, :, None]
exp_result1 = exp_result * gamma[None, :, None] # no beta
exp_result2 = exp_result + beta[None, :, None] # no gamma
# compare expected results to actual results
tol = {'atol': 1e-5, 'rtol': 1e-6}
assert np.allclose(result1, exp_result1, **tol)
assert np.allclose(result2, exp_result2, **tol)
@pytest.mark.parametrize('dnn', [False, True])
def test_batch_norm_macro(dnn):
if not dnn:
from lasagne.layers import (BatchNormLayer, batch_norm)
else:
try:
from lasagne.layers.dnn import (
BatchNormDNNLayer as BatchNormLayer,
batch_norm_dnn as batch_norm)
except ImportError:
pytest.skip("cuDNN batch norm not available")
from lasagne.layers import (Layer, NonlinearityLayer)
from lasagne.nonlinearities import identity
input_shape = (2, 3)
obj = object()
# check if it steals the nonlinearity
layer = Mock(Layer, output_shape=input_shape, nonlinearity=obj)
bnstack = batch_norm(layer)
assert isinstance(bnstack, NonlinearityLayer)
assert isinstance(bnstack.input_layer, BatchNormLayer)
assert layer.nonlinearity is identity
assert bnstack.nonlinearity is obj
# check if it removes the bias
layer = Mock(Layer, output_shape=input_shape, b=obj, params={obj: set()})
bnstack = batch_norm(layer)
assert isinstance(bnstack, BatchNormLayer)
assert layer.b is None
assert obj not in layer.params
# check if it can handle an unset bias
layer = Mock(Layer, output_shape=input_shape, b=None, params={obj: set()})
bnstack = batch_norm(layer)
assert isinstance(bnstack, BatchNormLayer)
assert layer.b is None
# check if it passes on kwargs
layer = Mock(Layer, output_shape=input_shape)
bnstack = batch_norm(layer, name='foo')
assert isinstance(bnstack, BatchNormLayer)
assert bnstack.name == 'foo'
# check if created layers are named with kwargs name
layer = Mock(Layer, output_shape=input_shape, nonlinearity=obj)
layer.name = 'foo'
bnstack = batch_norm(layer, name='foo_bnorm')
assert isinstance(bnstack, NonlinearityLayer)
assert isinstance(bnstack.input_layer, BatchNormLayer)
assert bnstack.name == 'foo_bnorm_nonlin'
assert bnstack.input_layer.name == 'foo_bnorm'
# check if created layers are named with wrapped layer name
layer = Mock(Layer, output_shape=input_shape, nonlinearity=obj)
layer.name = 'foo'
bnstack = batch_norm(layer)
assert isinstance(bnstack, NonlinearityLayer)
assert isinstance(bnstack.input_layer, BatchNormLayer)
assert bnstack.name == 'foo_bn_nonlin'
assert bnstack.input_layer.name == 'foo_bn'
# check if created layers remain unnamed if no names are given
layer = Mock(Layer, output_shape=input_shape, nonlinearity=obj)
bnstack = batch_norm(layer)
assert isinstance(bnstack, NonlinearityLayer)
assert isinstance(bnstack.input_layer, BatchNormLayer)
assert bnstack.name is None
assert bnstack.input_layer.name is None
| 39.848138 | 79 | 0.640541 |
22a2c34ed37e4b975aa8af80ea2ac1cc52e96d46 | 988 | py | Python | ajax_table.py | mdmrts/flask_tables | 95921f3af9ab32547b822a5d5c4fd37c33bfb753 | ["MIT"] | null | null | null | ajax_table.py | mdmrts/flask_tables | 95921f3af9ab32547b822a5d5c4fd37c33bfb753 | ["MIT"] | null | null | null | ajax_table.py | mdmrts/flask_tables | 95921f3af9ab32547b822a5d5c4fd37c33bfb753 | ["MIT"] | null | null | null |
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), index=True)
age = db.Column(db.Integer, index=True)
address = db.Column(db.String(256))
phone = db.Column(db.String(20))
email = db.Column(db.String(120))
def to_dict(self):
return {
'name': self.name,
'age': self.age,
'address': self.address,
'phone': self.phone,
'email': self.email
}
db.create_all()
@app.route('/')
def index():
return render_template('ajax_table.html', title='Ajax Table')
@app.route('/api/data')
def data():
return {'data': [user.to_dict() for user in User.query]}
if __name__ == '__main__':
app.run()
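# Hedged usage sketch (not part of the original example): seed one record and
# read it back through the JSON endpoint using Flask's built-in test client.
def _demo_api_data():
    db.session.add(User(name='Alice', age=30, address='1 Main St',
                        phone='555-0100', email='alice@example.com'))
    db.session.commit()
    with app.test_client() as client:
        return client.get('/api/data').get_json()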
| 24.097561 | 65 | 0.632591 |
0c50e631fefceb97f2c97c8747698f8418c3d2ab | 472,007 | py | Python | modules/s3db/hrm.py | himansu1997/eden | 1e2cf2b00f55da46b1ce3e6b7ad44b5345d7a1dc | ["MIT"] | null | null | null | modules/s3db/hrm.py | himansu1997/eden | 1e2cf2b00f55da46b1ce3e6b7ad44b5345d7a1dc | ["MIT"] | null | null | null | modules/s3db/hrm.py | himansu1997/eden | 1e2cf2b00f55da46b1ce3e6b7ad44b5345d7a1dc | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
""" Sahana Eden Human Resources Management
@copyright: 2011-2021 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("HRModel",
"HRSiteModel",
"HRSalaryModel",
"HRInsuranceModel",
#"HRJobModel",
"HRContractModel",
"HRSkillModel",
"HRTagModel",
"HRAppraisalModel",
"HRExperienceModel",
"HRAwardModel",
"HRDisciplinaryActionModel",
"HRProgrammeModel",
"HRShiftModel",
"HRDelegationModel",
"hrm_AssignMethod",
"hrm_competency_controller",
"hrm_compose",
"hrm_configure_pr_group_membership",
"hrm_credential_controller",
"hrm_CV",
"hrm_experience_controller",
"hrm_group_controller",
"hrm_human_resource_controller",
"hrm_human_resource_filters",
"hrm_HumanResourceRepresent",
"hrm_human_resource_onaccept",
"hrm_map_popup",
#"hrm_Medical",
"hrm_person_controller",
#"hrm_position_represent",
"hrm_Record",
"hrm_rheader",
"hrm_training_controller",
"hrm_training_event_controller",
"hrm_TrainingEventRepresent",
"hrm_xls_list_fields",
#"hrm_competency_list_layout",
#"hrm_credential_list_layout",
#"hrm_experience_list_layout",
#"hrm_training_list_layout",
)
import datetime
import json
from gluon import *
from gluon.sqlhtml import RadioWidget
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3PopupLink
# =============================================================================
class HRModel(S3Model):
names = ("hrm_department",
"hrm_department_id",
"hrm_job_title",
"hrm_job_title_id",
"hrm_job_title_human_resource",
"hrm_human_resource",
"hrm_human_resource_id",
"hrm_type_opts",
"hrm_human_resource_represent",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
auth = current.auth
settings = current.deployment_settings
ADMIN = current.session.s3.system_roles.ADMIN
is_admin = auth.s3_has_role(ADMIN)
AUTOCOMPLETE_HELP = current.messages.AUTOCOMPLETE_HELP
#ORGANISATION = messages.ORGANISATION
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
organisation_id = self.org_organisation_id
root_org = auth.root_org()
if is_admin:
filter_opts = ()
elif root_org:
filter_opts = (root_org, None)
else:
filter_opts = (None,)
mix_staff = settings.get_hrm_mix_staff()
request = current.request
controller = request.controller
group = request.get_vars.get("group", None)
if not group:
if mix_staff:
group = None
elif controller == "vol":
group = "volunteer"
elif controller == "deploy":
group = None
#elif controller in ("hrm", "org", "inv", "cr", "med", "req"):
else:
group = "staff"
# =====================================================================
# Departments
#
tablename = "hrm_department"
define_table(tablename,
Field("name", notnull=True, length=64,
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
# Only included in order to be able to set
# realm_entity to filter appropriately
organisation_id(default = root_org,
readable = is_admin,
writable = is_admin,
),
s3_comments(label = T("Description"),
comment = None,
),
*s3_meta_fields())
label_create = T("Create Department")
crud_strings[tablename] = Storage(
label_create = label_create,
title_display = T("Department Details"),
title_list = T("Department Catalog"),
title_update = T("Edit Department"),
title_upload = T("Import Departments"),
label_list_button = T("List Departments"),
label_delete_button = T("Delete Department"),
msg_record_created = T("Department added"),
msg_record_modified = T("Department updated"),
msg_record_deleted = T("Department deleted"),
msg_list_empty = T("Currently no entries in the catalog"),
)
represent = S3Represent(lookup = tablename)
department_id = S3ReusableField("department_id", "reference %s" % tablename,
label = T("Department / Unit"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_department.id",
represent,
filterby = "organisation_id",
filter_opts = filter_opts,
)),
sortby = "name",
comment = S3PopupLink(c = "vol" if group == "volunteer" else "hrm",
f = "department",
label = label_create,
),
)
configure("hrm_department",
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("organisation_id",),
),
)
# =====================================================================
# Job Titles (Mayon: StaffResourceType)
#
STAFF = settings.get_hrm_staff_label()
if settings.has_module("vol"):
hrm_types = True
hrm_type_opts = {1: STAFF,
2: T("Volunteer"),
3: T("Both")
}
if group == "staff":
hrm_type_default = 1
elif group == "volunteer":
hrm_type_default = 2
else:
hrm_type_default = 3
else:
hrm_types = False
hrm_type_opts = {1: STAFF}
hrm_type_default = 1
if settings.get_hrm_job_title_deploy():
hrm_types = True
hrm_type_opts[4] = T("Deployment")
if group == "volunteer":
not_filter_opts = (1, 4)
code_label = T("Volunteer ID")
departments = settings.get_hrm_vol_departments()
job_titles = settings.get_hrm_vol_roles()
elif mix_staff:
not_filter_opts = (4,)
code_label = T("Organization ID")
departments = settings.get_hrm_staff_departments()
job_titles = True
else:
# Staff
not_filter_opts = (2, 4)
code_label = T("Staff ID")
departments = settings.get_hrm_staff_departments()
job_titles = True
org_dependent_job_titles = settings.get_hrm_org_dependent_job_titles()
tablename = "hrm_job_title"
define_table(tablename,
Field("name", notnull=True,
length=64, # Mayon compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
# Enable in templates as-required
self.org_region_id(readable = False,
writable = False,
),
organisation_id(default = root_org if org_dependent_job_titles else None,
readable = is_admin if org_dependent_job_titles else False,
writable = is_admin if org_dependent_job_titles else False,
),
Field("type", "integer",
default = hrm_type_default,
label = T("Type"),
readable = hrm_types,
writable = hrm_types,
represent = s3_options_represent(hrm_type_opts),
requires = IS_IN_SET(hrm_type_opts),
),
s3_comments(comment = None,
label = T("Description"),
),
*s3_meta_fields())
if group == "volunteer":
label = T("Volunteer Role")
label_create = T("Create Volunteer Role")
tooltip = T("The volunteer's role")
crud_strings[tablename] = Storage(
label_create = label_create,
title_display = T("Volunteer Role Details"),
title_list = T("Volunteer Role Catalog"),
title_update = T("Edit Volunteer Role"),
label_list_button = T("List Volunteer Roles"),
label_delete_button = T("Delete Volunteer Role"),
msg_record_created = T("Volunteer Role added"),
msg_record_modified = T("Volunteer Role updated"),
msg_record_deleted = T("Volunteer Role deleted"),
msg_list_empty = T("Currently no entries in the catalog"),
)
else:
label = T("Job Title")
label_create = T("Create Job Title")
tooltip = T("The staff member's official job title")
crud_strings[tablename] = Storage(
label_create = label_create,
title_display = T("Job Title Details"),
title_list = T("Job Title Catalog"),
title_update = T("Edit Job Title"),
label_list_button = T("List Job Titles"),
label_delete_button = T("Delete Job Title"),
msg_record_created = T("Job Title added"),
msg_record_modified = T("Job Title updated"),
msg_record_deleted = T("Job Title deleted"),
msg_list_empty = T("Currently no entries in the catalog"),
)
represent = S3Represent(lookup = tablename,
translate = True,
)
if org_dependent_job_titles:
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_job_title.id",
represent,
filterby = "organisation_id",
filter_opts = filter_opts,
not_filterby = "type",
not_filter_opts = not_filter_opts,
))
else:
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_job_title.id",
represent,
not_filterby = "type",
not_filter_opts = not_filter_opts,
))
job_title_id = S3ReusableField("job_title_id", "reference %s" % tablename,
label = label,
ondelete = "SET NULL",
represent = represent,
requires = requires,
sortby = "name",
comment = S3PopupLink(c = "vol" if group == "volunteer" else "hrm",
f = "job_title",
# Add this for use cases where there is no special controller for an options lookup
#vars = {"prefix": "hrm",
# "parent": "human_resource",
# },
label = label_create,
title = label,
tooltip = tooltip,
),
)
configure("hrm_job_title",
deduplicate = self.hrm_job_title_duplicate,
onvalidation = self.hrm_job_title_onvalidation,
)
# =====================================================================
# Human Resource
#
# People who are either Staff or Volunteers
#
# @ToDo: Move Volunteers to a separate resource?: vol_volunteer
#
# @ToDo: Allocation Status for Events (link table)
#
STAFF = settings.get_hrm_staff_label()
# NB These numbers are hardcoded into KML Export stylesheet
hrm_type_opts = {1: STAFF,
2: T("Volunteer"),
}
hrm_status_opts = {1: T("Active"),
2: T("Resigned"), # They left of their own accord
3: T("Terminated"), # Org terminated their contract
4: T("Died"),
}
organisation_label = settings.get_hrm_organisation_label()
multiple_contracts = settings.get_hrm_multiple_contracts()
use_code = settings.get_hrm_use_code()
if group == "volunteer" or s3.bulk or not group:
# Volunteers don't have a Site
# Don't set a Site for Bulk Imports unless set explicitly
default_site = None
else:
default_site = auth.user.site_id if auth.is_logged_in() else None
if settings.get_org_autocomplete():
org_widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
else:
org_widget = None
if settings.get_org_site_autocomplete():
site_widget = S3SiteAutocompleteWidget()
site_comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Requested By Facility"),
AUTOCOMPLETE_HELP,
))
else:
site_widget = None
site_comment = None
tablename = "hrm_human_resource"
realms = auth.permission.permitted_realms(tablename, method="create")
define_table(tablename,
# Instances
super_link("track_id", "sit_trackable"),
super_link("doc_id", "doc_entity"),
organisation_id(empty = not settings.get_hrm_org_required(),
label = organisation_label,
requires = self.org_organisation_requires(required = True,
realms = realms,
),
widget = org_widget,
),
super_link("site_id", "org_site",
comment = site_comment,
default = default_site,
instance_types = auth.org_site_types,
#empty = False,
label = settings.get_org_site_label(),
ondelete = "SET NULL",
orderby = "org_site.name",
not_filterby = "obsolete",
not_filter_opts = (True,),
readable = True,
writable = True,
realms = realms,
represent = self.org_site_represent,
widget = site_widget,
),
self.pr_person_id(comment = None,
empty = False,
ondelete = "CASCADE",
widget = S3AddPersonWidget(controller = "hrm"),
),
Field("type", "integer",
default = 1,
label = T("Type"),
represent = s3_options_represent(hrm_type_opts),
requires = IS_IN_SET(hrm_type_opts,
zero = None),
widget = RadioWidget.widget,
# Normally set via the Controller we create from
readable = mix_staff,
writable = mix_staff,
),
Field("code",
label = code_label,
represent = lambda v: v or NONE,
readable = use_code,
writable = use_code,
),
job_title_id(readable = job_titles,
writable = job_titles,
),
department_id(readable = departments,
writable = departments,
),
Field("essential", "boolean",
label = T("Essential Staff?"),
represent = s3_yes_no_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Essential Staff?"),
T("If the person counts as essential staff when evacuating all non-essential staff."),
),
),
),
# Contract
s3_date("start_date",
label = T("Start Date"),
set_min = "#hrm_human_resource_end_date",
),
s3_date("end_date",
label = T("End Date"),
set_max = "#hrm_human_resource_start_date",
start_field = "hrm_human_resource_start_date",
default_interval = 12,
),
# Current status
Field("status", "integer",
default = 1,
label = T("Status"),
represent = s3_options_represent(hrm_status_opts),
requires = IS_IN_SET(hrm_status_opts,
zero = None),
),
# Base location + Site
self.gis_location_id(label = T("Base Location"),
readable = False,
writable = False,
),
Field("org_contact", "boolean",
label = T("Organization Contact"),
represent = s3_yes_no_represent,
readable = False,
writable = False,
),
Field("site_contact", "boolean",
label = T("Facility Contact"),
represent = s3_yes_no_represent,
),
s3_comments(),
*s3_meta_fields())
# @ToDo: Move this configurability to templates rather than lots of deployment_settings
if STAFF == T("Contacts"):
contacts = True
crud_strings["hrm_staff"] = Storage(
label_create = T("Create Contact"),
title_display = T("Contact Details"),
title_list = STAFF,
title_update = T("Edit Contact Details"),
title_upload = T("Import Contacts"),
label_list_button = T("List Contacts"),
label_delete_button = T("Delete Contact"),
msg_record_created = T("Contact added"),
msg_record_modified = T("Contact Details updated"),
msg_record_deleted = T("Contact deleted"),
msg_list_empty = T("No Contacts currently registered"),
)
else:
contacts = False
crud_strings["hrm_staff"] = Storage(
label_create = T("Create Staff Member"),
title_display = T("Staff Member Details"),
title_list = STAFF,
title_update = T("Edit Staff Member Details"),
title_upload = T("Import Staff"),
label_list_button = T("List Staff Members"),
label_delete_button = T("Delete Staff Member"),
msg_record_created = T("Staff Member added"),
msg_record_modified = T("Staff Member Details updated"),
msg_record_deleted = T("Staff Member deleted"),
msg_list_empty = T("No Staff currently registered"),
)
crud_strings["hrm_volunteer"] = Storage(
label_create = T("Create Volunteer"),
title_display = T("Volunteer Details"),
title_list = T("Volunteers"),
title_update = T("Edit Volunteer Details"),
title_upload = T("Import Volunteers"),
label_list_button = T("List Volunteers"),
label_delete_button = T("Delete Volunteer"),
msg_record_created = T("Volunteer added"),
msg_record_modified = T("Volunteer Details updated"),
msg_record_deleted = T("Volunteer deleted"),
msg_list_empty = T("No Volunteers currently registered"),
)
hrm_human_resource_represent = hrm_HumanResourceRepresent(show_link = True)
if group == "staff":
label = STAFF
crud_strings[tablename] = crud_strings["hrm_staff"]
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_human_resource.id",
hrm_human_resource_represent,
filterby = "type",
filter_opts = (1,),
sort = True,
))
widget = S3HumanResourceAutocompleteWidget(group="staff")
elif group == "volunteer":
label = T("Volunteer")
crud_strings[tablename] = crud_strings["hrm_volunteer"]
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_human_resource.id",
hrm_human_resource_represent,
filterby = "type",
filter_opts = (2,),
sort = True,
))
widget = S3HumanResourceAutocompleteWidget(group="volunteer")
else:
label = T("Human Resource")
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_human_resource.id",
hrm_human_resource_represent,
sort = True
))
widget = S3HumanResourceAutocompleteWidget()
if contacts:
crud_strings[tablename] = crud_strings["hrm_staff"]
else:
crud_strings[tablename] = Storage(
label_create = T("Create Staff or Volunteer"),
title_display = T("Human Resource Details"),
title_list = T("Staff & Volunteers"),
title_update = T("Edit Record"),
title_upload = T("Search Staff & Volunteers"),
label_list_button = T("List Staff & Volunteers"),
label_delete_button = T("Delete Record"),
msg_record_created = T("Human Resource added"),
msg_record_modified = T("Record updated"),
msg_record_deleted = T("Record deleted"),
msg_list_empty = T("No staff or volunteers currently registered"),
)
comment = S3PopupLink(c = "vol" if group == "volunteer" else "hrm",
f = group or "staff",
vars = {"child": "human_resource_id"},
label = crud_strings["hrm_%s" % group].label_create if group else \
crud_strings[tablename].label_create,
title = label,
tooltip = AUTOCOMPLETE_HELP,
)
human_resource_id = S3ReusableField("human_resource_id", "reference %s" % tablename,
label = label,
ondelete = "RESTRICT",
represent = hrm_human_resource_represent,
requires = requires,
sortby = ["type", "status"],
widget = widget,
comment = comment,
)
# Custom Method for S3HumanResourceAutocompleteWidget and S3AddPersonWidget
set_method = self.set_method
set_method("hrm", "human_resource",
method = "search_ac",
action = self.hrm_search_ac)
set_method("hrm", "human_resource",
method = "lookup",
action = self.hrm_lookup)
# Components
add_components(tablename,
# Contact Data
pr_contact = (# Email
{"name": "email",
"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
"filterby": {
"contact_method": "EMAIL",
},
},
# Mobile Phone
{"name": "phone",
"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
"filterby": {
"contact_method": "SMS",
},
},
),
pr_contact_emergency = {"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
},
pr_address = ({"name": "home_address",
"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
"filterby": {
"type": "1",
},
},
),
# Experience & Skills
hrm_appraisal = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_certification = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_competency = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_contract = {"joinby": "human_resource_id",
"multiple": multiple_contracts,
},
hrm_credential = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
pr_education = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_experience = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_insurance = "human_resource_id",
hrm_salary = "human_resource_id",
hrm_training = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
hrm_trainings = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
"multiple": False,
},
# Organisation Groups
org_group_person = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
# Projects
project_project = {"link": "project_human_resource_project",
"joinby": "human_resource_id",
"key": "project_id",
},
# Application(s) for Deployment
deploy_application = "human_resource_id",
# Assignments
deploy_assignment = "human_resource_id",
# Hours
#hrm_hours = "human_resource_id",
# Tags
hrm_human_resource_tag = {"name": "tag",
"joinby": "human_resource_id",
},
)
# Optional Components
teams = settings.get_hrm_teams()
if teams:
add_components(tablename,
# Team Memberships
pr_group_membership = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
)
if group in ("volunteer", None) or mix_staff:
add_components(tablename,
# Programmes
hrm_programme_hours = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
},
# Availability
pr_person_availability = {"link": "pr_person",
"joinby": "id",
"key": "id",
"fkey": "person_id",
"pkey": "person_id",
# Will need to change in future
"multiple": False,
},
# Volunteer Details
vol_details = {"joinby": "human_resource_id",
"multiple": False,
},
# Volunteer Cluster
vol_volunteer_cluster = {"joinby": "human_resource_id",
"multiple": False,
},
)
if settings.get_hrm_multiple_job_titles():
add_components(tablename,
# Job Titles
hrm_job_title_human_resource = {"name": "job_title",
"joinby": "human_resource_id",
}
)
crud_fields = ["organisation_id",
"person_id",
"start_date",
"end_date",
"status",
]
if use_code:
crud_fields.insert(2, "code")
filter_widgets = hrm_human_resource_filters(resource_type = group,
hrm_type_opts = hrm_type_opts,
)
report_fields = ["organisation_id",
"person_id",
"person_id$gender",
(T("Training"), "training.course_id"),
"location_id$L1",
"location_id$L2",
]
if settings.get_org_branches():
report_fields.insert(1, (settings.get_hrm_root_organisation_label(), "organisation_id$root_organisation"))
if teams:
report_fields.append((T(teams), "group_membership.group_id"))
if mix_staff:
crud_fields.insert(1, "site_id")
crud_fields.insert(2, "type")
posn = 4
if use_code:
posn += 1
crud_fields.insert(posn, "job_title_id")
if settings.get_hrm_staff_departments() or \
settings.get_hrm_vol_departments():
crud_fields.insert(posn, "department_id")
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
crud_fields.insert(posn, S3SQLInlineComponent("programme_hours",
label = "",
fields = ["programme_id"],
link = False,
multiple = False,
))
elif vol_experience == "activity":
report_fields.append("person_id$activity_hours.activity_hours_activity_type.activity_type_id")
crud_fields.append("details.volunteer_type")
if settings.get_hrm_vol_availability_tab() is False and \
settings.get_pr_person_availability_options() is not None:
crud_fields.append("person_availability.options")
crud_fields.append("details.card")
vol_active = settings.get_hrm_vol_active()
if vol_active and not callable(vol_active):
# Set manually
crud_fields.append("details.active")
report_fields.extend(("site_id",
"department_id",
"job_title_id",
(T("Age Group"), "person_id$age_group"),
"person_id$education.level",
))
# Needed for Age Group VirtualField to avoid extra DB calls
report_fields_extra = ["person_id$date_of_birth"]
elif group == "volunteer":
# This gets copied to hrm_human_resource.location_id onaccept, faster to lookup without joins
#location_context = "person_id$address.location_id" # When not using S3Track()
if settings.get_hrm_vol_roles():
crud_fields.insert(2, "job_title_id")
report_fields.append("job_title_id")
if settings.get_hrm_vol_departments():
crud_fields.insert(4, "department_id")
report_fields.append("department_id")
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
crud_fields.insert(2, S3SQLInlineComponent("programme_hours",
label = "",
fields = ["programme_id"],
link = False,
multiple = False,
))
elif vol_experience == "activity":
report_fields.append("person_id$activity_hours.activity_hours_activity_type.activity_type_id")
crud_fields.append("details.volunteer_type")
if settings.get_hrm_vol_availability_tab() is False and \
settings.get_pr_person_availability_options() is not None:
crud_fields.append("person_availability.options")
crud_fields.extend(("details.card",
# @ToDo: Move these to the IFRC Template (only PH RC people use this)
#"volunteer_cluster.vol_cluster_type_id",
#"volunteer_cluster.vol_cluster_id",
#"volunteer_cluster.vol_cluster_position_id",
))
vol_active = settings.get_hrm_vol_active()
if vol_active and not callable(vol_active):
# Set manually
crud_fields.append("details.active")
report_fields.extend(((T("Age Group"), "person_id$age_group"),
"person_id$education.level",
))
# Needed for Age Group VirtualField to avoid extra DB calls
report_fields_extra = ["person_id$date_of_birth"]
else:
# Staff
# This gets copied to hrm_human_resource.location_id onaccept, faster to lookup without joins
#location_context = "site_id$location_id" # When not using S3Track()
crud_fields.insert(1, "site_id")
posn = 3
if use_code:
posn += 1
crud_fields.insert(posn, "job_title_id")
if settings.get_hrm_staff_departments():
crud_fields.insert(posn, "department_id")
report_fields.extend(("site_id",
"department_id",
"job_title_id",
))
report_fields_extra = []
# Redirect to the Details tabs after creation
if controller in ("hrm", "vol"):
hrm_url = URL(c=controller, f="person",
vars = {"human_resource.id": "[id]"},
)
else:
# Being added as a component to Org, Site or Project
hrm_url = None
# Custom Form
s3.hrm = Storage(crud_fields = crud_fields) # Store fields for easy ability to modify later
crud_form = S3SQLCustomForm(*crud_fields)
if settings.get_hrm_org_required():
mark_required = ("organisation_id",)
else:
mark_required = None
configure(tablename,
context = {#"location": location_context,
"organisation": "organisation_id",
"person": "person_id",
"project": "project.id",
"site": "site_id",
},
create_next = hrm_url,
crud_form = crud_form,
# This allows only one HR record per person and organisation;
# if multiple HR records for the same person with the same org
# are desired, then this needs an additional criterion in the
# query (e.g. job title, or type):
deduplicate = S3Duplicate(primary = ("person_id",),
secondary = ("organisation_id",),
ignore_deleted = True,
),
deletable = settings.get_hrm_deletable(),
#extra_fields = ["person_id"]
filter_widgets = filter_widgets,
mark_required = mark_required,
onaccept = hrm_human_resource_onaccept,
ondelete = self.hrm_human_resource_ondelete,
realm_components = ("presence",),
report_fields = report_fields_extra,
report_options = Storage(
rows = report_fields,
cols = report_fields,
fact = report_fields,
methods = ("count", "list",),
defaults = Storage(
rows = "organisation_id",
cols = "training.course_id",
fact = "count(person_id)",
)
),
# Default summary
summary = [{"name": "addform",
"common": True,
"widgets": [{"method": "create"}],
},
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "report",
"label": "Report",
"widgets": [{"method": "report",
"ajax_init": True}]
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map",
"ajax_init": True}],
},
],
super_entity = ("sit_trackable", "doc_entity"),
#update_next = hrm_url,
update_realm = True,
)
# =====================================================================
# Job Titles <> Human Resources link table
#
tablename = "hrm_job_title_human_resource"
define_table(tablename,
human_resource_id(empty = False,
ondelete = "CASCADE",
),
job_title_id(empty = False,
ondelete = "CASCADE",
),
Field("main", "boolean",
default = True,
label = T("Main?"),
represent = s3_yes_no_represent,
),
s3_date(label = T("Start Date")),
s3_date("end_date",
label = T("End Date"),
),
s3_comments(),
*s3_meta_fields())
configure("hrm_job_title_human_resource",
onaccept = self.hrm_job_title_human_resource_onaccept,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"hrm_department_id": department_id,
"hrm_job_title_id": job_title_id,
"hrm_human_resource_id": human_resource_id,
"hrm_status_opts": hrm_status_opts,
"hrm_type_opts": hrm_type_opts,
"hrm_human_resource_represent": hrm_human_resource_represent,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
"""
Safe defaults for model-global names in case module is disabled
"""
dummy = S3ReusableField.dummy
return {"hrm_department_id": dummy("department_id"),
"hrm_job_title_id": dummy("job_title_id"),
"hrm_human_resource_id": dummy("human_resource_id"),
}
# -------------------------------------------------------------------------
@staticmethod
def hrm_job_title_duplicate(item):
"""
Update detection for hrm_job_title
@param item: the S3ImportItem
"""
data_get = item.data.get
name = data_get("name", None)
if current.deployment_settings.get_hrm_org_dependent_job_titles():
org = data_get("organisation_id", None)
else:
org = None
role_type = data_get("type", None)
table = item.table
query = (table.name.lower() == s3_str(name).lower())
if org:
query = query & (table.organisation_id == org)
if role_type:
query = query & (table.type == role_type)
duplicate = current.db(query).select(table.id,
limitby = (0, 1),
).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def hrm_job_title_onvalidation(form):
"""
Ensure Job Titles are not Org-specific unless configured to be so
"""
if not current.deployment_settings.get_hrm_org_dependent_job_titles():
form.vars["organisation_id"] = None
# -------------------------------------------------------------------------
@staticmethod
def hrm_job_title_human_resource_onaccept(form):
"""
Record creation post-processing
If the job title is the main, set the
human_resource.job_title_id accordingly
"""
form_vars = form.vars
if form_vars.main:
# Read the record
# (safer than relying on vars which might be missing on component tabs)
db = current.db
ltable = db.hrm_job_title_human_resource
record = db(ltable.id == form_vars.id).select(ltable.human_resource_id,
ltable.job_title_id,
limitby = (0, 1),
).first()
# Set the HR's job_title_id to the new job title
htable = db.hrm_human_resource
db(htable.id == record.human_resource_id).update(job_title_id = record.job_title_id)
# -------------------------------------------------------------------------
@staticmethod
def hrm_search_ac(r, **attr):
"""
JSON search method for S3HumanResourceAutocompleteWidget and S3AddPersonWidget
- full name search
- include Organisation & Job Role in the output
"""
resource = r.resource
response = current.response
# Query comes in pre-filtered to accessible & deletion_status
# Respect response.s3.filter
resource.add_filter(response.s3.filter)
_vars = current.request.get_vars
# JQueryUI Autocomplete uses "term"
# old JQuery Autocomplete uses "q"
# what uses "value"?
value = _vars.term or _vars.value or _vars.q or None
if not value:
r.error(400, "No value provided!")
# We want to do case-insensitive searches
# (default anyway on MySQL/SQLite, but not PostgreSQL)
value = s3_str(value).lower()
if " " in value:
# Multiple words
# - check for match of first word against first_name
# - & second word against either middle_name or last_name
value1, value2 = value.split(" ", 1)
value2 = value2.strip()
query = ((FS("person_id$first_name").lower().like(value1 + "%")) & \
((FS("person_id$middle_name").lower().like(value2 + "%")) | \
(FS("person_id$last_name").lower().like(value2 + "%"))))
else:
# Single word - check for match against any of the 3 names
value = value.strip()
query = ((FS("person_id$first_name").lower().like(value + "%")) | \
(FS("person_id$middle_name").lower().like(value + "%")) | \
(FS("person_id$last_name").lower().like(value + "%")))
resource.add_filter(query)
settings = current.deployment_settings
limit = int(_vars.limit or 0)
MAX_SEARCH_RESULTS = settings.get_search_max_results()
if (not limit or limit > MAX_SEARCH_RESULTS) and resource.count() > MAX_SEARCH_RESULTS:
output = [
{"label": str(current.T("There are more than %(max)s results, please input more characters.") % \
{"max": MAX_SEARCH_RESULTS}),
},
]
else:
fields = ["id",
"person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
"job_title_id$name",
]
show_orgs = settings.get_hrm_show_organisation()
if show_orgs:
fields.append("organisation_id$name")
name_format = settings.get_pr_name_format()
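# Derive the sort column from the configured name format: substitute
# numeric markers for the three name components and order by whichever
# component the format lists first.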
test = name_format % {"first_name": 1,
"middle_name": 2,
"last_name": 3,
}
test = "".join(ch for ch in test if ch in ("1", "2", "3"))
if test[:1] == "1":
orderby = "pr_person.first_name"
elif test[:1] == "2":
orderby = "pr_person.middle_name"
else:
orderby = "pr_person.last_name"
rows = resource.select(fields,
start = 0,
limit = limit,
orderby = orderby,
)["rows"]
output = []
iappend = output.append
for row in rows:
name = Storage(first_name=row["pr_person.first_name"],
middle_name=row["pr_person.middle_name"],
last_name=row["pr_person.last_name"],
)
name = s3_fullname(name)
item = {"id" : row["hrm_human_resource.id"],
"name" : name,
}
if show_orgs:
item["org"] = row["org_organisation.name"]
job_title = row.get("hrm_job_title.name", None)
if job_title:
item["job"] = job_title
iappend(item)
response.headers["Content-Type"] = "application/json"
return json.dumps(output, separators=SEPARATORS)
# -------------------------------------------------------------------------
@staticmethod
def hrm_lookup(r, **attr):
"""
JSON lookup method for S3AddPersonWidget
"""
hrm_id = r.id
if not hrm_id:
r.error(400, "No id provided!")
db = current.db
s3db = current.s3db
settings = current.deployment_settings
request_dob = settings.get_pr_request_dob()
request_gender = settings.get_pr_request_gender()
home_phone = settings.get_pr_request_home_phone()
tags = settings.get_pr_request_tags()
htable = db.hrm_human_resource
ptable = db.pr_person
ctable = s3db.pr_contact
fields = [htable.organisation_id,
ptable.pe_id,
# We have these already from the search_ac
#ptable.first_name,
#ptable.middle_name,
#ptable.last_name,
]
separate_name_fields = settings.get_pr_separate_name_fields()
if separate_name_fields:
middle_name = separate_name_fields == 3
fields += [ptable.first_name,
ptable.middle_name,
ptable.last_name,
]
left = None
if request_dob:
fields.append(ptable.date_of_birth)
if request_gender:
fields.append(ptable.gender)
if current.request.controller == "vol":
dtable = s3db.pr_person_details
fields.append(dtable.occupation)
left = dtable.on(dtable.person_id == ptable.id)
if tags:
fields.append(ptable.id)
query = (htable.id == hrm_id) & \
(ptable.id == htable.person_id)
row = db(query).select(left=left,
*fields).first()
if left:
occupation = row["pr_person_details.occupation"]
else:
occupation = None
organisation_id = row["hrm_human_resource.organisation_id"]
row = row["pr_person"]
#first_name = row.first_name
#middle_name = row.middle_name
#last_name = row.last_name
if request_dob:
date_of_birth = row.date_of_birth
else:
date_of_birth = None
if request_gender:
gender = row.gender
else:
gender = None
if separate_name_fields:
first_name = row.first_name
last_name = row.last_name
if middle_name:
middle_name = row.middle_name
else:
first_name = None
middle_name = None
last_name = None
# Tags
if tags:
tags = [t[1] for t in tags]
ttable = s3db.pr_person_tag
query = (ttable.person_id == row.id) & \
(ttable.deleted == False) & \
(ttable.tag.belongs(tags))
tags = db(query).select(ttable.tag,
ttable.value,
)
# Lookup contacts separately as we can't limitby here
if home_phone:
contact_methods = ("SMS", "EMAIL", "HOME_PHONE")
else:
contact_methods = ("SMS", "EMAIL")
query = (ctable.pe_id == row.pe_id) & \
(ctable.contact_method.belongs(contact_methods))
rows = db(query).select(ctable.contact_method,
ctable.value,
orderby = ctable.priority,
)
email = mobile_phone = None
if home_phone:
home_phone = None
for row in rows:
if not email and row.contact_method == "EMAIL":
email = row.value
elif not mobile_phone and row.contact_method == "SMS":
mobile_phone = row.value
elif not home_phone and row.contact_method == "HOME_PHONE":
home_phone = row.value
if email and mobile_phone and home_phone:
break
else:
for row in rows:
if not email and row.contact_method == "EMAIL":
email = row.value
elif not mobile_phone and row.contact_method == "SMS":
mobile_phone = row.value
if email and mobile_phone:
break
# Minimal flattened structure
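# e.g. (illustrative values only):
# {"first_name": "Jane", "last_name": "Doe", "email": "jane@example.org",
#  "mphone": "+1234567890", "org_id": 4}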
item = {}
if first_name:
item["first_name"] = first_name
if middle_name:
item["middle_name"] = middle_name
if last_name:
item["last_name"] = last_name
if email:
item["email"] = email
if mobile_phone:
item["mphone"] = mobile_phone
if home_phone:
item["hphone"] = home_phone
if gender:
item["sex"] = gender
if date_of_birth:
item["dob"] = date_of_birth
if occupation:
item["occupation"] = occupation
if organisation_id:
item["org_id"] = organisation_id
for row in tags:
item[row.tag] = row.value
output = json.dumps(item, separators=SEPARATORS)
current.response.headers["Content-Type"] = "application/json"
return output
# -------------------------------------------------------------------------
@staticmethod
def hrm_human_resource_ondelete(row):
""" On-delete routine for HR records """
db = current.db
htable = db.hrm_human_resource
# Update PE hierarchy
person_id = row.person_id
if person_id:
current.s3db.pr_update_affiliations(htable, row)
# =============================================================================
class HRSiteModel(S3Model):
names = ("hrm_human_resource_site",)
def model(self):
T = current.T
# =========================================================================
# Link between Human Resources & Facilities
# - this is used to allow different Site Contacts per Sector
# - it can be used to provide the right UI when adding HRs to a
# Facility via the Staff tab, although we use hrm_Assign for that now.
#
tablename = "hrm_human_resource_site"
self.define_table(tablename,
self.hrm_human_resource_id(ondelete = "CASCADE"),
self.org_site_id(),
self.org_sector_id(),
Field("site_contact", "boolean",
label = T("Facility Contact"),
represent = lambda opt: \
(T("No"), T("Yes"))[opt == True],
),
*s3_meta_fields())
self.configure(tablename,
# Each HR can only be assigned to one site at a time:
deduplicate = S3Duplicate(primary = ("human_resource_id",),
secondary = ("sector_id",),
),
onaccept = self.hrm_human_resource_site_onaccept,
ondelete = self.hrm_human_resource_site_ondelete,
)
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Assign Staff"),
title_display = T("Staff Assignment Details"),
title_list = T("Staff Assignments"),
title_update = T("Edit Staff Assignment"),
label_list_button = T("List Staff Assignments"),
label_delete_button = T("Delete Staff Assignment"),
msg_record_created = T("Staff Assigned"),
msg_record_modified = T("Staff Assignment updated"),
msg_record_deleted = T("Staff Assignment removed"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no staff assigned"),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# -------------------------------------------------------------------------
@staticmethod
def hrm_human_resource_site_onaccept(form):
"""
Update the Human Resource record with the site_id
"""
db = current.db
human_resource_id = form.vars.human_resource_id
# Remove any additional records for this HR
# (i.e. staff was assigned elsewhere previously)
# @ToDo: Allow one person to be the Site Contact for multiple sectors
ltable = db.hrm_human_resource_site
rows = db(ltable.human_resource_id == human_resource_id).select(ltable.id,
ltable.site_id,
#ltable.sector_id,
ltable.human_resource_id,
ltable.site_contact,
orderby = ~ltable.id
)
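# rows are sorted newest-first, so keep the most recent assignment
# and delete any older ones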
first = True
for row in rows:
if first:
first = False
continue
db(ltable.id == row.id).delete()
record = rows.first()
site_id = record.site_id
table = db.hrm_human_resource
db(table.id == human_resource_id).update(site_id = site_id,
site_contact = record.site_contact
)
# Update realm_entity of HR
entity = current.s3db.pr_get_pe_id("org_site", site_id)
if entity:
current.auth.set_realm_entity(table, human_resource_id,
entity = entity,
force_update = True)
# Fire the normal onaccept
hrform = Storage(id = human_resource_id)
hrm_human_resource_onaccept(hrform)
# -------------------------------------------------------------------------
@staticmethod
def hrm_human_resource_site_ondelete(row):
"""
Update the Human Resource record with the site_id
"""
db = current.db
table = db.hrm_human_resource
human_resource_id = row.human_resource_id
db(table.id == human_resource_id).update(location_id = None,
site_id = None,
site_contact = False,
)
# Update realm_entity of HR
current.auth.set_realm_entity(table,
human_resource_id,
force_update = True,
)
# =============================================================================
class HRSalaryModel(S3Model):
""" Data Model to track salaries of staff """
names = ("hrm_staff_level",
"hrm_salary_grade",
"hrm_salary",
)
def model(self):
db = current.db
T = current.T
define_table = self.define_table
configure = self.configure
organisation_id = self.org_organisation_id
organisation_requires = self.org_organisation_requires
# =====================================================================
# Staff Level
#
tablename = "hrm_staff_level"
define_table(tablename,
organisation_id(
requires = organisation_requires(updateable=True),
),
Field("name",
label = T("Staff Level"),
),
*s3_meta_fields())
configure(tablename,
deduplicate = S3Duplicate(primary = ("name",
"organisation_id",
),
),
)
staff_level_represent = hrm_OrgSpecificTypeRepresent(lookup=tablename)
# =====================================================================
# Salary Grades
#
tablename = "hrm_salary_grade"
define_table(tablename,
organisation_id(
requires = organisation_requires(updateable=True),
),
Field("name",
label = T("Salary Grade"),
),
*s3_meta_fields())
configure(tablename,
deduplicate = S3Duplicate(primary = ("name",
"organisation_id",
),
),
)
salary_grade_represent = hrm_OrgSpecificTypeRepresent(lookup=tablename)
# =====================================================================
# Salary
#
tablename = "hrm_salary"
define_table(tablename,
self.pr_person_id(),
self.hrm_human_resource_id(label = T("Staff Record"),
widget = None,
comment = None,
),
Field("staff_level_id", "reference hrm_staff_level",
label = T("Staff Level"),
represent = staff_level_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"hrm_staff_level.id",
staff_level_represent,
)),
comment = S3PopupLink(f = "staff_level",
label = T("Create Staff Level"),
),
),
Field("salary_grade_id", "reference hrm_salary_grade",
label = T("Salary Grade"),
represent = salary_grade_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"hrm_salary_grade.id",
salary_grade_represent,
)),
comment = S3PopupLink(f = "salary_grade",
label = T("Create Salary Grade"),
),
),
s3_date("start_date",
default = "now",
label = T("Start Date"),
set_min = "#hrm_salary_end_date",
),
s3_date("end_date",
label = T("End Date"),
set_max = "#hrm_salary_start_date",
),
Field("monthly_amount", "double",
represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v,
precision = 2,
),
requires = IS_EMPTY_OR(
IS_FLOAT_AMOUNT(minimum = 0.0)
),
default = 0.0,
),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Salary"),
title_display = T("Salary Details"),
title_list = T("Salaries"),
title_update = T("Edit Salary"),
label_list_button = T("List Salaries"),
label_delete_button = T("Delete Salary"),
msg_record_created = T("Salary added"),
msg_record_modified = T("Salary updated"),
msg_record_deleted = T("Salary removed"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no salary registered"),
)
configure(tablename,
onvalidation = self.hrm_salary_onvalidation,
orderby = "%s.start_date desc" % tablename,
)
# =====================================================================
# Salary Coefficient
#
# @todo: implement
# =====================================================================
# Allowance Level
#
# @todo: implement
return {}
# -------------------------------------------------------------------------
@staticmethod
def hrm_salary_onvalidation(form):
try:
form_vars = form.vars
start_date = form_vars.get("start_date")
end_date = form_vars.get("end_date")
except AttributeError:
return
if start_date and end_date and start_date > end_date:
form.errors["end_date"] = current.T("End date must be after start date.")
return
# =============================================================================
class hrm_OrgSpecificTypeRepresent(S3Represent):
""" Representation of organisation-specific taxonomic categories """
def __init__(self, lookup=None):
""" Constructor """
if lookup is None:
raise SyntaxError("must specify a lookup table")
fields = ("name", "organisation_id")
super(hrm_OrgSpecificTypeRepresent, self).__init__(lookup = lookup,
fields = fields,
)
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
"""
s3db = current.s3db
table = self.table
otable = s3db.org_organisation
left = otable.on(otable.id == table.organisation_id)
if len(values) == 1:
query = (key == values[0])
else:
query = key.belongs(values)
rows = current.db(query).select(table.id,
table.name,
otable.id,
otable.name,
otable.acronym,
left = left,
)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
try:
name = row[self.tablename].name
except AttributeError:
return row.name
try:
organisation = row["org_organisation"]
except AttributeError:
return name
if organisation.acronym:
return "%s (%s)" % (name, organisation.acronym)
elif organisation.name:
return "%s (%s)" % (name, organisation.name)
else:
return name
# =============================================================================
class HRInsuranceModel(S3Model):
""" Data Model to track insurance information of staff members """
names = ("hrm_insurance",
)
def model(self):
T = current.T
insurance_types = {"SOCIAL": T("Social Insurance"),
"HEALTH": T("Health Insurance"),
}
# =====================================================================
# Insurance Information
#
tablename = "hrm_insurance"
self.define_table(tablename,
# The original use (IFRC) used human_resource_id instead of the usual person_id in order to put it into the HR form
self.hrm_human_resource_id(),
# RMS uses person_id in order to have it on a common Medical Information tab with the Physical Description fields
#self.pr_person_id(),
Field("type",
label = T("Type"),
represent = s3_options_represent(insurance_types),
requires = IS_IN_SET(insurance_types),
),
Field("insurance_number",
length = 128,
label = T("Insurance Number"),
requires = IS_LENGTH(128),
),
Field("insurer",
length = 255,
label = T("Insurer"),
requires = IS_LENGTH(255),
),
Field("provider",
length = 255,
label = T("Provider"),
requires = IS_LENGTH(255),
),
Field("phone",
label = T("Emergency Number"),
requires = IS_EMPTY_OR(
IS_PHONE_NUMBER_MULTI(),
),
),
#Field("beneficiary",
# label = T("Beneficiary"),
# ),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
#context = {"person": "human_resource_id$person_id",
# },
deduplicate = S3Duplicate(primary = ("human_resource_id",
#"person_id",
"type",
),
),
)
return {}
# =============================================================================
class HRContractModel(S3Model):
""" Data model to track employment contract details of staff members """
names = ("hrm_contract",
)
def model(self):
T = current.T
contract_terms = {"SHORT": T("Short-term"),
"LONG": T("Long-term"),
"PERMANENT": T("Permanent")
}
hours_models = {"PARTTIME": T("Part-time"),
"FULLTIME": T("Full-time"),
}
# =====================================================================
# Employment Contract Details
#
tablename = "hrm_contract"
self.define_table(tablename,
self.hrm_human_resource_id(),
Field("name",
label = T("Name"),
),
s3_date(label = T("Start Date"),
),
#s3_date("end_date",
# label = T("End Date"),
# ),
Field("term",
requires = IS_IN_SET(contract_terms),
represent = s3_options_represent(contract_terms),
),
Field("hours",
requires = IS_IN_SET(hours_models),
represent = s3_options_represent(hours_models),
),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("human_resource_id",)),
)
return {}
# =============================================================================
class HRJobModel(S3Model):
"""
Unused
@ToDo: If bringing this back into use, Availability is better as a Person component than an HR component
"""
names = ("hrm_position",
"hrm_position_id",
)
def model(self):
T = current.T
db = current.db
s3db = current.s3db
define_table = self.define_table
job_title_id = self.hrm_job_title_id
organisation_id = self.org_organisation_id
site_id = self.org_site_id
group_id = self.pr_group_id
human_resource_id = self.hrm_human_resource_id
hrm_type_opts = self.hrm_type_opts
# =========================================================================
# Positions
#
# @ToDo: Shifts for use in Scenarios & during Exercises & Events
#
# @ToDo: Vacancies
#
tablename = "hrm_position"
table = define_table(tablename,
job_title_id(empty = False),
organisation_id(empty = False),
site_id,
group_id(label = "Team"),
*s3_meta_fields())
table.site_id.readable = table.site_id.writable = True
#crud_strings[tablename] = Storage(
# label_create = T("Add Position"),
# title_display = T("Position Details"),
# title_list = T("Position Catalog"),
# title_update = T("Edit Position"),
# label_list_button = T("List Positions"),
# label_delete_button = T("Delete Position"),
# msg_record_created = T("Position added"),
# msg_record_modified = T("Position updated"),
# msg_record_deleted = T("Position deleted"),
# msg_list_empty = T("Currently no entries in the catalog"),
# )
#label_create = crud_strings[tablename].label_create
position_id = S3ReusableField("position_id", "reference %s" % tablename,
label = T("Position"),
ondelete = "SET NULL",
#represent = hrm_position_represent,
requires = IS_EMPTY_OR(IS_ONE_OF(db,
"hrm_position.id",
#hrm_position_represent,
)),
sortby = "name",
#comment = DIV(A(label_create,
# _class="s3_add_resource_link",
# _href=URL(f="position",
# args="create",
# vars={"format": "popup"}
# ),
# _target="top",
# _title=label_create),
# DIV(_class="tooltip",
# _title="%s|%s" % (label_create,
# T("Add a new job role to the catalog.")))),
)
# =========================================================================
# Availability
#
# unused - see PRAvailabilityModel
#
weekdays = {1: T("Monday"),
2: T("Tuesday"),
3: T("Wednesday"),
4: T("Thursday"),
5: T("Friday"),
6: T("Saturday"),
7: T("Sunday")
}
weekdays_represent = lambda opt: ",".join([str(weekdays[o]) for o in opt])
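        # e.g. weekdays_represent([1, 3, 5]) -> "Monday,Wednesday,Friday"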
tablename = "hrm_availability"
define_table(tablename,
human_resource_id(),
Field("date_start", "date"),
Field("date_end", "date"),
Field("day_of_week", "list:integer",
default = [1, 2, 3, 4, 5],
represent = weekdays_represent,
requires = IS_EMPTY_OR(IS_IN_SET(weekdays,
zero=None,
multiple=True)),
widget = CheckboxesWidgetS3.widget,
),
Field("hours_start", "time"),
Field("hours_end", "time"),
#location_id(label=T("Available for Location"),
# requires=IS_ONE_OF(db, "gis_location.id",
# gis_LocationRepresent(),
# filterby="level",
# # @ToDo Should this change per config?
# filter_opts=gis.region_level_keys,
# orderby="gis_location.name"),
# widget=None),
*s3_meta_fields())
# =========================================================================
# Hours registration
#
tablename = "hrm_hours"
define_table(tablename,
human_resource_id(),
Field("timestmp_in", "datetime"),
Field("timestmp_out", "datetime"),
Field("hours", "double"),
*s3_meta_fields())
# =========================================================================
# Vacancy
#
# These are Positions which are not yet Filled
#
tablename = "hrm_vacancy"
define_table(tablename,
organisation_id(),
#Field("code"),
Field("title"),
Field("description", "text"),
self.super_link("site_id", "org_site",
label = T("Facility"),
readable = False,
writable = False,
sort = True,
represent = s3db.org_site_represent,
),
Field("type", "integer",
default = 1,
label = T("Type"),
represent = s3_options_represent(hrm_type_opts),
requires = IS_IN_SET(hrm_type_opts, zero=None),
),
Field("number", "integer"),
#location_id(),
Field("from", "date"),
Field("until", "date"),
Field("open", "boolean",
default = False,
),
Field("app_deadline", "date",
#label = T("Application Deadline"),
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"hrm_position_id": position_id,
}
# =============================================================================
class HRSkillModel(S3Model):
names = ("hrm_skill_type",
"hrm_skill",
"hrm_competency_rating",
"hrm_competency",
#"hrm_competency_id",
"hrm_credential",
"hrm_training",
"hrm_trainings",
"hrm_event_type",
"hrm_training_event",
"hrm_training_event_id",
"hrm_event_location",
"hrm_event_tag",
"hrm_training_event_report",
"hrm_certificate",
"hrm_certification",
"hrm_certification_onaccept",
"hrm_certificate_skill",
"hrm_course",
"hrm_course_certificate",
"hrm_course_job_title",
"hrm_course_sector",
"hrm_course_id",
"hrm_skill_id",
"hrm_multi_skill_id",
"hrm_multi_skill_represent",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
request = current.request
folder = request.folder
s3 = current.response.s3
settings = current.deployment_settings
job_title_id = self.hrm_job_title_id
location_id = self.gis_location_id
organisation_id = self.org_organisation_id
person_id = self.pr_person_id
AUTOCOMPLETE_HELP = current.messages.AUTOCOMPLETE_HELP
ORGANISATION = settings.get_hrm_organisation_label()
ADMIN = current.session.s3.system_roles.ADMIN
is_admin = auth.s3_has_role(ADMIN)
is_float_represent = IS_FLOAT_AMOUNT.represent
float_represent = lambda v: is_float_represent(v, precision=2)
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
root_org = auth.root_org()
if is_admin:
filter_opts = ()
elif root_org:
filter_opts = (root_org, None)
else:
filter_opts = (None,)
c = current.request.controller
if c not in ("hrm", "vol"):
c = "hrm"
if settings.get_org_autocomplete():
widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
else:
widget = None
# ---------------------------------------------------------------------
# Skill Types
# - optional hierarchy of skills
# disabled by default, enable with deployment_settings.hrm.skill_types = True
        # if enabled, then each skill type needs its own list of competency levels
#
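        # Example (illustrative; set in a template's config.py):
        #   settings.hrm.skill_types = True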
tablename = "hrm_skill_type"
define_table(tablename,
Field("name", notnull=True, unique=True, length=64,
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Skill Type"),
title_display = T("Details"),
title_list = T("Skill Type Catalog"),
title_update = T("Edit Skill Type"),
label_list_button = T("List Skill Types"),
label_delete_button = T("Delete Skill Type"),
msg_record_created = T("Skill Type added"),
msg_record_modified = T("Skill Type updated"),
msg_record_deleted = T("Skill Type deleted"),
msg_list_empty = T("Currently no entries in the catalog"),
)
skill_types = settings.get_hrm_skill_types()
label_create = crud_strings[tablename].label_create
represent = S3Represent(lookup = tablename)
skill_type_id = S3ReusableField("skill_type_id", "reference %s" % tablename,
default = self.skill_type_default,
label = T("Skill Type"),
ondelete = "RESTRICT",
readable = skill_types,
writable = skill_types,
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_skill_type.id",
represent
)),
sortby = "name",
comment = S3PopupLink(c = c,
f = "skill_type",
label = label_create,
title = label_create,
tooltip = T("Add a new skill type to the catalog."),
),
)
configure(tablename,
deduplicate = S3Duplicate(),
)
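        # NB S3Duplicate() with no arguments deduplicates on the "name" field by default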
# Components
add_components(tablename,
hrm_competency_rating = "skill_type_id",
)
# ---------------------------------------------------------------------
# Skills
# - these can be simple generic skills or can come from certifications
#
tablename = "hrm_skill"
define_table(tablename,
skill_type_id(empty = False),
Field("name", notnull=True, unique=True,
length=64, # Mayon compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Skill"),
title_display = T("Skill Details"),
title_list = T("Skill Catalog"),
title_update = T("Edit Skill"),
label_list_button = T("List Skills"),
label_delete_button = T("Delete Skill"),
msg_record_created = T("Skill added"),
msg_record_modified = T("Skill updated"),
msg_record_deleted = T("Skill deleted"),
msg_list_empty = T("Currently no entries in the catalog"),
)
autocomplete = False
label_create = crud_strings[tablename].label_create
if autocomplete:
# NB FilterField widget needs fixing for that too
widget = S3AutocompleteWidget(request.controller,
"skill")
tooltip = AUTOCOMPLETE_HELP
else:
widget = None
tooltip = None
skill_help = S3PopupLink(c = c,
f = "skill",
label = label_create,
tooltip = tooltip,
)
represent = S3Represent(lookup = tablename,
translate = True,
)
skill_id = S3ReusableField("skill_id", "reference %s" % tablename,
label = T("Skill"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_skill.id",
represent,
sort = True
)),
sortby = "name",
comment = skill_help,
widget = widget
)
multi_skill_represent = S3Represent(lookup = tablename,
multiple = True,
)
multi_skill_id = S3ReusableField("skill_id", "list:reference hrm_skill",
label = T("Skills"),
ondelete = "SET NULL",
represent = multi_skill_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_skill.id",
represent,
sort = True,
multiple = True
)),
sortby = "name",
#comment = skill_help,
widget = S3MultiSelectWidget(header = "",
selectedList = 3,
),
)
configure("hrm_skill",
deduplicate = S3Duplicate(),
)
# =====================================================================
# Competency Ratings
#
# These are the levels of competency. Default is Levels 1-3.
# The levels can vary by skill_type if deployment_settings.hrm.skill_types = True
#
# The textual description can vary a lot, but is important to individuals
# Priority is the numeric used for preferential role allocation in Mayon
#
# http://docs.oasis-open.org/emergency/edxl-have/cs01/xPIL-types.xsd
#
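        # Illustrative example only (ratings are data, not code): with priority 1
        # as most preferred, a default skill type might use
        #   Expert = 1, Competent = 2, Novice = 3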
tablename = "hrm_competency_rating"
define_table(tablename,
skill_type_id(empty = False),
Field("name",
length=64, # Mayon Compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
Field("priority", "integer",
default = 1,
label = T("Priority"),
requires = IS_INT_IN_RANGE(1, 10),
widget = S3SliderWidget(),
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Priority"),
T("Priority from 1 to 9. 1 is most preferred."),
),
),
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Competency Rating"),
title_display = T("Competency Rating Details"),
title_list = T("Competency Rating Catalog"),
title_update = T("Edit Competency Rating"),
label_list_button = T("List Competency Ratings"),
label_delete_button = T("Delete Competency Rating"),
msg_record_created = T("Competency Rating added"),
msg_record_modified = T("Competency Rating updated"),
msg_record_deleted = T("Competency Rating deleted"),
msg_list_empty = T("Currently no entries in the catalog"),
)
represent = S3Represent(lookup = tablename,
translate = True,
)
competency_id = S3ReusableField("competency_id", "reference %s" % tablename,
label = T("Competency"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_competency_rating.id",
represent,
orderby = "hrm_competency_rating.priority desc",
sort = True,
)),
sortby = "priority",
comment = self.competency_rating_comment(),
)
configure("hrm_competency_rating",
deduplicate = self.hrm_competency_rating_duplicate,
)
# ---------------------------------------------------------------------
# Competencies
#
# Link table between Persons & Skills
# - with a competency rating & confirmation
#
# Users can add their own but these are confirmed only by specific roles
#
# Component added in the hrm person() controller
#
tablename = "hrm_competency"
define_table(tablename,
person_id(ondelete = "CASCADE"),
skill_id(ondelete = "CASCADE"),
competency_id(),
# This field can only be filled-out by specific roles
# Once this has been filled-out then the other fields are locked
organisation_id(label = T("Confirming Organization"),
comment = None,
widget = widget,
writable = False,
),
Field("from_certification", "boolean",
default = False,
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Skill"),
title_display = T("Skill Details"),
title_list = T("Skills"),
title_update = T("Edit Skill"),
label_list_button = T("List Skills"),
label_delete_button = T("Remove Skill"),
msg_record_created = T("Skill added"),
msg_record_modified = T("Skill updated"),
msg_record_deleted = T("Skill removed"),
msg_list_empty = T("Currently no Skills registered"),
)
configure("hrm_competency",
context = {"person": "person_id",
},
deduplicate = S3Duplicate(primary = ("person_id",
"skill_id",
),
),
list_fields = ["id",
# Normally accessed via component
#"person_id",
"skill_id",
"competency_id",
"comments",
],
list_layout = hrm_competency_list_layout,
)
# =====================================================================
# Skill Provisions
#
# The minimum Competency levels in a Skill to be assigned the given Priority
# for allocation to Mayon's shifts for the given Job Role
#
#tablename = "hrm_skill_provision"
#define_table(tablename,
# Field("name", notnull=True, unique=True,
# length=32, # Mayon compatibility
# label = T("Name"),
# requires = [IS_NOT_EMPTY(),
# IS_LENGTH(32),
# ],
# ),
# job_title_id(),
# skill_id(),
# competency_id(),
# Field("priority", "integer",
# default = 1,
# requires = IS_INT_IN_RANGE(1, 10),
# widget = S3SliderWidget(),
# comment = DIV(_class = "tooltip",
# _title = "%s|%s" % (T("Priority"),
# T("Priority from 1 to 9. 1 is most preferred.")))
# ),
# s3_comments(),
# *s3_meta_fields())
#crud_strings[tablename] = Storage(
# label_create = T("Add Skill Provision"),
# title_display = T("Skill Provision Details"),
# title_list = T("Skill Provision Catalog"),
# title_update = T("Edit Skill Provision"),
# label_list_button = T("List Skill Provisions"),
# label_delete_button = T("Delete Skill Provision"),
# msg_record_created = T("Skill Provision added"),
# msg_record_modified = T("Skill Provision updated"),
# msg_record_deleted = T("Skill Provision deleted"),
# msg_list_empty = T("Currently no entries in the catalog"),
# )
#label_create = crud_strings[tablename].label_create
#represent = S3Represent(lookup = tablename)
#skill_group_id = S3ReusableField("skill_provision_id", "reference %s" % tablename,
# label = T("Skill Provision"),
# ondelete = "SET NULL",
# represent = represent,
# requires = IS_EMPTY_OR(
# IS_ONE_OF(db, "hrm_skill_provision.id",
# represent,
# )),
# sortby = "name",
# comment = DIV(A(label_create,
# _class = "s3_add_resource_link",
# _href = URL(f="skill_provision",
# args = "create",
# vars = {"format": "popup"},
# ),
# _target = "top",
        #                                                  _title = label_create,
# ),
# DIV(_class = "tooltip",
# _title = "%s|%s" % (label_create,
# T("Add a new skill provision to the catalog."),
# ),
# ),
# ),
# )
# =========================================================================
# Courses
#
external_courses = settings.get_hrm_trainings_external()
course_pass_marks = settings.get_hrm_course_pass_marks()
hrm_course_types = settings.get_hrm_course_types()
tablename = "hrm_course"
define_table(tablename,
Field("code", length=64,
label = T("Code"),
requires = IS_LENGTH(64),
),
Field("name", length=128, notnull=True,
label = T("Name"),
represent = lambda v: T(v) if v is not None \
else NONE,
requires = [IS_NOT_EMPTY(),
IS_LENGTH(128),
],
),
# Optionally restrict to Staff/Volunteers/Members
Field("type", "integer",
label = T("Type"),
represent = s3_options_represent(hrm_course_types),
requires = IS_EMPTY_OR(IS_IN_SET(hrm_course_types)),
# Enable in Templates as-required
readable = False,
writable = False,
),
# Only included in order to be able to set
# realm_entity to filter appropriately
# @ToDo: Option to see multiple Training Centers even as non_admin
organisation_id(default = root_org,
readable = is_admin,
writable = is_admin,
),
Field("external", "boolean",
default = False,
label = T("External"),
represent = s3_yes_no_represent,
readable = external_courses,
writable = external_courses,
),
Field("hours", "integer",
label = T("Hours"),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, None)
),
),
Field("pass_mark", "float",
default = 0.0,
label = T("Pass Mark"),
represent = float_represent,
requires = IS_EMPTY_OR(
IS_FLOAT_AMOUNT(minimum = 0.0)
),
readable = course_pass_marks,
writable = course_pass_marks,
),
Field("url",
label = T("URL"),
requires = IS_EMPTY_OR(
IS_URL()
),
represent = s3_url_represent,
),
s3_comments(label = T("Description"),
comment = None,
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Course"),
title_display = T("Course Details"),
title_list = T("Course Catalog"),
title_update = T("Edit Course"),
title_upload = T("Import Courses"),
label_list_button = T("List Courses"),
label_delete_button = T("Delete Course"),
msg_record_created = T("Course added"),
msg_record_modified = T("Course updated"),
msg_record_deleted = T("Course deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no entries in the catalog"),
)
if is_admin:
label_create = crud_strings[tablename].label_create
course_help = S3PopupLink(c = c,
f = "course",
label = label_create,
)
else:
course_help = None
#course_help = DIV(_class="tooltip",
# _title="%s|%s" % (T("Course"),
# AUTOCOMPLETE_HELP))
course_represent = S3Represent(lookup = tablename,
translate = True,
)
course_id = S3ReusableField("course_id", "reference %s" % tablename,
label = T("Course"),
ondelete = "RESTRICT",
represent = course_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_course.id",
course_represent,
filterby = "organisation_id",
filter_opts = filter_opts,
)),
sortby = "name",
comment = course_help,
                                    # Uncomment the widget below to use an Autocomplete instead of a Dropdown
#widget = S3AutocompleteWidget("hrm", "course")
)
if settings.get_hrm_create_certificates_from_courses():
onaccept = self.hrm_course_onaccept
else:
onaccept = None
configure(tablename,
create_next = URL(f="course",
args = ["[id]", "course_certificate"],
),
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("organisation_id",),
),
onaccept = onaccept,
)
# Components
add_components(tablename,
# Certificates
hrm_course_certificate = "course_id",
# Job Titles
hrm_course_job_title = "course_id",
# Sectors
org_sector = {"link": "hrm_course_sector",
"joinby": "course_id",
"key": "sector_id",
"actuate": "hide",
},
# Format for filter_widget
hrm_course_sector = "course_id",
# Trainees
hrm_training = "course_id",
)
# ---------------------------------------------------------------------
# Event Types
# - Trainings, Workshops, Meetings
#
tablename = "hrm_event_type"
define_table(tablename,
Field("name", notnull=True,
label = T("Name"),
requires = IS_NOT_EMPTY(),
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Event Type"),
title_display = T("Event Type Details"),
title_list = T("Event Types"),
title_update = T("Edit Event Type"),
label_list_button = T("List Event Types"),
label_delete_button = T("Delete Event Type"),
msg_record_created = T("Event Type added"),
msg_record_modified = T("Event Type updated"),
msg_record_deleted = T("Event Type deleted"),
msg_list_empty = T("Currently no entries in the catalog"),
)
event_types = settings.get_hrm_event_types()
label_create = crud_strings[tablename].label_create
represent = S3Represent(lookup = tablename)
event_type_id = S3ReusableField("event_type_id", "reference %s" % tablename,
label = T("Event Type"),
ondelete = "RESTRICT",
readable = event_types,
writable = event_types,
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_event_type.id",
represent
)),
sortby = "name",
comment = S3PopupLink(c = "hrm",
f = "event_type",
label = label_create,
title = label_create,
tooltip = T("Add a new event type to the catalog."),
),
)
configure(tablename,
deduplicate = S3Duplicate(),
)
# =========================================================================
# (Training) Events
# - can include Meetings, Workshops, etc
#
#site_label = settings.get_org_site_label()
site_label = T("Venue")
course_mandatory = settings.get_hrm_event_course_mandatory()
event_site = settings.get_hrm_event_site()
# Instructor settings
INSTRUCTOR = T("Instructor")
instructors = settings.get_hrm_training_instructors()
int_instructor = ext_instructor = False
int_instructor_tooltip = None
ext_instructor_label = INSTRUCTOR
ext_instructor_tooltip = None
if instructors in ("internal", "both"):
int_instructor = True
int_instructor_tooltip = DIV(_class = "tooltip",
_title = "%s|%s" % (INSTRUCTOR,
AUTOCOMPLETE_HELP,
),
)
if instructors == "both":
ext_instructor = True
ext_instructor_label = T("External Instructor")
ext_instructor_tooltip = DIV(_class = "tooltip",
_title = "%s|%s" % (T("External Instructor"),
T("Enter the name of the external instructor"),
),
)
elif instructors == "external":
ext_instructor = True
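        # Summary of the instructors setting (descriptive):
        #   "internal" -> only the internal person_id (Instructor) field is shown
        #   "external" -> only the free-text instructor field is shown
        #   "both"     -> both fields, with the free-text one labelled "External Instructor"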
tablename = "hrm_training_event"
define_table(tablename,
# Instance
super_link("pe_id", "pr_pentity"),
event_type_id(),
Field("name",
label = T("Name"),
readable = event_types,
writable = event_types,
),
course_id(empty = not course_mandatory),
organisation_id(label = T("Organized By")),
location_id(widget = S3LocationSelector(), # show_address = False
readable = not event_site,
writable = not event_site,
),
# Component, not instance
super_link("site_id", "org_site",
label = site_label,
instance_types = auth.org_site_types,
updateable = True,
not_filterby = "obsolete",
not_filter_opts = (True,),
default = auth.user.site_id if auth.is_logged_in() else None,
readable = event_site,
writable = event_site,
empty = not event_site,
represent = self.org_site_represent,
),
s3_datetime("start_date",
label = T("Start Date"),
min = datetime.datetime(2000, 1, 1),
set_min = "#hrm_training_event_end_date",
),
s3_datetime("end_date",
label = T("End Date"),
min = datetime.datetime(2000, 1, 1),
set_max = "#hrm_training_event_start_date",
),
# @ToDo: Auto-populate from course
Field("hours", "integer",
label = T("Hours"),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(1, None),
),
),
person_id(label = INSTRUCTOR,
comment = int_instructor_tooltip,
readable = int_instructor,
writable = int_instructor,
),
Field("instructor",
label = ext_instructor_label,
comment = ext_instructor_tooltip,
represent = lambda s: s if s else NONE,
readable = ext_instructor,
writable = ext_instructor,
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_TRAINING_EVENT = T("Create Training Event")
crud_strings[tablename] = Storage(
label_create = ADD_TRAINING_EVENT,
title_display = T("Training Event Details"),
title_list = T("Training Events"),
title_update = T("Edit Training Event"),
title_upload = T("Import Training Events"),
label_list_button = T("List Training Events"),
label_delete_button = T("Delete Training Event"),
msg_record_created = T("Training Event added"),
msg_record_modified = T("Training Event updated"),
msg_record_deleted = T("Training Event deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no training events registered"),
)
represent = hrm_TrainingEventRepresent()
training_event_id = S3ReusableField("training_event_id", "reference %s" % tablename,
label = T("Training Event"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_training_event.id",
represent,
#filterby = "organisation_id",
#filter_opts = filter_opts,
)),
sortby = "course_id",
comment = S3PopupLink(c = c,
f = "training_event",
label = ADD_TRAINING_EVENT,
),
                                            # Uncomment the widget below to use an Autocomplete instead of a Dropdown
#widget = S3AutocompleteWidget("hrm", "training_event")
)
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
if event_site:
filter_widgets = [S3TextFilter(["name",
"course_id$name",
"site_id$name",
"comments",
],
label = T("Search"),
comment = T("You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events."),
),
S3LocationFilter("site_id$location_id",
levels = levels,
hidden = True,
),
S3OptionsFilter("site_id",
label = site_label,
hidden = True,
),
S3DateFilter("start_date",
label = T("Date"),
hide_time = True,
hidden = True,
)
]
else:
filter_widgets = [S3TextFilter(["name",
"course_id$name",
"location_id$name",
"comments",
],
label = T("Search"),
comment = T("You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events."),
),
S3LocationFilter("location_id",
levels = levels,
hidden = True,
),
S3DateFilter("start_date",
label = T("Date"),
hide_time = True,
hidden = True,
)
]
# Resource Configuration
configure(tablename,
create_next = URL(f="training_event",
args = ["[id]", "participant"],
),
deduplicate = S3Duplicate(primary = ("course_id",
"start_date",
),
secondary = ("site_id",),
),
filter_widgets = filter_widgets,
realm_entity = self.hrm_training_event_realm_entity,
super_entity = "pr_pentity",
)
# Components
add_components(tablename,
gis_location = {"link": "hrm_event_location",
"joinby": "training_event_id",
"key": "location_id",
"actuate": "hide",
},
pr_person = [# Instructors
{"name": "instructor",
#"joinby": "person_id",
"link": "hrm_training_event_instructor",
"joinby": "training_event_id",
"key": "person_id",
"actuate": "hide",
},
# Participants
{"name": "participant",
"link": "hrm_training",
"joinby": "training_event_id",
"key": "person_id",
"actuate": "hide",
},
],
hrm_event_tag = "training_event_id",
# This format is better for permissions on the link table
hrm_training = "training_event_id",
# Format for list_fields
hrm_training_event_instructor = "training_event_id",
hrm_training_event_report = {"joinby": "training_event_id",
"multiple": False,
},
#project_strategy = {"link": "project_strategy_event",
# "joinby": "training_event_id",
# "key": "strategy_id",
# "actuate": "hide",
# },
#project_programme = {"link": "project_programme_event",
# "joinby": "training_event_id",
# "key": "programme_id",
# "actuate": "hide",
# },
#project_project = {"link": "project_project_event",
# "joinby": "training_event_id",
# "key": "project_id",
# "actuate": "hide",
# },
dc_target = {"link": "dc_target_event",
"joinby": "training_event_id",
"key": "target_id",
"actuate": "replace",
},
)
# =====================================================================
# Training Event Locations
# - e.g. used for showing which Locations an Event is relevant for
#
tablename = "hrm_event_location"
define_table(tablename,
training_event_id(empty = False,
ondelete = "CASCADE",
),
location_id(empty = False,
ondelete = "CASCADE",
widget = S3LocationSelector(#show_address = False,
),
),
#s3_comments(),
*s3_meta_fields())
# =====================================================================
# Training Event Tags
tablename = "hrm_event_tag"
define_table(tablename,
training_event_id(empty = False,
ondelete = "CASCADE",
),
# key is a reserved word in MySQL
Field("tag",
label = T("Key"),
),
Field("value",
label = T("Value"),
),
#s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("training_event_id",
"tag",
),
),
)
# =====================================================================
# Training Event Report
# - this is currently configured for RMS
        #   (move the custom labels there if this needs to be made more generic)
#
tablename = "hrm_training_event_report"
define_table(tablename,
# Instance
super_link("doc_id", "doc_entity"),
training_event_id(empty = False,
ondelete = "CASCADE",
),
person_id(),
self.hrm_job_title_id(label = T("Position"),
),
organisation_id(),
Field("purpose",
label = T("Training Purpose"),
),
Field("code",
label = T("Code"),
),
s3_date(label = T("Report Date")),
Field("objectives",
label = T("Objectives"),
widget = s3_comments_widget,
),
Field("methodology",
label = T("Methodology"),
widget = s3_comments_widget,
),
Field("actions",
label = T("Implemented Actions"),
widget = s3_comments_widget,
),
Field("participants",
label = T("About the participants"),
widget = s3_comments_widget,
),
Field("results",
label = T("Results and Lessons Learned"),
widget = s3_comments_widget,
),
Field("followup",
label = T("Follow-up Required"),
widget = s3_comments_widget,
),
Field("additional",
label = T("Additional relevant information"),
widget = s3_comments_widget,
),
s3_comments(label = T("General Comments")),
*s3_meta_fields())
configure(tablename,
super_entity = "doc_entity",
)
# =====================================================================
        # Training Instructors
# - used if there can be multiple per-event
#
tablename = "hrm_training_event_instructor"
define_table(tablename,
training_event_id(empty = False,
ondelete = "CASCADE",
),
person_id(comment = self.pr_person_comment(INSTRUCTOR,
AUTOCOMPLETE_HELP,
child = "person_id"),
empty = False,
label = INSTRUCTOR,
ondelete = "CASCADE",
),
#s3_comments(),
*s3_meta_fields())
# =====================================================================
# (Training) Participations (Trainees)
#
# These are an element of credentials:
# - a minimum number of hours of training need to be done each year
#
# Users can add their own but these are confirmed only by specific roles
#
course_grade_opts = settings.get_hrm_course_grades()
        # @ToDo: make this a configuration setting once required
role_opts = {1: T("Participant"),
2: T("Facilitator"),
3: T("Observer"),
}
        # @ToDo: make this a configuration setting once required
status_opts = {1: T("Applied"),
2: T("Approved"),
3: T("Rejected"),
4: T("Invited"),
5: T("Accepted"),
6: T("Declined"),
}
tablename = "hrm_training"
define_table(tablename,
# @ToDo: Create a way to add new people to training as staff/volunteers
person_id(comment = self.pr_person_comment(
T("Participant"),
T("Type the first few characters of one of the Participant's names."),
child="person_id"),
empty = False,
ondelete = "CASCADE",
),
# Just used when created from participation in an Event
training_event_id(ondelete = "SET NULL",
readable = False,
writable = False,
),
course_id(empty = not course_mandatory,
),
Field("role", "integer",
default = 1,
label = T("Role"),
represent = s3_options_represent(role_opts),
requires = IS_EMPTY_OR(
IS_IN_SET(role_opts,
zero = None)),
# Enable in templates as-required
readable = False,
writable = False,
),
s3_datetime(),
s3_datetime("end_date",
label = T("End Date"),
),
Field("hours", "integer",
label = T("Hours"),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, None)
),
),
Field("status", "integer",
default = 4, # Invited
label = T("Status"),
represent = s3_options_represent(status_opts),
requires = IS_EMPTY_OR(IS_IN_SET(status_opts)),
# Enable in templates as-required
readable = False,
writable = False,
),
# This field can only be filled-out by specific roles
# Once this has been filled-out then the other fields are locked
Field("grade", "integer",
label = T("Grade"),
represent = s3_options_represent(course_grade_opts),
requires = IS_EMPTY_OR(
IS_IN_SET(course_grade_opts,
zero = None)),
readable = False,
writable = False,
),
# Can store specific test result here & then auto-calculate the Pass/Fail
Field("grade_details", "float",
default = 0.0,
label = T("Grade Details"),
represent = float_represent,
requires = IS_EMPTY_OR(
IS_FLOAT_AMOUNT(minimum = 0.0)
),
readable = course_pass_marks,
writable = course_pass_marks,
),
Field("qualitative_feedback",
label = T("Qualitative Feedback"),
widget = s3_comments_widget,
# Enable in templates as-required
readable = False,
writable = False,
),
Field("file", "upload",
autodelete = True,
length = current.MAX_FILENAME_LENGTH,
represent = self.hrm_training_file_represent,
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(folder,
"uploads"),
# Enable (& label) in templates as-required
readable = False,
writable = False,
),
Field.Method("job_title", hrm_training_job_title),
Field.Method("organisation", hrm_training_organisation),
s3_comments(),
*s3_meta_fields())
# Suitable for use when adding a Training to a Person
# The ones when adding a Participant to an Event are done in the Controller
crud_strings[tablename] = Storage(
label_create = T("Add Training"),
title_display = T("Training Details"),
title_list = T("Trainings"),
title_update = T("Edit Training"),
title_report = T("Training Report"),
title_upload = T("Import Training Participants"),
label_list_button = T("List Trainings"),
label_delete_button = T("Delete Training"),
msg_record_created = T("Training added"),
msg_record_modified = T("Training updated"),
msg_record_deleted = T("Training deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("No entries currently registered"),
)
filter_widgets = [
S3TextFilter(["person_id$first_name",
"person_id$last_name",
"course_id$name",
"training_event_id$name",
"comments",
],
label = T("Search"),
comment = T("You can search by trainee name, course name or comments. You may use % as wildcard. Press 'Search' without input to list all trainees."),
_class = "filter-search",
),
S3OptionsFilter("person_id$human_resource.organisation_id",
# Doesn't support translations
#represent = "%(name)s",
),
S3LocationFilter("person_id$location_id",
levels = levels,
),
S3OptionsFilter("course_id",
# Doesn't support translations
#represent="%(name)s",
),
S3OptionsFilter("training_event_id$site_id",
label = T("Training Facility"),
represent = self.org_site_represent,
),
S3OptionsFilter("grade",
),
S3DateFilter("date",
hide_time = True,
),
]
# NB training_event_controller overrides these for Participants
list_fields = ["course_id",
"person_id",
#(T("Job Title"), "job_title"),
(ORGANISATION, "organisation"),
"grade",
]
if course_pass_marks:
list_fields.append("grade_details")
list_fields.append("date")
report_fields = [(T("Training Event"), "training_event_id"),
"person_id",
"course_id",
"grade",
(ORGANISATION, "organisation"),
(T("Facility"), "training_event_id$site_id"),
(T("Month"), "month"),
(T("Year"), "year"),
]
rappend = report_fields.append
for level in levels:
rappend("person_id$location_id$%s" % level)
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = report_fields,
methods = ["count", "list"],
defaults = Storage(
rows = "training.course_id",
cols = "training.month",
fact = "count(training.person_id)",
totals = True,
)
)
# Resource Configuration
configure(tablename,
context = {"person": "person_id",
},
deduplicate = S3Duplicate(primary = ("person_id",
"course_id",
),
secondary = ("date",),
),
filter_widgets = filter_widgets,
list_fields = list_fields,
list_layout = hrm_training_list_layout,
onaccept = hrm_training_onaccept,
ondelete = hrm_training_onaccept,
# Only used in Imports
#onvalidation = hrm_training_onvalidation,
orderby = "hrm_training.date desc",
report_options = report_options,
)
# Components
add_components(tablename,
hrm_certification = {"name": "certification_from_training", # Distinguish from that linked to the Person
"joinby": "training_id",
"multiple": False,
},
)
# =====================================================================
# Trainings
#
# A list:reference table to support Contains queries:
# - people who have attended both Course A & Course B
#
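        # Example (illustrative): a resource filter can apply a "contains" lookup
        # to "trainings.course_id" to find people who attended a given set of
        # courses; the exact operator syntax depends on the S3 resource query API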
tablename = "hrm_trainings"
define_table(tablename,
person_id(empty = False,
ondelete = "CASCADE",
),
Field("course_id", "list:reference hrm_course",
label = T("Courses Attended"),
ondelete = "SET NULL",
represent = S3Represent(lookup = "hrm_course",
multiple = True,
translate = True,
),
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_course.id",
course_represent,
sort = True,
multiple = True,
)),
widget = S3MultiSelectWidget(header = "",
selectedList = 3,
),
),
*s3_meta_fields())
# =====================================================================
# Certificates
#
# NB Some Orgs will only trust the certificates of some Orgs
# - we currently make no attempt to manage this trust chain
#
filter_certs = settings.get_hrm_filter_certificates()
if filter_certs:
label = ORGANISATION
else:
label = T("Certifying Organization")
tablename = "hrm_certificate"
define_table(tablename,
Field("name", notnull=True,
length=128, # Mayon Compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(128),
],
),
organisation_id(default = root_org if filter_certs else None,
label = label,
readable = is_admin or not filter_certs,
writable = is_admin or not filter_certs,
widget = widget,
),
Field("expiry", "integer",
label = T("Expiry (months)"),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(1, None)
),
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Certificate"),
title_display = T("Certificate Details"),
title_list = T("Certificate Catalog"),
title_update = T("Edit Certificate"),
title_upload = T("Import Certificates"),
label_list_button = T("List Certificates"),
label_delete_button = T("Delete Certificate"),
msg_record_created = T("Certificate added"),
msg_record_modified = T("Certificate updated"),
msg_record_deleted = T("Certificate deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no entries in the catalog"),
)
label_create = crud_strings[tablename].label_create
represent = S3Represent(lookup = tablename)
certificate_id = S3ReusableField("certificate_id", "reference %s" % tablename,
label = T("Certificate"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_certificate.id",
represent,
filterby = "organisation_id" if filter_certs else None,
filter_opts = filter_opts
)),
sortby = "name",
comment = S3PopupLink(c = c,
f = "certificate",
label = label_create,
title = label_create,
tooltip = T("Add a new certificate to the catalog."),
),
)
if settings.get_hrm_use_skills():
create_next = URL(f="certificate",
args=["[id]", "certificate_skill"])
else:
create_next = None
configure(tablename,
create_next = create_next,
deduplicate = S3Duplicate(),
)
# Components
add_components(tablename,
hrm_certificate_skill = "certificate_id",
)
# =====================================================================
# Certifications
#
# Link table between Persons & Certificates
#
# These are an element of credentials
#
tablename = "hrm_certification"
define_table(tablename,
person_id(empty = False,
ondelete = "CASCADE",
),
certificate_id(empty = False,
),
# @ToDo: Option to auto-generate (like Waybills: SiteCode-CourseCode-UniqueNumber)
Field("number",
label = T("License Number"),
),
#Field("status", label = T("Status")),
s3_date(label = T("Expiry Date")),
Field("image", "upload",
autodelete = True,
label = T("Scanned Copy"),
length = current.MAX_FILENAME_LENGTH,
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(folder,
"uploads"),
),
# This field can only be filled-out by specific roles
# Once this has been filled-out then the other fields are locked
organisation_id(comment = None,
label = T("Confirming Organization"),
widget = widget,
writable = False,
),
# Optional: When certification comes from a training
Field("training_id", "reference hrm_training",
readable = False,
writable = False,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_training.id",
)),
),
s3_comments(),
*s3_meta_fields())
configure(tablename,
context = {"person": "person_id",
},
list_fields = ["certificate_id",
"number",
"date",
#"comments",
],
onaccept = self.hrm_certification_onaccept,
ondelete = self.hrm_certification_onaccept,
)
crud_strings[tablename] = Storage(
label_create = T("Add Certification"),
title_display = T("Certification Details"),
title_list = T("Certifications"),
title_update = T("Edit Certification"),
label_list_button = T("List Certifications"),
label_delete_button = T("Delete Certification"),
msg_record_created = T("Certification added"),
msg_record_modified = T("Certification updated"),
msg_record_deleted = T("Certification deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("No entries currently registered"),
)
# =====================================================================
# Credentials
#
# This determines whether an Organisation believes a person is suitable
# to fulfil a role. It is determined based on a combination of
# experience, training & a performance rating (medical fitness to come).
# @ToDo: Workflow to make this easy for the person doing the credentialling
#
# http://www.dhs.gov/xlibrary/assets/st-credentialing-interoperability.pdf
#
# Component added in the hrm person() controller
#
# Used by Courses
# & 6-monthly rating (Portuguese Bombeiros)
hrm_pass_fail_opts = {8: T("Pass"),
9: T("Fail"),
}
# 12-monthly rating (Portuguese Bombeiros)
# - this is used to determine rank progression (need 4-5 for 5 years)
#hrm_five_rating_opts = {1: T("Poor"),
# 2: T("Fair"),
# 3: T("Good"),
# 4: T("Very Good"),
# 5: T("Excellent"),
# }
# Lookup to represent both sorts of ratings
hrm_performance_opts = {1: T("Poor"),
2: T("Fair"),
3: T("Good"),
4: T("Very Good"),
5: T("Excellent"),
8: T("Pass"),
9: T("Fail"),
}
tablename = "hrm_credential"
define_table(tablename,
person_id(ondelete = "CASCADE"),
job_title_id(),
organisation_id(label = T("Credentialling Organization"),
widget = widget,
),
Field("performance_rating", "integer",
label = T("Performance Rating"),
represent = s3_options_represent(hrm_performance_opts),
# Default to pass/fail (can override to 5-levels in Controller)
# @ToDo: Build this onaccept of hrm_appraisal
requires = IS_EMPTY_OR(IS_IN_SET(hrm_pass_fail_opts)),
),
s3_date("start_date",
default = "now",
label = T("Date Received"),
set_min = "#hrm_credential_end_date",
),
s3_date("end_date",
label = T("Expiry Date"),
set_max = "#hrm_credential_start_date",
start_field = "hrm_credential_start_date",
default_interval = 12,
default_explicit = True,
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Credential"),
title_display = T("Credential Details"),
title_list = T("Credentials"),
title_update = T("Edit Credential"),
label_list_button = T("List Credentials"),
label_delete_button = T("Delete Credential"),
msg_record_created = T("Credential added"),
msg_record_modified = T("Credential updated"),
msg_record_deleted = T("Credential deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no Credentials registered"),
)
configure(tablename,
context = {"person": "person_id",
},
list_fields = ["job_title_id",
"start_date",
"end_date",
],
list_layout = hrm_credential_list_layout,
)
# =====================================================================
# Skill Equivalence
#
# Link table between Certificates & Skills
#
# Used to auto-populate the relevant skills
# - faster than runtime joins at a cost of data integrity
#
tablename = "hrm_certificate_skill"
define_table(tablename,
certificate_id(empty = False,
ondelete = "CASCADE",
),
skill_id(empty = False,
ondelete = "CASCADE",
),
competency_id(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Skill Equivalence"),
title_display = T("Skill Equivalence Details"),
title_list = T("Skill Equivalences"),
title_update = T("Edit Skill Equivalence"),
label_list_button = T("List Skill Equivalences"),
label_delete_button = T("Delete Skill Equivalence"),
msg_record_created = T("Skill Equivalence added"),
msg_record_modified = T("Skill Equivalence updated"),
msg_record_deleted = T("Skill Equivalence deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no Skill Equivalences registered"),
)
# =====================================================================
# Course Certificates
#
# Link table between Courses & Certificates
#
# Used to auto-populate the relevant certificates
# - faster than runtime joins at a cost of data integrity
#
tablename = "hrm_course_certificate"
define_table(tablename,
course_id(empty = False,
ondelete = "CASCADE",
),
certificate_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Certificate for Course"),
title_display = T("Course Certificate Details"),
title_list = T("Course Certificates"),
title_update = T("Edit Course Certificate"),
label_list_button = T("List Course Certificates"),
label_delete_button = T("Delete Course Certificate"),
msg_record_created = T("Course Certificate added"),
msg_record_modified = T("Course Certificate updated"),
msg_record_deleted = T("Course Certificate deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no Course Certificates registered"),
)
# =====================================================================
# Course <> Job Titles link table
#
# Show which courses a person has done that are relevant to specific job roles
#
tablename = "hrm_course_job_title"
define_table(tablename,
course_id(empty = False,
ondelete = "CASCADE",
),
job_title_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# =====================================================================
# Course <> Sectors link table
#
# Show which courses a person has done that are relevant to specific sectors
#
tablename = "hrm_course_sector"
define_table(tablename,
course_id(empty = False,
ondelete = "CASCADE",
),
self.org_sector_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {#"hrm_competency_id": competency_id,
"hrm_course_id": course_id,
"hrm_skill_id": skill_id,
"hrm_multi_skill_id": multi_skill_id,
"hrm_multi_skill_represent": multi_skill_represent,
"hrm_training_event_id": training_event_id,
"hrm_certification_onaccept": self.hrm_certification_onaccept,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
"""
Return safe defaults in case the model has been deactivated.
"""
dummy = S3ReusableField.dummy
return {#"hrm_competency_id": dummy("competency_id"),
"hrm_course_id": dummy("course_id"),
"hrm_skill_id": dummy("skill_id"),
"hrm_multi_skill_id": dummy("skill_id", "list:reference"),
}
# -------------------------------------------------------------------------
@staticmethod
def skill_type_default():
""" Lookup the default skill_type """
if current.deployment_settings.get_hrm_skill_types():
# We have many - don't set a default
default = None
else:
# We don't use skill_types so find the default
db = current.db
table = db.hrm_skill_type
skill_type = db(table.deleted == False).select(table.id,
limitby = (0, 1),
).first()
try:
default = skill_type.id
except AttributeError:
# Create a default skill_type
default = table.insert(name = "Default")
return default
# -------------------------------------------------------------------------
@staticmethod
def competency_rating_comment():
""" Define the comment for the HRM Competency Rating widget """
T = current.T
s3 = current.response.s3
if current.request.controller == "vol":
controller = "vol"
else:
controller = "hrm"
if current.auth.s3_has_role(current.session.s3.system_roles.ADMIN):
label_create = s3.crud_strings["hrm_competency_rating"].label_create
comment = S3PopupLink(c = controller,
f = "competency_rating",
vars = {"child":"competency_id"},
label = label_create,
tooltip = T("Add a new competency rating to the catalog."),
)
else:
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Competency Rating"),
T("Level of competency this person has with this skill."),
),
)
if current.deployment_settings.get_hrm_skill_types():
script = \
'''$.filterOptionsS3({
'trigger':'skill_id',
'target':'competency_id',
'lookupResource':'competency',
'lookupURL':S3.Ap.concat('/%s/skill_competencies/'),
'msgNoRecords':'%s'
})''' % (controller, T("No Ratings for Skill Type"))
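            # The script cascades the competency_id dropdown off the selected
            # skill_id, looking up the valid ratings per skill type via the
            # skill_competencies method of the hrm/vol controller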
comment = TAG[""](comment,
S3ScriptItem(script = script))
return comment
# -------------------------------------------------------------------------
@staticmethod
def hrm_course_onaccept(form):
"""
Ensure that there is a Certificate created for each Course
- only called when create_certificates_from_courses in (True, "organisation_id")
"""
form_vars = form.vars
course_id = form_vars.id
db = current.db
s3db = current.s3db
ltable = s3db.hrm_course_certificate
exists = db(ltable.course_id == course_id).select(ltable.id,
limitby = (0, 1),
)
if not exists:
name = form_vars.get("name")
organisation_id = form_vars.get("organisation_id")
if not name or not organisation_id:
table = s3db.hrm_course
course = db(table.id == course_id).select(table.name,
table.organisation_id,
limitby = (0, 1),
).first()
name = course.name
organisation_id = course.organisation_id
ctable = s3db.hrm_certificate
certificate = db(ctable.name == name).select(ctable.id,
limitby = (0, 1),
).first()
if certificate:
certificate_id = certificate.id
else:
if current.deployment_settings.get_hrm_create_certificates_from_courses() is True:
# Don't limit to Org
organisation_id = None
certificate_id = ctable.insert(name = name,
organisation_id = organisation_id,
)
ltable.insert(course_id = course_id,
certificate_id = certificate_id,
)
# -------------------------------------------------------------------------
@staticmethod
def hrm_certification_onaccept(form):
"""
Ensure that Skills are Populated from Certifications
- called both onaccept & ondelete
"""
# Deletion and update have a different format
delete = False
try:
record_id = form.vars.id
except AttributeError:
# Delete
record_id = form.id
delete = True
# Read the full record
db = current.db
table = db.hrm_certification
record = db(table.id == record_id).select(table.person_id,
table.training_id,
table.number,
limitby = (0, 1),
).first()
if delete:
person_id = form.person_id
training_id = form.training_id
else:
person_id = record.person_id
training_id = record.training_id
if not person_id:
# This record is being created as a direct component of the Training,
            # in order to set the Number (RMS use case).
# Find the other record (created onaccept of training)
query = (table.training_id == training_id) & \
(table.id != record_id)
original = db(query).select(table.id,
limitby = (0, 1),
).first()
if original:
# Update it with the number
number = record.number
original.update_record(number = number)
# Delete this extraneous record
db(table.id == record_id).delete()
# Don't update any competencies
return
ctable = db.hrm_competency
cstable = db.hrm_certificate_skill
# Drop all existing competencies which came from certification
# - this is a lot easier than selective deletion
# @ToDo: Avoid this method as it will break Inline Component Updates
# if we ever use those (see hrm_training_onaccept)
query = (ctable.person_id == person_id) & \
(ctable.from_certification == True)
db(query).delete()
# Figure out which competencies we're _supposed_ to have.
# FIXME unlimited select
query = (table.person_id == person_id) & \
(table.certificate_id == cstable.certificate_id) & \
(cstable.skill_id == db.hrm_skill.id)
certifications = db(query).select()
# Add these competencies back in.
# FIXME unlimited select inside loop
# FIXME multiple implicit db queries inside nested loop
# FIXME db.delete inside nested loop
# FIXME unnecessary select (sub-select in Python loop)
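        # For each certified skill: existing competencies for the person & skill
        # with a higher priority value than the certificate's are removed, and the
        # certificate's competency is (re)inserted unless an existing entry with a
        # lower or equal priority value is found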
for certification in certifications:
skill = certification["hrm_skill"]
cert = certification["hrm_certificate_skill"]
query = (ctable.person_id == person_id) & \
(ctable.skill_id == skill.id)
existing = db(query).select()
better = True
for e in existing:
if e.competency_id.priority > cert.competency_id.priority:
db(ctable.id == e.id).delete()
else:
better = False
break
if better:
ctable.update_or_insert(person_id = person_id,
competency_id = cert.competency_id,
skill_id = skill.id,
comments = "Added by certification",
from_certification = True,
)
# -------------------------------------------------------------------------
@staticmethod
def hrm_competency_rating_duplicate(item):
"""
This callback will be called when importing records
it will look to see if the record being imported is a duplicate.
@param item: An S3ImportItem object which includes all the details
of the record being imported
If the record is a duplicate then it will set the item method to update
Rules for finding a duplicate:
- Look for a record with the same name, ignoring case and skill_type
"""
name = item.data.get("name")
skill = False
for citem in item.components:
if citem.tablename == "hrm_skill_type":
cdata = citem.data
if "name" in cdata:
skill = cdata.name
        if skill is False:
return
table = item.table
stable = current.s3db.hrm_skill_type
query = (table.name.lower() == s3_str(name).lower()) & \
(table.skill_type_id == stable.id) & \
                (stable.name.lower() == s3_str(skill).lower())
duplicate = current.db(query).select(table.id,
limitby = (0, 1),
).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def hrm_training_file_represent(value):
""" File representation """
if value:
try:
# Read the filename from the field value
filename = current.db.hrm_training.file.retrieve(value)[0]
except IOError:
return current.T("File not found")
else:
return A(filename,
_href = URL(c="default", f="download",
args = [value],
))
else:
return NONE
# -------------------------------------------------------------------------
@staticmethod
def hrm_training_event_realm_entity(table, record):
"""
Set the training_event realm entity
- to the root Org of the Site
"""
db = current.db
stable = db.org_site
query = (stable.site_id == record.site_id)
if current.deployment_settings.get_org_branches():
site = db(query).select(stable.organisation_id,
limitby = (0, 1),
).first()
if site:
org_id = site.organisation_id
root_org = current.cache.ram(
# Common key for all users of this org & vol_service_record()
"root_org_%s" % org_id,
lambda: current.s3db.org_root_organisation(org_id),
time_expire = 120
)
otable = db.org_organisation
org = db(otable.id == root_org).select(otable.realm_entity,
limitby = (0, 1),
).first()
if org:
return org.realm_entity
else:
otable = db.org_organisation
query &= (stable.organisation_id == otable.id)
org = db(query).select(otable.realm_entity,
limitby = (0, 1),
).first()
if org:
return org.realm_entity
return None
# =============================================================================
def hrm_training_onvalidation(form):
"""
If the Training is created from a Training Event (e.g. during Import),
then auto-populate the fields from that
"""
form_vars = form.vars
training_event_id = form_vars.get("training_event_id", None)
if not training_event_id:
# Nothing to do
return
db = current.db
table = db.hrm_training_event
record = db(table.id == training_event_id).select(table.course_id,
table.start_date,
table.end_date,
table.hours,
cache = current.s3db.cache,
limitby = (0, 1),
).first()
try:
form_vars.course_id = record.course_id
form_vars.date = record.start_date
form_vars.end_date = record.end_date
form_vars.hours = record.hours
except AttributeError:
# Record not found
return
# =============================================================================
def hrm_training_onaccept(form):
"""
Ensure that Certifications, Hours & list:Trainings are Populated from Trainings
Provide a Pass/Fail rating based on the Course's Pass Mark
- called both onaccept & ondelete
"""
# Deletion and update have a different format
delete = False
try:
training_id = form.vars.id
except AttributeError:
training_id = form.id
delete = True
# Get the full record
db = current.db
table = db.hrm_training
record = db(table.id == training_id).select(table.id,
table.person_id,
table.course_id,
table.date,
table.hours,
table.grade,
table.grade_details,
limitby = (0, 1),
).first()
if delete:
course_id = form.course_id
person_id = form.person_id
else:
course_id = record.course_id
person_id = record.person_id
s3db = current.s3db
course_table = db.hrm_course
settings = current.deployment_settings
if course_id:
course_pass_marks = settings.get_hrm_course_pass_marks()
if course_pass_marks and not record.grade and record.grade_details:
# Provide a Pass/Fail rating based on the Course's Pass Mark
course = db(course_table.id == course_id).select(course_table.pass_mark,
limitby = (0, 1),
).first()
if course:
if record.grade_details >= course.pass_mark:
# Pass
record.update_record(grade = 8)
else:
# Fail
record.update_record(grade = 9)
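            # e.g. (illustrative values) with pass_mark=75.0: grade_details=80.0
            # gives grade 8 (Pass), grade_details=60.0 gives grade 9 (Fail)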
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
# Check if this person is a volunteer
hrtable = db.hrm_human_resource
query = (hrtable.person_id == person_id) & \
(hrtable.deleted == False)
vol = db(query).select(hrtable.type,
limitby = (0, 1),
).first()
if vol and vol.type == 2:
# Update Hours
ptable = s3db.hrm_programme_hours
query = (ptable.training_id == training_id)
if delete:
resource = s3db.resource("hrm_programme_hours", filter=query)
# Automatically propagates to Active Status
resource.delete()
else:
date = record.date
hours = record.hours
# Update or Insert?
exists = db(query).select(ptable.id,
ptable.date,
ptable.hours,
limitby = (0, 1),
).first()
if exists:
if date != exists.date or \
hours != exists.hours:
db(query).update(date = date,
hours = hours,
)
ph_id = exists.id
else:
# Nothing to propagate
ph_id = None
else:
ph_id = ptable.insert(training_id = training_id,
person_id = person_id,
date = date,
hours = hours,
training = True,
)
if ph_id:
# Propagate to Active Status
form = Storage()
form.vars = Storage()
form.vars.id = ph_id
hrm_programme_hours_onaccept(form)
# Update Trainings list:reference for Contains filter
ltable = db.hrm_trainings
query = (table.person_id == person_id) & \
(table.deleted == False)
courses = db(query).select(table.course_id,
distinct = True,
)
courses = [c.course_id for c in courses if c.course_id is not None]
exists = db(ltable.person_id == person_id).select(ltable.id,
limitby = (0, 1),
).first()
if exists:
exists.update_record(course_id = courses)
else:
ltable.insert(person_id = person_id,
course_id = courses,
)
# Update Certifications
ctable = db.hrm_certification
ltable = db.hrm_course_certificate
    # Note: previously we dropped all existing certifications which came from
    # trainings and re-created them (easier than selective deletion), but that
    # broke inline component updates since the record_id changed - hence the
    # selective handling below
if delete:
# Remove certifications if provided by this training and no other
# training led to it
query = (ctable.training_id == training_id) & \
(ctable.deleted == False)
certifications = db(query).select(ctable.id,
ctable.certificate_id)
for certification in certifications:
query = (ltable.certificate_id == certification.certificate_id) & \
(ltable.deleted == False) & \
(ltable.course_id == table.course_id) & \
                    (table.deleted == False) & \
                    (table.person_id == person_id)
trainings = db(query).select(table.id,
table.date,
limitby = (0, 1),
orderby = "date desc",
)
if trainings:
# Update the training_id
certification.update_record(training_id = trainings.first().id)
else:
# Remove the certification
query = (ctable.id == certification.id)
resource = s3db.resource("hrm_certification", filter=query)
# Automatically propagates to Skills
resource.delete()
else:
if course_id:
# Which certificates does this course give?
query = (ltable.course_id == course_id) & \
(ltable.deleted == False)
certificates = db(query).select(ltable.certificate_id)
# Lookup user_id to allow the user to see their certifications
ptable = db.pr_person
putable = s3db.pr_person_user
query = (ptable.id == person_id) & \
(putable.pe_id == ptable.pe_id)
user = db(query).select(putable.user_id,
limitby = (0, 1),
).first()
if user:
user_id = user.user_id
else:
# Record has no special ownership
user_id = None
# Add any missing certifications
hrm_certification_onaccept = s3db.hrm_certification_onaccept
for certificate in certificates:
certification_id = ctable.update_or_insert(person_id = person_id,
certificate_id = certificate.certificate_id,
training_id = training_id,
comments = "Added by training",
owned_by_user = user_id,
)
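                # NB update_or_insert returns the id of a newly inserted
                # certification, or None if an existing one was merely updated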
# Propagate to Skills
form = Storage()
form.vars = Storage()
form.vars.id = certification_id
hrm_certification_onaccept(form)
# =============================================================================
class HRAppraisalModel(S3Model):
"""
Appraisal for an HR
- can be for a specific Mission or routine annual appraisal
"""
names = ("hrm_appraisal",
"hrm_appraisal_document",
)
def model(self):
T = current.T
configure = self.configure
define_table = self.define_table
person_id = self.pr_person_id
if current.deployment_settings.get_org_autocomplete():
org_widget = S3OrganisationAutocompleteWidget(default_from_profile = True)
else:
org_widget = None
# =====================================================================
# Appraisal
#
tablename = "hrm_appraisal"
define_table(tablename,
person_id(),
# For Mission or Event
Field("code",
label = T("Code"),
readable = False,
writable = False,
),
self.org_organisation_id(widget = org_widget),
self.hrm_job_title_id(),
s3_date(),
Field("rating", "float",
label = T("Rating"),
# @ToDo: make this configurable
# 1 to 4
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(1, 5)
),
widget = S3SliderWidget(step = 0.1,
type = "float",
),
),
person_id("supervisor_id",
label = T("Supervisor"),
widget = S3AddPersonWidget(),
),
s3_comments(),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Appraisal"),
title_display = T("Appraisal Details"),
title_list = T("Appraisals"),
title_update = T("Edit Appraisal"),
label_list_button = T("List of Appraisals"),
label_delete_button = T("Delete Appraisal"),
msg_record_created = T("Appraisal added"),
msg_record_modified = T("Appraisal updated"),
msg_record_deleted = T("Appraisal deleted"),
msg_no_match = T("No Appraisals found"),
msg_list_empty = T("Currently no Appraisals entered"),
)
crud_form = S3SQLCustomForm("organisation_id",
"job_title_id",
"date",
"rating",
"supervisor_id",
S3SQLInlineComponent("document",
label = T("Files"),
link = False,
fields = ["file"],
),
"comments",
)
configure(tablename,
context = {"person": "person_id",
#"organisation": "organisation_id",
},
create_onaccept = self.hrm_appraisal_create_onaccept,
crud_form = crud_form,
list_fields = [# Normally accessed via component
#"person_id",
"date",
"organisation_id",
"job_title_id",
"supervisor_id",
"comments",
"document.file",
],
#list_layout = hrm_render_appraisal,
orderby = "hrm_appraisal.date desc",
)
# Components
self.add_components(tablename,
# Appraisal Documents
doc_document={"link": "hrm_appraisal_document",
"joinby": "appraisal_id",
"key": "document_id",
"autodelete": False,
},
)
# =====================================================================
# Appraisal Documents
#
tablename = "hrm_appraisal_document"
define_table(tablename,
Field("appraisal_id", "reference hrm_appraisal"),
self.doc_document_id(empty = False),
*s3_meta_fields())
configure(tablename,
onaccept = self.hrm_appraisal_document_onaccept,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# -------------------------------------------------------------------------
@staticmethod
def hrm_appraisal_create_onaccept(form):
"""
Link Appraisal to Assignment
"""
mission_id = current.request.get_vars.get("mission_id", None)
if not mission_id:
return
record_id = form.vars.id
db = current.db
s3db = current.s3db
atable = s3db.deploy_assignment
hatable = db.hrm_appraisal
hrtable = db.hrm_human_resource
query = (hatable.id == record_id) & \
(hrtable.person_id == hatable.person_id) & \
(atable.human_resource_id == hrtable.id) & \
(atable.mission_id == mission_id)
assignment = db(query).select(atable.id,
limitby = (0, 1),
).first()
if not assignment:
return
db.deploy_assignment_appraisal.insert(assignment_id = assignment.id,
appraisal_id = record_id,
)
# -------------------------------------------------------------------------
@staticmethod
def hrm_appraisal_document_onaccept(form):
"""
            Set the document's doc_id to that of the Human Resource record,
            so that the file also appears among the HR's documents
"""
db = current.db
s3db = current.s3db
atable = db.hrm_appraisal
ltable = db.hrm_appraisal_document
htable = s3db.hrm_human_resource
query = (ltable.id == form.vars.id) & \
(ltable.appraisal_id == atable.id) & \
(atable.person_id == htable.person_id) & \
                (htable.deleted == False)
row = db(query).select(htable.doc_id,
ltable.document_id,
limitby = (0, 1),
).first()
if row:
document_id = row["hrm_appraisal_document.document_id"]
doc_id = row["hrm_human_resource.doc_id"]
db(db.doc_document.id == document_id).update(doc_id = doc_id)
# =============================================================================
class HRExperienceModel(S3Model):
"""
Record a person's work experience
"""
names = ("hrm_experience",)
def model(self):
T = current.T
person_id = self.pr_person_id
settings = current.deployment_settings
if settings.get_org_autocomplete():
org_widget = S3OrganisationAutocompleteWidget(default_from_profile = True)
else:
org_widget = None
site_label = settings.get_org_site_label()
if settings.get_org_site_autocomplete():
site_widget = S3SiteAutocompleteWidget()
site_comment = DIV(_class = "tooltip",
_title = "%s|%s" % (site_label,
current.messages.AUTOCOMPLETE_HELP,
),
)
else:
site_widget = None
site_comment = None
# =====================================================================
# Professional Experience (Mission Record)
#
# These are an element of credentials:
# - a minimum number of hours of active duty need to be done
# (e.g. every 6 months for Portuguese Bombeiros)
#
# This should be auto-populated out of Events
# - as well as being updateable manually for off-system Events
#
hr_type = self.hrm_human_resource.type
activity_types = settings.get_hrm_activity_types()
if not isinstance(activity_types, dict):
activity_type_requires = None
activity_type_represent = None
use_activity_types = False
else:
activity_type_opts = {} #{"other": T("Other")}
for k, v in activity_types.items():
activity_type_opts[k] = T(v)
activity_type_requires = IS_EMPTY_OR(IS_IN_SET(activity_type_opts))
activity_type_represent = s3_options_represent(activity_type_opts)
use_activity_types = True
tablename = "hrm_experience"
self.define_table(tablename,
person_id(ondelete = "CASCADE",
),
# Employment type (staff or volunteer)
Field("employment_type", "integer",
default = hr_type.default,
represent = hr_type.represent,
requires = hr_type.requires,
),
# Activity type (e.g. "RDRT Mission")
Field("activity_type",
represent = activity_type_represent,
requires = activity_type_requires,
# Expose only when there are options defined
readable = use_activity_types,
writable = use_activity_types,
),
# For Events
Field("code",
label = T("Code"),
readable = False,
writable = False,
),
self.org_organisation_id(widget = org_widget),
self.hrm_department_id(readable = False,
writable = False,
),
# Alternate free-text form especially suitable for volunteers
Field("organisation",
label = T("Organization"),
readable = False,
writable = False,
),
# Component, not instance
self.super_link("site_id", "org_site",
comment = site_comment,
label = site_label,
orderby = "org_site.name",
#readable = True,
represent = self.org_site_represent,
widget = site_widget,
#writable = True,
),
self.hrm_job_title_id(),
# Alternate free-text form especially suitable for volunteers
Field("job_title",
label = T("Position"),
readable = False,
writable = False,
),
Field("responsibilities",
label = T("Key Responsibilities"),
),
s3_date("start_date",
label = T("Start Date"),
set_min = "#hrm_experience_end_date",
),
s3_date("end_date",
label = T("End Date"),
set_max = "#hrm_experience_start_date",
start_field = "hrm_experience_start_date",
default_interval = 12,
),
Field("hours", "float",
label = T("Hours"),
),
#Field("place",
# label = T("Place"),
# ),
self.gis_location_id(),
person_id("supervisor_id",
label = T("Supervisor"),
widget = S3AddPersonWidget(),
),
s3_comments(),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Professional Experience"),
title_display = T("Professional Experience Details"),
title_list = T("Professional Experience"),
title_update = T("Edit Professional Experience"),
label_list_button = T("List of Professional Experience"),
label_delete_button = T("Delete Professional Experience"),
msg_record_created = T("Professional Experience added"),
msg_record_modified = T("Professional Experience updated"),
msg_record_deleted = T("Professional Experience deleted"),
msg_no_match = T("No Professional Experience found"),
msg_list_empty = T("Currently no Professional Experience entered"),
)
self.configure(tablename,
context = {"person": "person_id",
"organisation": "organisation_id",
},
list_fields = [# Normally accessed via component
#"person_id",
"start_date",
"end_date",
"organisation_id",
"employment_type",
"job_title_id",
"location_id",
"comments",
],
list_layout = hrm_experience_list_layout,
orderby = "hrm_experience.start_date desc",
)
# Components
self.add_components(tablename,
# Assignments
deploy_assignment = {"name": "assignment",
"link": "deploy_assignment_experience",
"joinby": "experience_id",
"key": "assignment_id",
"autodelete": False,
},
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class HRAwardModel(S3Model):
""" Data model for staff awards """
names = ("hrm_award_type",
"hrm_award",
)
def model(self):
T = current.T
db = current.db
define_table = self.define_table
# =====================================================================
# Award types
#
tablename = "hrm_award_type"
define_table(tablename,
self.org_organisation_id(
requires = self.org_organisation_requires(updateable=True),
),
Field("name",
label = T("Award Type"),
),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("name",
"organisation_id",
),
),
)
ADD_AWARD_TYPE = T("Create Award Type")
award_type_represent = hrm_OrgSpecificTypeRepresent(lookup = tablename)
# =====================================================================
# Awards
#
tablename = "hrm_award"
define_table(tablename,
self.pr_person_id(),
s3_date(),
Field("awarding_body",
label = T("Awarding Body"),
),
Field("award_type_id", "reference hrm_award_type",
label = T("Award Type"),
represent = award_type_represent,
requires = IS_ONE_OF(db, "hrm_award_type.id",
award_type_represent,
),
comment = S3PopupLink(f = "award_type",
label = ADD_AWARD_TYPE,
),
),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Award"),
title_display = T("Award Details"),
title_list = T("Awards"),
title_update = T("Edit Award"),
label_list_button = T("List Awards"),
label_delete_button = T("Delete Award"),
msg_record_created = T("Award added"),
msg_record_modified = T("Award updated"),
msg_record_deleted = T("Award removed"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no awards registered"),
)
# Pass names back to global scope (s3.*)
return {}
# =============================================================================
class HRDisciplinaryActionModel(S3Model):
""" Data model for staff disciplinary record """
names = ("hrm_disciplinary_type",
"hrm_disciplinary_action",
)
def model(self):
T = current.T
define_table = self.define_table
# =====================================================================
# Types of disciplinary action
#
tablename = "hrm_disciplinary_type"
define_table(tablename,
self.org_organisation_id(
requires = self.org_organisation_requires(updateable=True),
),
Field("name",
label = T("Disciplinary Action Type"),
),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("name",
"organisation_id",
),
),
)
disciplinary_type_represent = hrm_OrgSpecificTypeRepresent(lookup=tablename)
# =====================================================================
# Disciplinary record
tablename = "hrm_disciplinary_action"
define_table(tablename,
self.pr_person_id(),
s3_date(),
Field("disciplinary_body"),
Field("disciplinary_type_id", "reference hrm_disciplinary_type",
label = T("Disciplinary Action Type"),
represent = disciplinary_type_represent,
requires = IS_ONE_OF(current.db,
"hrm_disciplinary_type.id",
disciplinary_type_represent,
),
comment = S3PopupLink(f = "disciplinary_type",
label = T("Add Disciplinary Action Type"),
),
),
s3_comments(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class HRTagModel(S3Model):
""" Arbitrary Key:Value Tags for Human Resources """
names = ("hrm_human_resource_tag",
)
def model(self):
T = current.T
# =====================================================================
# Human Resource Tags
#
tablename = "hrm_human_resource_tag"
self.define_table(tablename,
self.hrm_human_resource_id(empty = False,
ondelete = "CASCADE",
),
# key is a reserved word in MySQL
Field("tag",
label = T("Key"),
),
Field("value",
label = T("Value"),
),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("human_resource_id",
"tag",
),
),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class HRProgrammeModel(S3Model):
"""
Programmes
- record Volunteer Hours
- categorise (Training) Events
These are separate to the Project module's Programmes
- @ToDo: setting to make them the same?
"""
names = ("hrm_programme",
"hrm_programme_hours",
"hrm_programme_id",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
ADMIN = current.session.s3.system_roles.ADMIN
is_admin = auth.s3_has_role(ADMIN)
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
root_org = auth.root_org()
# =====================================================================
        # Programmes
#
tablename = "hrm_programme"
define_table(tablename,
Field("name", notnull=True, length=64,
label = T("Name"),
represent = T,
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
Field("name_long",
label = T("Long Name"),
),
# Only included in order to be able to set
# realm_entity to filter appropriately
self.org_organisation_id(default = root_org,
readable = is_admin,
writable = is_admin,
),
s3_comments(comment = None,
label = T("Description"),
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Program"),
title_display = T("Program Details"),
title_list = T("Programs"),
title_update = T("Edit Program"),
label_list_button = T("List Programs"),
label_delete_button = T("Delete Program"),
msg_record_created = T("Program added"),
msg_record_modified = T("Program updated"),
msg_record_deleted = T("Program deleted"),
msg_list_empty = T("Currently no programs registered"),
)
label_create = crud_strings[tablename].label_create
if is_admin:
filter_opts = ()
elif root_org:
filter_opts = (root_org, None)
else:
filter_opts = (None,)
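        # (Admins can use any programme; other users only their root
        #  organisation's programmes and organisation-less ones)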
represent = S3Represent(lookup = tablename,
translate = True,
)
programme_id = S3ReusableField("programme_id", "reference %s" % tablename,
label = T("Program"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_programme.id",
represent,
filterby = "organisation_id",
filter_opts = filter_opts,
)),
sortby = "name",
comment = S3PopupLink(f = "programme",
label = label_create,
title = label_create,
tooltip = T("Add a new program to the catalog."),
),
)
configure(tablename,
deduplicate = S3Duplicate(primary = ("name",
"organisation_id",
),
),
)
# Components
self.add_components(tablename,
hrm_programme_hours = {"name": "person",
"joinby": "programme_id",
},
# Uncomment if-required for reporting
#hrm_training_event = {"link": "hrm_event_programme",
# "joinby": "programme_id",
# "key": "training_event_id",
# "actuate": "hide",
# },
)
# =====================================================================
# Programmes <> Persons Link Table
#
vol_roles = current.deployment_settings.get_hrm_vol_roles()
tablename = "hrm_programme_hours"
define_table(tablename,
self.pr_person_id(ondelete = "CASCADE",
represent = self.pr_PersonRepresent(show_link = True)
),
programme_id(),
self.hrm_job_title_id(readable = vol_roles,
writable = vol_roles,
),
Field("contract",
label = T("Contract Number"),
# Enable in templates as-required
readable = False,
writable = False,
),
Field("event",
label = T("Event Name"),
# Enable in templates as-required
readable = False,
writable = False,
),
Field("place",
label = T("Place"),
# Enable in templates as-required
readable = False,
writable = False,
),
s3_date(default = "now",
future = 0,
),
s3_date("end_date",
label = T("End Date"),
),
Field("hours", "float",
label = T("Hours"),
),
# Training records are auto-populated
Field("training", "boolean",
default = False,
label = T("Type"),
represent = lambda opt: \
T("Training") if opt else T("Work"),
writable = False,
),
Field("training_id", self.hrm_training,
label = T("Course"),
represent = hrm_TrainingRepresent(),
writable = False,
),
Field.Method("month", hrm_programme_hours_month),
s3_comments(comment = None),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Hours"),
title_display = T("Hours Details"),
title_list = T("Hours"),
title_update = T("Edit Hours"),
title_upload = T("Import Hours"),
label_list_button = T("List Hours"),
label_delete_button = T("Delete Hours"),
msg_record_created = T("Hours added"),
msg_record_modified = T("Hours updated"),
msg_record_deleted = T("Hours deleted"),
msg_list_empty = T("Currently no hours recorded for this volunteer"),
)
filter_widgets = [
S3OptionsFilter("person_id$human_resource.organisation_id",
# Doesn't support translations
#represent="%(name)s",
),
S3OptionsFilter("programme_id",
# Doesn't support translation
#represent = "%(name)s",
),
S3OptionsFilter("job_title_id",
#label = T("Volunteer Role"),
# Doesn't support translation
#represent = "%(name)s",
),
S3DateFilter("date",
hide_time = True,
),
]
report_fields = ["training",
"programme_id",
"job_title_id",
"training_id",
(T("Month"), "month"),
"hours",
"person_id$gender",
]
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = report_fields,
defaults = Storage(rows = "programme_id",
cols = "month",
fact = "sum(hours)",
totals = True,
)
)
configure(tablename,
context = {"person": "person_id",
},
extra_fields = ["date"],
filter_widgets = filter_widgets,
list_fields = ["training",
"programme_id",
"job_title_id",
"training_id",
"date",
"hours",
],
onaccept = hrm_programme_hours_onaccept,
ondelete = hrm_programme_hours_onaccept,
orderby = "hrm_programme_hours.date desc",
report_options = report_options,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"hrm_programme_id": programme_id,
}
# =============================================================================
class HRShiftModel(S3Model):
"""
Shifts
"""
names = ("hrm_shift_template",
"hrm_shift",
"hrm_shift_id",
"hrm_human_resource_shift",
)
def model(self):
T = current.T
#configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
set_method = self.set_method
job_title_id = self.hrm_job_title_id
skill_id = self.hrm_skill_id
db = current.db
DAYS_OF_WEEK = {1: T("Monday"),
2: T("Tuesday"),
3: T("Wednesday"),
4: T("Thursday"),
5: T("Friday"),
6: T("Saturday"),
7: T("Sunday"),
}
# ---------------------------------------------------------------------
# Shift Templates
#
tablename = "hrm_shift_template"
define_table(tablename,
job_title_id(),
skill_id(),
Field("day_of_week", "integer",
represent = s3_options_represent(DAYS_OF_WEEK),
requires = IS_IN_SET(DAYS_OF_WEEK),
),
s3_time("start_time",
empty = False,
label = T("Start Time"),
# Could be the next day
#set_min = "#hrm_shift_template_end_time",
),
s3_time("end_time",
empty = False,
label = T("End Time"),
# Could be the next day
#set_max = "#hrm_shift_template_start_time",
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("New Shift"),
title_display = T("Shift Details"),
title_list = T("Shifts"),
title_update = T("Edit Shift"),
#title_upload = T("Import Shift data"),
label_list_button = T("List Shifts"),
msg_record_created = T("Shift added"),
msg_record_modified = T("Shift updated"),
msg_record_deleted = T("Shift deleted"),
msg_list_empty = T("No Shifts defined"),
)
# ---------------------------------------------------------------------
# Shifts
#
tablename = "hrm_shift"
define_table(tablename,
job_title_id(),
skill_id(),
s3_datetime("start_date",
label = T("Start Date"),
set_min = "#hrm_shift_end_date",
),
s3_datetime("end_date",
label = T("End Date"),
set_max = "#hrm_shift_start_date",
),
s3_comments(),
*s3_meta_fields())
represent = S3Represent(lookup = tablename,
fields = ["start_date", "end_date"],
)
shift_id = S3ReusableField("shift_id", "reference %s" % tablename,
label = T("Shift"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_shift.id",
represent,
)),
comment = S3PopupLink(c = "hrm",
f = "shift",
label = T("Create Shift"),
),
)
self.add_components(tablename,
hrm_human_resource_shift = {"joinby": "shift_id",
"multiple": False,
}
)
crud_form = S3SQLCustomForm("job_title_id",
"skill_id",
"start_date",
"end_date",
"comments",
(T("Assigned"), "human_resource_shift.human_resource_id"),
)
list_fields = ["job_title_id",
"skill_id",
"start_date",
"end_date",
"comments",
(T("Assigned"), "human_resource_shift.human_resource_id"),
]
self.configure(tablename,
crud_form = crud_form,
list_fields = list_fields,
)
# Custom Method to Assign HRs
STAFF = current.deployment_settings.get_hrm_staff_label()
filter_widgets = [S3DateFilter("available",
label = T("Available"),
# Use custom selector to prevent automatic
# parsing (which would result in an error)
selector = "available",
hide_time = False,
),
#if settings.get_hrm_use_skills():
S3OptionsFilter("competency.skill_id",
                                          # Better to keep the default label (easier to customise & consistent)
#label = T("Skill"),
),
S3OptionsFilter("job_title_id",
),
S3OptionsFilter("type",
label = T("Type"),
options = {1: STAFF,
2: T("Volunteer"),
},
cols = 2,
hidden = True,
),
]
#if settings.get_hrm_multiple_orgs():
# if settings.get_org_branches():
# append_filter(S3HierarchyFilter("organisation_id",
# leafonly = False,
# ))
# else:
# append_filter(S3OptionsFilter("organisation_id",
# search = True,
# header = "",
# #hidden = True,
# ))
list_fields = ["person_id",
"job_title_id",
"start_date",
(T("Skills"), "person_id$competency.skill_id"),
]
set_method("hrm", "shift",
method = "assign",
action = self.hrm_AssignMethod(component = "human_resource_shift",
next_tab = "facility",
filter_widgets = filter_widgets,
list_fields = list_fields,
rheader = hrm_rheader,
))
def facility_redirect(r, **attr):
"""
Redirect to the Facility's Shifts tab
"""
s3db = current.s3db
# Find the Facility
ltable = s3db.org_site_shift
ftable = s3db.org_facility
query = (ltable.shift_id == r.id) & \
(ltable.site_id == ftable.site_id)
facility = current.db(query).select(ftable.id,
limitby = (0, 1),
).first()
redirect(URL(c = "org",
f = "facility",
args = [facility.id, "shift"],
))
set_method("hrm", "shift",
method = "facility",
action = facility_redirect)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("New Shift"),
title_display = T("Shift Details"),
title_list = T("Shifts"),
title_update = T("Edit Shift"),
#title_upload = T("Import Shift data"),
label_list_button = T("List Shifts"),
msg_record_created = T("Shift added"),
msg_record_modified = T("Shift updated"),
msg_record_deleted = T("Shift deleted"),
msg_list_empty = T("No Shifts defined"),
)
# ---------------------------------------------------------------------
# Shifts <> Human Resources
#
# @ToDo: Replace with hrm_shift_person as it's the Person who should be
# busy, not just the HR
#
tablename = "hrm_human_resource_shift"
define_table(tablename,
shift_id(),
self.hrm_human_resource_id(writable = False),
#s3_comments(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"hrm_shift_id": shift_id,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
"""
Return safe defaults in case the model has been deactivated.
"""
return {"hrm_shift_id": S3ReusableField.dummy("shift_id"),
}
# =============================================================================
class HRDelegationModel(S3Model):
"""
Model to manage delegations of staff/volunteers to other
organisations.
"""
names = ("hrm_delegation",
"hrm_delegation_status_opts",
"hrm_delegation_message",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
define_table = self.define_table
crud_strings = s3.crud_strings
# ---------------------------------------------------------------------
# Delegation Statuses
#
workflow = current.deployment_settings.get_hrm_delegation_workflow()
if isinstance(workflow, (tuple, list)) and len(workflow):
# Custom workflow
delegation_status = workflow
else:
if workflow == "Invitation":
# Invitation workflow:
# Other organisation invites the delegate, who then accepts
delegation_status = (("INVT", T("Invited")),
("ACPT", T("Accepted")),
("RJCT", T("Rejected")),
)
elif workflow == "Application":
# Application workflow:
# Person applies for the delegation, which is then accepted
delegation_status = (("APPL", T("Applied")),
("ACPT", T("Accepted")),
("RJCT", T("Rejected")),
)
else:
# Request workflow:
# Other organisation requests the delegate, which is then
# approved by the managing organisation
delegation_status = (("REQ", T("Requested")),
("APPR", T("Approved")),
("DECL", T("Declined")),
)
# Final statuses
delegation_status += (("CANC", T("Cancelled")),
("IMPL", T("Implemented")),
("NVLD", T("Invalid")),
)
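        # The first status in the sequence is used as the default for new
        # delegation records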
# ---------------------------------------------------------------------
# Delegation
#
tablename = "hrm_delegation"
define_table(tablename,
self.org_organisation_id(
empty = False,
comment = DIV(_class = "tooltip",
# TODO tooltip depends on workflow
_title = "%s|%s" % (T("Requesting Organisation"),
T("The organisation requesting the delegation"),
),
),
),
self.super_link("site_id", "org_site",
orderby = "org_site.name",
represent = self.org_site_represent,
),
self.pr_person_id(
empty = False,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Person"),
T("The person to be delegated"),
),
),
),
s3_date(label = T("Start Date"),
set_min = "#hrm_delegation_end_date",
),
s3_date("end_date",
label = T("End Date"),
set_max = "#hrm_delegation_date",
),
s3_datetime("requested_on",
label = T("Requested on"),
default = "now",
writable = False,
),
Field("status",
                           default = delegation_status[0][0],
requires = IS_IN_SET(delegation_status,
zero = None,
sort = False,
),
represent = s3_options_represent(dict(delegation_status)),
),
# Enable in template if/as required:
Field("hours_per_week", "integer",
label = T("Hours per week"),
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# Components
self.add_components(tablename,
hrm_delegation_message = "delegation_id",
hrm_delegation_note = "delegation_id",
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Delegation"),
title_display = T("Delegation Details"),
title_list = T("Delegations"),
title_update = T("Edit Delegation"),
label_list_button = T("List Delegations"),
label_delete_button = T("Delete Delegation"),
msg_record_created = T("Delegation created"),
msg_record_modified = T("Delegation updated"),
msg_record_deleted = T("Delegation deleted"),
msg_list_empty = T("No Delegations currently registered"),
)
# ---------------------------------------------------------------------
# Messages exchanged in connection with a delegation
#
message_status = {"SENT": T("Sent"),
"FAILED": T("Failed"),
}
tablename = "hrm_delegation_message"
define_table(tablename,
Field("delegation_id", "reference hrm_delegation",
ondelete = "CASCADE",
readable = False,
writable = False,
),
s3_date(default="now"),
Field("recipient",
label = T("Recipient"),
),
Field("subject",
label = T("Subject"),
),
Field("message", "text",
label = T("Message"),
represent = s3_text_represent,
),
Field("status",
default = "SENT",
label = T("Status"),
requires = IS_IN_SET(message_status,
zero = None,
),
represent = s3_options_represent(message_status),
writable = False,
),
s3_comments(),
*s3_meta_fields())
# List fields
list_fields = ["date",
"recipient",
"subject",
"message",
"status",
]
# Table configuration
self.configure(tablename,
list_fields = list_fields,
insertable = False,
deletable = False,
editable = False,
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Message"),
title_display = T("Message Details"),
title_list = T("Messages"),
title_update = T("Edit Message"),
label_list_button = T("List Messages"),
label_delete_button = T("Delete Message"),
msg_record_created = T("Message created"),
msg_record_modified = T("Message updated"),
msg_record_deleted = T("Message deleted"),
msg_list_empty = T("No Messages currently registered"),
)
# ---------------------------------------------------------------------
# Simple notes journal for delegations
#
tablename = "hrm_delegation_note"
define_table(tablename,
Field("delegation_id", "reference hrm_delegation",
ondelete = "CASCADE",
readable = False,
writable = False,
),
s3_date(default="now"),
Field("note", "text",
label = T("Note"),
represent = s3_text_represent,
),
*s3_meta_fields())
# List fields
list_fields = ["date",
(T("Author"), "modified_by"),
"note",
]
# Table configuration
self.configure(tablename,
list_fields = list_fields,
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Note"),
title_display = T("Note Details"),
title_list = T("Notes"),
title_update = T("Edit Note"),
label_list_button = T("List Notes"),
label_delete_button = T("Delete Note"),
msg_record_created = T("Note added"),
msg_record_modified = T("Note updated"),
msg_record_deleted = T("Note deleted"),
msg_list_empty = T("No Notes currently registered"),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"hrm_delegation_status_opts": delegation_status,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
#dummy = S3ReusableField.dummy
return {"hrm_delegation_status_opts": {}}
# =============================================================================
def hrm_programme_hours_month(row):
"""
Virtual field for hrm_programme_hours - returns the date of the first
day of the month of this entry, used for programme hours report.
Requires "date" to be in the additional report_fields
@param row: the Row
"""
try:
thisdate = row["hrm_programme_hours.date"]
except AttributeError:
return NONE
if not thisdate:
return NONE
#thisdate = thisdate.date()
month = thisdate.month
year = thisdate.year
first = datetime.date(year, month, 1)
return first.strftime("%y-%m")
# =============================================================================
def hrm_programme_hours_onaccept(form):
"""
Update the Active Status for the volunteer
- called both onaccept & ondelete
"""
vol_active = current.deployment_settings.get_hrm_vol_active()
if not callable(vol_active):
# Nothing to do (either field is disabled or else set manually)
return
# Deletion and update have a different format
delete = False
try:
record_id = form.vars.id
except AttributeError:
record_id = form.id
delete = True
db = current.db
if delete:
person_id = form.person_id
else:
# Get the full record
table = db.hrm_programme_hours
record = db(table.id == record_id).select(table.person_id,
limitby = (0, 1),
).first()
person_id = record.person_id
# Recalculate the Active Status for this Volunteer
active = vol_active(person_id)
# Read the current value
s3db = current.s3db
dtable = s3db.vol_details
htable = s3db.hrm_human_resource
query = (htable.person_id == person_id) & \
(dtable.human_resource_id == htable.id)
row = db(query).select(dtable.id,
dtable.active,
limitby = (0, 1),
).first()
if row:
if row.active != active:
# Update
db(dtable.id == row.id).update(active = active)
else:
# Create record
row = db(htable.person_id == person_id).select(htable.id,
limitby = (0, 1),
).first()
if row:
dtable.insert(human_resource_id = row.id,
active = active,
)
# =============================================================================
class hrm_AssignMethod(S3Method):
"""
Custom Method to allow human resources to be assigned to something
e.g. Incident, Project, Site, Vehicle
@ToDo: be able to filter by deployable status for the role
"""
# -------------------------------------------------------------------------
def __init__(self,
component,
next_tab = "human_resource",
types = None,
filter_widgets = None,
list_fields = None,
rheader = None,
):
"""
@param component: the Component in which to create records
@param next_tab: the component/method to redirect to after assigning
@param types: a list of types to pick from: Staff, Volunteers, Deployables
@param filter_widgets: a custom list of FilterWidgets to show
@param list_fields: a custom list of Fields to show
@param rheader: an rheader to show
"""
super(hrm_AssignMethod, self).__init__()
self.component = component
self.next_tab = next_tab
self.types = types
self.filter_widgets = filter_widgets
self.list_fields = list_fields
self.rheader = rheader
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
            Entry point for the REST interface: GET renders the assignment
            page, POST processes the selected assignments
@param r: the S3Request
@param attr: controller options for this request
"""
try:
component = r.resource.components[self.component]
except KeyError:
current.log.error("Invalid Component!")
raise
if component.link:
component = component.link
tablename = component.tablename
# Requires permission to create component
authorised = current.auth.s3_has_permission("create", tablename)
if not authorised:
r.unauthorised()
settings = current.deployment_settings
types = self.types
if not types:
if settings.has_module("vol"):
types = (1, 2)
else:
# Staff
types = (1,)
if types == (2,):
controller = "vol"
else:
controller = "hrm"
T = current.T
db = current.db
s3db = current.s3db
table = s3db[tablename]
fkey = component.fkey
record = r.record
if fkey in record:
# SuperKey
record_id = record[fkey]
else:
record_id = r.id
get_vars = r.get_vars
response = current.response
output = None
if r.http == "POST":
added = 0
post_vars = r.post_vars
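            # Expected POST vars: "selected" (comma-separated hrm_human_resource
            # ids), "mode" ("Exclusive" = assign all matching rows except the
            # listed ones, otherwise just the listed ones) and optionally
            # "filterURL" carrying the active filters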
if all([n in post_vars for n in ("assign", "selected", "mode")]):
selected = post_vars.selected
if selected:
selected = selected.split(",")
else:
selected = []
if post_vars.mode == "Exclusive":
# 'Select All' ticked or all rows selected manually
if "filterURL" in post_vars:
filters = S3URLQuery.parse_url(post_vars.filterURL)
else:
filters = None
query = ~(FS("id").belongs(selected))
resource = s3db.resource("hrm_human_resource",
alias = self.component,
filter = query,
vars = filters)
rows = resource.select(["id"], as_rows=True)
selected = [str(row.id) for row in rows]
if component.multiple:
# Prevent multiple entries in the link table
query = (table.human_resource_id.belongs(selected)) & \
(table[fkey] == record_id) & \
(table.deleted != True)
rows = db(query).select(table.id)
rows = dict((row.id, row) for row in rows)
onaccept = component.get_config("create_onaccept",
component.get_config("onaccept", None))
for human_resource_id in selected:
try:
hr_id = int(human_resource_id.strip())
except ValueError:
continue
if hr_id not in rows:
link = Storage(human_resource_id = human_resource_id)
link[fkey] = record_id
_id = table.insert(**link)
if onaccept:
link["id"] = _id
form = Storage(vars = link)
onaccept(form)
added += 1
else:
human_resource_id = selected[0]
exists = db(table[fkey] == record_id).select(table.id,
limitby = (0, 1),
).first()
if exists:
onaccept = component.get_config("update_onaccept",
component.get_config("onaccept", None))
exists.update_record(human_resource_id = human_resource_id)
if onaccept:
link = Storage(id = exists.id,
human_resource_id = human_resource_id)
link[fkey] = record_id
form = Storage(vars = link)
onaccept(form)
else:
onaccept = component.get_config("create_onaccept",
component.get_config("onaccept", None))
link = Storage(human_resource_id = human_resource_id)
link[fkey] = record_id
_id = table.insert(**link)
if onaccept:
link["id"] = _id
form = Storage(vars = link)
onaccept(form)
added += 1
if r.representation == "popup":
# Don't redirect, so we retain popup extension & so close popup
response.confirmation = T("%(number)s assigned") % \
{"number": added}
output = {}
else:
current.session.confirmation = T("%(number)s assigned") % \
{"number": added}
if added > 0:
redirect(URL(args = [r.id, self.next_tab],
vars = {},
))
else:
redirect(URL(args = r.args,
vars = {},
))
elif r.http == "GET":
representation = r.representation
# Filter widgets
if self.filter_widgets is not None:
filter_widgets = self.filter_widgets
else:
if controller == "vol":
resource_type = "volunteer"
elif len(types) == 1:
resource_type = "staff"
else:
# Both
resource_type = None
if r.controller == "req":
module = "req"
else:
module = controller
filter_widgets = hrm_human_resource_filters(resource_type = resource_type,
module = module)
# List fields
if self.list_fields is not None:
list_fields = self.list_fields
else:
list_fields = ["person_id",
"organisation_id",
]
if len(types) == 2:
list_fields.append((T("Type"), "type"))
list_fields.append("job_title_id")
if settings.get_hrm_use_certificates():
list_fields.append((T("Certificates"), "person_id$certification.certificate_id"))
if settings.get_hrm_use_skills():
list_fields.append((T("Skills"), "person_id$competency.skill_id"))
if settings.get_hrm_use_trainings():
list_fields.append((T("Trainings"), "person_id$training.course_id"))
# Data table
resource = s3db.resource("hrm_human_resource",
alias = r.component.alias if r.component else None,
vars = get_vars)
totalrows = resource.count()
if "pageLength" in get_vars:
display_length = get_vars["pageLength"]
if display_length == "None":
display_length = None
else:
display_length = int(display_length)
else:
display_length = 25
if display_length:
limit = 4 * display_length
else:
limit = None
filter_, orderby, left = resource.datatable_filter(list_fields,
get_vars)
resource.add_filter(filter_)
# Hide people already in the link table
query = (table[fkey] == record_id) & \
(table.deleted != True)
rows = db(query).select(table.human_resource_id)
already = [row.human_resource_id for row in rows]
filter_ = (~db.hrm_human_resource.id.belongs(already))
resource.add_filter(filter_)
ajax_vars = dict(get_vars)
if settings.get_hrm_unavailability():
apply_availability_filter = False
if get_vars.get("available__ge") or \
get_vars.get("available__le"):
apply_availability_filter = True
elif representation != "aadata":
available_defaults = response.s3.filter_defaults["hrm_human_resource"]["available"]
if available_defaults:
apply_availability_filter = True
ge = available_defaults.get("ge")
if ge is not None:
ajax_vars["available__ge"] = s3_format_datetime(ge) # Used by dt_ajax_url
get_vars["available__ge"] = s3_format_datetime(ge) # Popped in pr_availability_filter
le = available_defaults.get("le")
if le is not None:
ajax_vars["available__le"] = s3_format_datetime(le) # Used by dt_ajax_url
get_vars["available__le"] = s3_format_datetime(le) # Popped in pr_availability_filter
if apply_availability_filter:
# Apply availability filter
request = Storage(get_vars = get_vars,
resource = resource,
tablename = "hrm_human_resource",
)
s3db.pr_availability_filter(request)
dt_id = "datatable"
# Bulk actions
dt_bulk_actions = [(T("Assign"), "assign")]
if representation in ("html", "popup"):
# Page load
resource.configure(deletable = False)
profile_url = URL(c = controller,
f = "human_resource",
args = ["[id]", "profile"],
)
S3CRUD.action_buttons(r,
deletable = False,
read_url = profile_url,
update_url = profile_url,
)
response.s3.no_formats = True
# Filter form
if filter_widgets:
# Where to retrieve filtered data from:
submit_url_vars = resource.crud._remove_filters(r.get_vars)
filter_submit_url = r.url(vars = submit_url_vars)
# Default Filters (before selecting data!)
resource.configure(filter_widgets = filter_widgets)
S3FilterForm.apply_filter_defaults(r, resource)
# Where to retrieve updated filter options from:
filter_ajax_url = URL(f = "human_resource",
args = ["filter.options"],
vars = {},
)
get_config = resource.get_config
filter_clear = get_config("filter_clear", True)
filter_formstyle = get_config("filter_formstyle", None)
filter_submit = get_config("filter_submit", True)
filter_form = S3FilterForm(filter_widgets,
clear = filter_clear,
formstyle = filter_formstyle,
submit = filter_submit,
ajax = True,
url = filter_submit_url,
ajaxurl = filter_ajax_url,
_class = "filter-form",
_id = "datatable-filter-form",
)
fresource = current.s3db.resource(resource.tablename)
alias = r.component.alias if r.component else None
ff = filter_form.html(fresource,
r.get_vars,
target = "datatable",
alias = alias)
else:
ff = ""
# Data table (items)
data = resource.select(list_fields,
start = 0,
limit = limit,
orderby = orderby,
left = left,
count = True,
represent = True)
filteredrows = data["numrows"]
dt = S3DataTable(data["rfields"], data["rows"])
items = dt.html(totalrows,
filteredrows,
dt_id,
dt_ajax_url = r.url(representation = "aadata",
vars = ajax_vars),
dt_bulk_actions = dt_bulk_actions,
dt_bulk_single = not component.multiple,
dt_pageLength = display_length,
dt_pagination = "true",
dt_searching = "false",
)
STAFF = settings.get_hrm_staff_label()
response.view = "list_filter.html"
rheader = self.rheader
if callable(rheader):
rheader = rheader(r)
output = {"items": items,
"title": T("Assign %(staff)s") % {"staff": STAFF},
"list_filter_form": ff,
"rheader": rheader,
}
elif representation == "aadata":
# Ajax refresh
if "draw" in get_vars:
echo = int(get_vars.draw)
else:
echo = None
data = resource.select(list_fields,
start = 0,
limit = limit,
orderby = orderby,
left = left,
count = True,
represent = True)
filteredrows = data["numrows"]
dt = S3DataTable(data["rfields"], data["rows"])
items = dt.json(totalrows,
filteredrows,
dt_id,
echo,
dt_bulk_actions = dt_bulk_actions)
response.headers["Content-Type"] = "application/json"
output = items
else:
r.error(415, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
return output
# =============================================================================
class hrm_HumanResourceRepresent(S3Represent):
""" Representation of human resource IDs """
def __init__(self, show_link=False):
"""
Constructor
@param show_link: whether to add a URL to representations
"""
super(hrm_HumanResourceRepresent, self).__init__(lookup = "hrm_human_resource",
show_link = show_link)
self.job_title_represent = S3Represent(lookup = "hrm_job_title")
self.types = {}
# -------------------------------------------------------------------------
def link(self, k, v, row=None):
"""
Represent a (key, value) as hypertext link
@param k: the key (hrm_human_resource.id)
@param v: the representation of the key
@param row: the row with this key (unused here)
"""
# Link to specific controller for type
types = self.types
if types.get(k) == 1:
url = URL(c="hrm", f="staff", args=[k])
else:
url = URL(c="vol", f="volunteer", args=[k])
return A(v, _href = url)
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
"""
s3db = current.s3db
htable = s3db.hrm_human_resource
ptable = s3db.pr_person
left = ptable.on(ptable.id == htable.person_id)
count = len(values)
if count == 1:
query = (key == values[0])
else:
query = key.belongs(values)
rows = current.db(query).select(htable.id,
htable.job_title_id,
htable.organisation_id,
htable.type,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
limitby = (0, count),
left = left,
)
self.queries += 1
# Remember HR types
types = self.types
for row in rows:
types[row["hrm_human_resource.id"]] = row["hrm_human_resource.type"]
# Bulk-represent job_title_ids
job_title_id = str(htable.job_title_id)
job_title_ids = [row[job_title_id] for row in rows]
if job_title_ids:
self.job_title_represent.bulk(job_title_ids)
# Bulk-represent organisation_ids
if current.deployment_settings.get_hrm_show_organisation():
organisation_id = str(htable.organisation_id)
organisation_ids = [row[organisation_id] for row in rows]
if organisation_ids:
htable.organisation_id.represent.bulk(organisation_ids)
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
# Start with the person name
representation = [s3_str(s3_fullname(row.pr_person))]
append = representation.append
hr = row.hrm_human_resource
# Append the job title if present
if hr.job_title_id:
append(self.job_title_represent(hr.job_title_id, show_link=False))
# Append the organisation if present (and configured)
if hr.organisation_id and \
current.deployment_settings.get_hrm_show_organisation():
htable = current.s3db.hrm_human_resource
append(htable.organisation_id.represent(hr.organisation_id,
show_link = False))
return ", ".join(representation)
# =============================================================================
class hrm_TrainingRepresent(S3Represent):
"""
Represent a Training by its Course
- used from within hrm_programme_hours
"""
def __init__(self):
"""
Constructor
"""
super(hrm_TrainingRepresent, self).__init__(lookup = "hrm_training")
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
"""
ttable = self.table
ctable = current.s3db.hrm_course
left = [ctable.on(ctable.id == ttable.course_id)]
if len(values) == 1:
query = (key == values[0])
else:
query = key.belongs(values)
rows = current.db(query).select(ttable.id,
ctable.name,
left = left,
)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
name = row["hrm_course.name"]
if not name:
name = NONE
return name
# =============================================================================
class hrm_TrainingEventRepresent(S3Represent):
""" Representation of training_event_id """
def __init__(self):
"""
Constructor
"""
super(hrm_TrainingEventRepresent, self).__init__(lookup = "hrm_training_event")
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None, pe_id=False):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
@param pe_id: whether to include pe_id in the output rows
(True when called from pr_PersonEntityRepresent)
"""
s3db = current.s3db
etable = self.table
ctable = s3db.hrm_course
stable = s3db.org_site
left = [ctable.on(ctable.id == etable.course_id),
stable.on(stable.site_id == etable.site_id),
]
if len(values) == 1:
query = (key == values[0])
else:
query = key.belongs(values)
fields = [etable.id,
etable.name,
etable.start_date,
etable.instructor,
etable.person_id,
ctable.name,
ctable.code,
stable.name,
]
if pe_id:
fields.insert(0, etable.pe_id)
rows = current.db(query).select(*fields,
left = left,
)
instructors = current.deployment_settings.get_hrm_training_instructors()
if instructors in ("internal", "both"):
# Bulk-represent internal instructors to suppress
# per-row DB lookups in represent_row:
key = str(etable.person_id)
etable.person_id.represent.bulk([row[key] for row in rows])
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
NB This needs to be machine-parseable by training.xsl
@param row: the Row
"""
# Do we have a Name?
name = row.get("hrm_training_event.name")
if name:
return name
# Course Details
course = row.get("hrm_course")
if not course:
return NONE
name = course.get("name")
if not name:
name = NONE
representation = ["%s --" % name]
append = representation.append
code = course.get("code")
if code:
append("(%s)" % code)
# Venue and instructor
event = row.hrm_training_event
try:
site = row.org_site.name
except AttributeError:
site = None
instructors = current.deployment_settings.get_hrm_training_instructors()
instructor = None
if instructors in ("internal", "both"):
person_id = event.get("person_id")
if person_id:
instructor = self.table.person_id.represent(person_id)
if instructor is None and instructors in ("external", "both"):
instructor = event.get("instructor")
if instructor and site:
append("%s - {%s}" % (instructor, site))
elif instructor:
append("%s" % instructor)
elif site:
append("{%s}" % site)
# Start date
start_date = event.start_date
if start_date:
# Easier for users & machines
start_date = S3DateTime.date_represent(start_date, format="%Y-%m-%d")
append("[%s]" % start_date)
return " ".join(representation)
# =============================================================================
#def hrm_position_represent(id, row=None):
# """
# """
# if row:
# id = row.id
# elif not id:
# return NONE
# db = current.db
# s3db = current.s3db
# table = s3db.hrm_position
# jtable = s3db.hrm_job_title
# otable = s3db.org_organisation
# query = (table.id == id) & \
# (table.job_title_id == jtable.id)
# (table.organisation_id == otable.id)
# position = db(query).select(jtable.name,
# otable.name,
# limitby = (0, 1),
# ).first()
# try:
# represent = position.hrm_job_title.name
# if position.org_organisation:
# represent = "%s (%s)" % (represent,
# position.org_organisation.name)
# except:
# return NONE
# return represent
#
# =============================================================================
def hrm_human_resource_onaccept(form):
""" On-accept for HR records """
if "vars" in form:
# e.g. coming from staff/create
form_vars = form.vars
elif "id" in form:
# e.g. coming from user/create or from hrm_site_onaccept or req_onaccept
form_vars = form
elif hasattr(form, "vars"):
# SQLFORM e.g. ?
form_vars = form.vars
else:
# e.g. Coming from s3_register callback
form_vars = form
record_id = form_vars.get("id")
if not record_id:
return
db = current.db
s3db = current.s3db
auth = current.auth
request = current.request
settings = current.deployment_settings
# Get the 'full' record
htable = db.hrm_human_resource
record = db(htable.id == record_id).select(htable.id, # needed for update_record
htable.type,
htable.person_id,
htable.organisation_id,
htable.location_id,
htable.job_title_id,
htable.site_id,
htable.site_contact,
htable.status,
htable.deleted,
limitby = (0, 1),
).first()
job_title_id = record.job_title_id
if job_title_id and settings.get_hrm_multiple_job_titles():
# Update the link table
ltable = db.hrm_job_title_human_resource
query = (ltable.human_resource_id == record_id) & \
(ltable.job_title_id == job_title_id)
exists = db(query).select(ltable.id, # needed for update_record
ltable.main,
limitby = (0, 1),
).first()
if exists:
if not exists.main:
exists.update_record(main = True)
else:
# Insert record
ltable.insert(human_resource_id = record_id,
job_title_id = job_title_id,
main = True,
start_date = request.utcnow,
)
data = Storage()
site_id = record.site_id
organisation_id = record.organisation_id
# Affiliation, record ownership and component ownership
s3db.pr_update_affiliations(htable, record)
# Realm_entity for the pr_person record
ptable = s3db.pr_person
person_id = record.person_id
if settings.get_auth_person_realm_human_resource_site_then_org():
# Set pr_person.realm_entity to the human_resource's site pe_id or organisation_pe_id
person = Storage(id = person_id)
entity = s3db.pr_get_pe_id("org_site", site_id) or \
s3db.pr_get_pe_id("org_organisation", organisation_id)
if entity:
auth.set_realm_entity(ptable, person,
entity = entity,
force_update = True,
)
tracker = S3Tracker()
if person_id:
# Set person record to follow HR record
# (Person base location remains untouched)
pr_tracker = tracker(ptable, person_id)
pr_tracker.check_in(htable, record_id, timestmp = request.utcnow)
if record.type == 1:
# Staff
vol = False
location_lookup = settings.get_hrm_location_staff()
elif record.type == 2:
# Volunteer
vol = True
location_lookup = settings.get_hrm_location_vol()
if request.controller == "deploy":
# Add deploy_application when creating inside deploy module
user_organisation_id = auth.user.organisation_id
ltable = s3db.deploy_application
if user_organisation_id:
query = (ltable.human_resource_id == record_id) & \
((ltable.organisation_id == None) |
(ltable.organisation_id == user_organisation_id))
else:
query = (ltable.human_resource_id == record_id)
exists = db(query).select(ltable.id,
limitby = (0, 1),
).first()
if not exists:
# Is there a Deployable Team for this user_org?
dotable = s3db.deploy_organisation
            exists = db(dotable.organisation_id == user_organisation_id).select(dotable.id,
                                                                                limitby = (0, 1),
                                                                                ).first()
if exists:
# Insert record in this Deployable Team
ltable.insert(human_resource_id = record_id,
organisation_id = user_organisation_id,
)
else:
# Insert record in the global Deployable Team
ltable.insert(human_resource_id = record_id,
)
# Determine how the HR is positioned
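    # (location_lookup is either "site_id", "person_id" or a sequence of those
    #  giving the priority order, as configured in deployment_settings)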
address = None
update_location_from_site = False
site_contact = record.site_contact
hstable = s3db.hrm_human_resource_site
query = (hstable.human_resource_id == record_id)
if site_id:
# Add/update the record in the link table
this = db(query).select(hstable.id,
limitby = (0, 1),
).first()
if this:
db(query).update(site_id = site_id,
human_resource_id = record_id,
site_contact = site_contact,
)
else:
hstable.insert(site_id = site_id,
human_resource_id = record_id,
site_contact = site_contact,
)
if location_lookup == "site_id" or location_lookup[0] == "site_id":
# Use site location as HR base location
update_location_from_site = True
elif location_lookup[0] == "person_id":
# Only use site location as HR base location if the Person
# has no Home Address
atable = s3db.pr_address
query = (atable.pe_id == ptable.pe_id) & \
(ptable.id == person_id) & \
(atable.type == 1) & \
(atable.deleted == False)
address = db(query).select(atable.id,
atable.location_id,
limitby = (0, 1),
).first()
if not address:
update_location_from_site = True
else:
# location_lookup == "person_id"
# Use home address to determine HR base location
# Current Address preferred, otherwise Permanent if present
atable = s3db.pr_address
query = (atable.pe_id == ptable.pe_id) & \
(ptable.id == person_id) & \
(atable.type.belongs(1, 2)) & \
(atable.deleted == False)
address = db(query).select(atable.id,
atable.location_id,
limitby = (0, 1),
orderby = atable.type,
).first()
else:
# Delete any links in the link table
db(query).delete()
if "person_id" in location_lookup:
# Use home address to determine HR base location
# Current Address preferred, otherwise Permanent if present
atable = s3db.pr_address
query = (atable.pe_id == ptable.pe_id) & \
(ptable.id == person_id) & \
(atable.type.belongs(1, 2)) & \
(atable.deleted == False)
address = db(query).select(atable.id,
atable.location_id,
limitby = (0, 1),
orderby = atable.type,
).first()
if update_location_from_site:
# Use the site location as base location of the HR
stable = db.org_site
site = db(stable.site_id == site_id).select(stable.location_id,
limitby = (0, 1),
).first()
try:
data.location_id = location_id = site.location_id
except AttributeError:
current.log.error("Can't find site with site_id ", site_id)
data.location_id = location_id = None
elif address:
# Use the address as base location of the HR
data.location_id = location_id = address.location_id
elif vol:
# No known address and not updating location from site
# => fall back to the HR's location_id if known
if record.location_id:
# Add a new Address for the person from the HR location
location_id = record.location_id
pe = db(ptable.id == person_id).select(ptable.pe_id,
limitby = (0, 1),
).first()
try:
pe_id = pe.pe_id
except AttributeError:
current.log.error("Can't find person with id ", person_id)
            else:
                # pr_address may not have been looked up above
                atable = s3db.pr_address
                atable.insert(type = 1,
                              pe_id = pe_id,
                              location_id = location_id,
                              )
else:
data.location_id = location_id = None
else:
data.location_id = location_id = None
# Update HR base location
hrm_tracker = tracker(htable, record_id)
if location_id:
# Set Base Location
hrm_tracker.set_base_location(location_id)
else:
# Unset Base Location
hrm_tracker.set_base_location(None)
if settings.get_hrm_site_contact_unique():
# Ensure only one Site Contact per Site
if site_contact and site_id:
# Set all others in this Facility to not be the Site Contact
# @ToDo: deployment_setting to allow multiple site contacts
query = (htable.site_id == site_id) & \
(htable.site_contact == True) & \
(htable.id != record_id)
# Prevent overwriting the person_id field!
htable.person_id.update = None
db(query).update(site_contact = False)
if vol:
request_vars = request.vars
programme_id = request_vars.get("programme_id", None)
if programme_id:
# Have we already got a record for this programme?
table = s3db.hrm_programme_hours
query = (table.deleted == False) & \
(table.person_id == person_id)
existing = db(query).select(table.programme_id,
orderby = table.date,
).last()
if existing and existing.programme_id == programme_id:
# No action required
pass
else:
# Insert new record
table.insert(person_id = person_id,
date = request.utcnow,
programme_id = programme_id,
)
# Add record owner (user)
ltable = s3db.pr_person_user
utable = auth.settings.table_user
query = (ptable.id == person_id) & \
(ltable.pe_id == ptable.pe_id) & \
(utable.id == ltable.user_id)
user = db(query).select(utable.id,
utable.organisation_id,
utable.site_id,
limitby = (0, 1),
).first()
if user:
user_id = user.id
data.owned_by_user = user_id
if data:
record.update_record(**data)
if user and organisation_id:
profile = {}
if not user.organisation_id:
# Set the Organisation in the Profile, if not already set
profile["organisation_id"] = organisation_id
if not user.site_id:
# Set the Site in the Profile, if not already set
profile["site_id"] = site_id
else:
# How many active HR records does the user have?
query = (htable.deleted == False) & \
(htable.status == 1) & \
(htable.person_id == person_id)
rows = db(query).select(htable.id,
limitby = (0, 2),
)
if len(rows) == 1:
# We can safely update
profile["organisation_id"] = organisation_id
profile["site_id"] = site_id
if profile:
db(utable.id == user_id).update(**profile)
# =============================================================================
def hrm_compose():
"""
Send message to people/teams/participants
@ToDo: Better rewritten as an S3Method
"""
s3db = current.s3db
get_vars = current.request.get_vars
pe_id = None
if "human_resource.id" in get_vars:
fieldname = "human_resource.id"
record_id = get_vars.get(fieldname)
table = s3db.pr_person
htable = s3db.hrm_human_resource
query = (htable.id == record_id) & \
(htable.person_id == table.id)
title = current.T("Send a message to this person")
# URL to redirect to after message sent
url = URL(f = "compose",
vars = {fieldname: record_id},
)
elif "group_id" in get_vars:
fieldname = "group_id"
record_id = get_vars.group_id
table = s3db.pr_group
query = (table.id == record_id)
title = current.T("Send a message to this team")
# URL to redirect to after message sent
url = URL(f = "compose",
vars = {fieldname: record_id},
)
elif "training_event.id" in get_vars:
fieldname = "training_event.id"
record_id = get_vars.get(fieldname)
pe_id = get_vars.pe_id
title = current.T("Message Participants")
# URL to redirect to after message sent
url = URL(f = "training_event",
args = record_id,
)
else:
current.session.error = current.T("Record not found")
redirect(URL(f = "index"))
if not pe_id:
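        # Look up the pe_id using the query built above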
db = current.db
pe = db(query).select(table.pe_id,
limitby = (0, 1),
).first()
if not pe:
current.session.error = current.T("Record not found")
redirect(URL(f = "index"))
pe_id = pe.pe_id
if "hrm_id" in get_vars:
# Get the individual's communications options & preference
ctable = s3db.pr_contact
contact = db(ctable.pe_id == pe_id).select(ctable.contact_method,
limitby = (0, 1),
orderby = "priority",
).first()
if contact:
s3db.msg_outbox.contact_method.default = contact.contact_method
else:
current.session.error = current.T("No contact method found")
redirect(URL(f = "index"))
# Create the form
output = current.msg.compose(recipient = pe_id,
url = url,
)
output["title"] = title
response = current.response
representation = s3_get_extension()
response.headers["Content-Type"] = \
response.s3.content_type.get(representation, "text/html")
response.view = "msg/compose.html"
return output
# =============================================================================
def hrm_map_popup(r):
"""
Custom output to place inside a Map Popup
- called from postp of human_resource controller
"""
T = current.T
db = current.db
s3db = current.s3db
CONTACT_OPTS = current.msg.CONTACT_OPTS
record = r.record
if not record:
return ""
person_id = record.person_id
output = TABLE()
append = output.append
# Edit button
append(TR(TD(A(T("Edit"),
_target = "_blank",
_id = "edit-btn",
_href = URL(args = [r.id, "update"])
))))
# First name, last name
append(TR(TD(B("%s:" % T("Name"))),
TD(s3_fullname(person_id))))
# Job Title
if record.job_title_id:
field = r.table.job_title_id
append(TR(TD(B("%s:" % field.label)),
TD(field.represent(record.job_title_id))))
# Organization (better with just name rather than Represent)
# @ToDo: Make this configurable - some deployments will only see
# their staff so this is a meaningless field
#table = s3db.org_organisation
#query = (table.id == record.organisation_id)
#name = db(query).select(table.name,
# limitby = (0, 1),
# ).first().name
#append(TR(TD(B("%s:" % r.table.organisation_id.label)),
# TD(name)))
# Components link to the Person record
# Skills
table = s3db.hrm_competency
stable = s3db.hrm_skill
query = (table.person_id == person_id) & \
(table.deleted == False) & \
(table.skill_id == stable.id)
skills = db(query).select(stable.name)
if skills:
vals = [skill.name for skill in skills]
if len(skills) > 1:
represent = ", ".join(vals)
else:
represent = vals[0] if vals else ""
append(TR(TD(B("%s:" % T("Skills"))),
TD(represent)))
# Certificates
table = s3db.hrm_certification
ctable = s3db.hrm_certificate
query = (table.person_id == person_id) & \
(table.deleted == False) & \
(table.certificate_id == ctable.id)
certificates = db(query).select(ctable.name)
if certificates:
vals = [cert.name for cert in certificates]
if len(certificates) > 1:
represent = ", ".join(vals)
else:
represent = vals[0] if vals else ""
append(TR(TD(B("%s:" % T("Certificates"))),
TD(represent)))
# Trainings
table = s3db.hrm_training
etable = s3db.hrm_training_event
ctable = s3db.hrm_course
query = (table.person_id == person_id) & \
(table.deleted == False) & \
(table.training_event_id == etable.id) & \
(etable.course_id == ctable.id)
trainings = db(query).select(ctable.name)
if trainings:
vals = [train.name for train in trainings]
if len(trainings) > 1:
represent = ", ".join(vals)
else:
represent = vals[0] if vals else ""
append(TR(TD(B("%s:" % T("Trainings"))),
TD(represent)))
if record.location_id:
table = s3db.gis_location
query = (table.id == record.location_id)
location = db(query).select(table.path,
table.addr_street,
limitby = (0, 1),
).first()
# City
# Street address
if location.addr_street:
append(TR(TD(B("%s:" % table.addr_street.label)),
TD(location.addr_street)))
# Mobile phone number & Email address
ptable = s3db.pr_person
ctable = s3db.pr_contact
query = (ptable.id == person_id) & \
(ctable.pe_id == ptable.pe_id) & \
(ctable.deleted == False)
contacts = db(query).select(ctable.contact_method,
ctable.value,
)
email = mobile_phone = ""
for contact in contacts:
if contact.contact_method == "EMAIL":
email = contact.value
elif contact.contact_method == "SMS":
mobile_phone = contact.value
if mobile_phone:
append(TR(TD(B("%s:" % CONTACT_OPTS.get("SMS"))),
TD(mobile_phone)))
# Office number
if record.site_id:
table = s3db.org_office
query = (table.site_id == record.site_id)
office = db(query).select(table.phone1,
limitby = (0, 1),
).first()
if office and office.phone1:
append(TR(TD(B("%s:" % T("Office Phone"))),
TD(office.phone1)))
else:
# @ToDo: Support other Facility Types (Hospitals & Shelters)
pass
# Email address (as hyperlink)
if email:
append(TR(TD(B("%s:" % CONTACT_OPTS.get("EMAIL"))),
TD(A(email,
_href = "mailto:%s" % email,
))))
return output
# =============================================================================
def hrm_training_month(row):
""" Year/Month of the start date of the training event """
if hasattr(row, "hrm_training"):
row = row.hrm_training
try:
date = row.date
except AttributeError:
# not available
date = None
if date:
return "%s/%02d" % (date.year, date.month)
else:
return NONE
# -------------------------------------------------------------------------
def hrm_training_year(row):
""" The Year of the training event """
if hasattr(row, "hrm_training"):
row = row.hrm_training
try:
date = row.date
except AttributeError:
# not available
date = None
if date:
return date.year
else:
return NONE
# =============================================================================
def hrm_training_job_title(row):
"""
        Which Job Title(s) the person is active with
"""
try:
person_id = row.hrm_training.person_id
except AttributeError:
# not available
person_id = None
if person_id:
s3db = current.s3db
table = s3db.hrm_human_resource
jtable = s3db.hrm_job_title
query = (table.person_id == person_id) & \
(table.status != 2) & \
(table.job_title_id == jtable.id)
jobs = current.db(query).select(jtable.name,
distinct = True,
orderby = jtable.name,
)
if jobs:
output = ""
for job in jobs:
jobtitle = job.name
if output:
output = "%s, %s" % (output, jobtitle)
else:
output = jobtitle
return output
return NONE
# =============================================================================
def hrm_training_organisation(row):
"""
Which Organisation(s)/Branch(es) the person is actively affiliated with
"""
try:
person_id = row.hrm_training.person_id
except AttributeError:
# not available
person_id = None
if person_id:
s3db = current.s3db
table = s3db.hrm_human_resource
query = (table.person_id == person_id) & \
(table.status != 2)
orgs = current.db(query).select(table.organisation_id,
distinct = True,
)
if orgs:
output = ""
represent = s3db.org_OrganisationRepresent()
for org in orgs:
org_repr = represent(org.organisation_id)
if output:
output = "%s, %s" % (output, org_repr)
else:
output = org_repr
return output
return NONE
# =============================================================================
def hrm_rheader(r, tabs=None, profile=False):
""" Resource headers for component views """
if r.representation != "html":
# RHeaders only used in interactive views
return None
record = r.record
if record is None:
# List or Create form: rheader makes no sense here
return None
T = current.T
table = r.table
resourcename = r.name
if resourcename == "person":
record_id = r.id
db = current.db
s3db = current.s3db
htable = s3db.hrm_human_resource
settings = current.deployment_settings
get_vars = r.get_vars
hr = get_vars.get("human_resource.id", None)
if hr:
name = s3db.hrm_human_resource_represent(int(hr))
else:
# Look up HR record ID (required for link URL construction)
            # @ToDo: handle multiple HR records (which one are we looking at?)
query = (htable.person_id == record_id) & \
(htable.deleted == False)
hr = db(query).select(htable.id,
limitby = (0, 1),
).first()
if hr:
hr = hr.id
name = s3_fullname(record)
group = get_vars.get("group", None)
if group is None:
controller = r.controller
if controller == "vol":
group = "volunteer"
else:
group = "staff"
use_cv = settings.get_hrm_cv_tab()
record_tab = settings.get_hrm_record_tab()
experience_tab = None
service_record = ""
tbl = TABLE(TR(TH(name,
# @ToDo: Move to CSS
_style = "padding-top:15px",
),
),
)
experience_tab2 = None
if group == "volunteer":
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both", "activity"):
# Integrated into Record tab
#experience_tab = (T("Hours"), "hours")
# Show all Hours spent on both Programmes/Activities & Trainings
# - last month & last year
now = r.utcnow
last_year = now - datetime.timedelta(days=365)
if vol_experience == "activity":
ahtable = db.vol_activity_hours
attable = db.vol_activity_hours_activity_type
bquery = (ahtable.deleted == False) & \
(ahtable.person_id == record_id)
bleft = [attable.on(ahtable.id == attable.activity_hours_id),
]
dfield = ahtable.date
fields = [dfield,
ahtable.hours,
ahtable.id,
#ahtable.training,
attable.activity_type_id,
]
else:
ptable = s3db.hrm_programme
phtable = db.hrm_programme_hours
bquery = (phtable.deleted == False) & \
(phtable.person_id == record_id)
bleft = None
query = (phtable.programme_id == ptable.id)
query &= bquery
row = db(query).select(ptable.name,
phtable.date,
orderby = phtable.date,
).last()
if row:
programme = row.hrm_programme.name
else:
programme = ""
dfield = phtable.date
fields = [dfield,
phtable.hours,
phtable.training,
]
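            # Aggregate the hours recorded over the last year and the last 30 days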
training_hours_year = 0
training_hours_month = 0
query = bquery & \
(dfield > last_year.date())
rows = db(query).select(*fields,
left = bleft)
programme_hours_year = 0
programme_hours_month = 0
last_month = now - datetime.timedelta(days=30)
last_month = last_month.date()
if vol_experience == "activity":
activity_hour_ids = []
ahappend = activity_hour_ids.append
activity_type_ids = []
atappend = activity_type_ids.append
for row in rows:
atappend(row["vol_activity_hours_activity_type.activity_type_id"])
ah_id = row["vol_activity_hours.id"]
if ah_id in activity_hour_ids:
# Don't double-count when more than 1 Activity Type
continue
ahappend(ah_id)
hours = row["vol_activity_hours.hours"]
if hours:
programme_hours_year += hours
if row["vol_activity_hours.date"] > last_month:
programme_hours_month += hours
# Uniquify
activity_type_ids = list(set(activity_type_ids))
# Represent
activity_types = s3db.vol_activity_activity_type.activity_type_id.represent.bulk(activity_type_ids)
if activity_types == [NONE]:
activity_types = NONE
else:
activity_types = list(activity_types.values())
activity_types.remove(NONE)
activity_types = ", ".join([s3_str(v) for v in activity_types])
else:
for row in rows:
hours = row.hours
if hours:
training = row.training
if training:
training_hours_year += hours
if row.date > last_month:
training_hours_month += hours
else:
programme_hours_year += hours
if row.date > last_month:
programme_hours_month += hours
vol_active = settings.get_hrm_vol_active()
if vol_active:
if hr:
dtable = s3db.vol_details
row = db(dtable.human_resource_id == hr).select(dtable.active,
limitby = (0, 1),
).first()
if row and row.active:
active = TD(DIV(T("Yes"),
# @ToDo: Move to CSS
_style = "color:green",
))
else:
active = TD(DIV(T("No"),
# @ToDo: Move to CSS
_style = "color:red",
))
else:
active = TD(DIV(T("No"),
# @ToDo: Move to CSS
_style = "color:red",
))
vol_active_tooltip = settings.get_hrm_vol_active_tooltip()
if vol_active_tooltip:
tooltip = SPAN(_class = "tooltip",
_title = "%s|%s" % (T("Active"),
T(vol_active_tooltip)),
_style = "display:inline-block",
)
else:
tooltip = ""
active_cells = [TH("%s:" % T("Active?"), tooltip),
active]
else:
active_cells = []
if vol_experience == "activity":
row1 = TR(*active_cells
)
row2 = TR(TH("%s:" % T("Activity Types")),
str(activity_types),
)
row3 = TR(TH("%s:" % T("Activity Hours (Month)")),
str(programme_hours_month),
)
row4 = TR(TH("%s:" % T("Activity Hours (Year)")),
str(programme_hours_year),
)
else:
if programme:
row1 = TR(TH("%s:" % T("Program")),
programme,
*active_cells
)
else:
row1 = TR(*active_cells
)
row2 = TR(TH("%s:" % T("Program Hours (Month)")),
str(programme_hours_month),
TH("%s:" % T("Training Hours (Month)")),
str(training_hours_month)
)
row3 = TR(TH("%s:" % T("Program Hours (Year)")),
str(programme_hours_year),
TH("%s:" % T("Training Hours (Year)")),
str(training_hours_year)
)
row4 = ""
tbl = TABLE(TR(TH(name,
_colspan = 4,
),
),
row1,
row2,
row3,
row4,
)
service_record = A(T("Service Record"),
_href = URL(c = "vol",
f = "human_resource",
args = [hr, "form"]
),
_id = "service_record",
_class = "action-btn"
)
if vol_experience == "both" and not use_cv:
experience_tab2 = (T("Experience"), "experience")
elif vol_experience == "experience" and not use_cv:
experience_tab = (T("Experience"), "experience")
elif settings.get_hrm_staff_experience() == "experience" and not use_cv:
experience_tab = (T("Experience"), "experience")
if settings.get_hrm_id_cards():
card_button = A(T("ID Card"),
data = {"url": URL(f = "human_resource",
args = ["%s.card" % hr]
),
},
_class = "action-btn s3-download-button",
)
else:
card_button = ""
if settings.get_hrm_use_certificates() and not use_cv:
certificates_tab = (T("Certificates"), "certification")
else:
certificates_tab = None
if settings.get_hrm_use_credentials():
credentials_tab = (T("Credentials"), "credential")
else:
credentials_tab = None
if settings.get_hrm_vol_availability_tab():
availability_tab = (T("Availability"), "availability")
else:
availability_tab = None
if settings.get_hrm_unavailability():
unavailability_tab = (T("Availability"), "unavailability", {}, "organize")
else:
unavailability_tab = None
medical_tab = settings.get_hrm_use_medical() or None
if medical_tab:
medical_tab = (T(medical_tab), "medical")
description_tab = settings.get_hrm_use_description() or None
if description_tab:
description_tab = (T(description_tab), "physical_description")
if settings.get_hrm_use_education() and not use_cv:
education_tab = (T("Education"), "education")
else:
education_tab = None
if settings.get_hrm_use_id():
id_tab = (T("ID"), "identity")
else:
id_tab = None
if settings.get_hrm_use_address():
address_tab = (T("Address"), "address")
else:
address_tab = None
if settings.get_hrm_salary():
salary_tab = (T("Salary"), "salary")
else:
salary_tab = None
if settings.get_hrm_use_skills() and not use_cv:
skills_tab = (T("Skills"), "competency")
else:
skills_tab = None
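        # A separate Teams tab is only offered when the consolidated Record tab is not used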
if record_tab != "record":
teams = settings.get_hrm_teams()
if teams:
teams_tab = (T(teams), "group_membership")
else:
teams_tab = None
else:
teams_tab = None
trainings_tab = instructor_tab = None
if settings.get_hrm_use_trainings():
if not use_cv:
trainings_tab = (T("Trainings"), "training")
if settings.get_hrm_training_instructors() in ("internal", "both"):
instructor_tab = (T("Instructor"), "training_event")
if use_cv:
trainings_tab = (T("CV"), "cv")
hr_tab = None
duplicates_tab = None
if not record_tab:
record_method = None
elif record_tab == "record":
record_method = "record"
if not profile and current.auth.s3_has_role("ADMIN"):
query = (htable.person_id == record_id) & \
(htable.deleted == False)
hr_records = db(query).count()
if hr_records > 1:
duplicates_tab = (T("Duplicates"),
"human_resource",
{"hr": "all"}, # Ensure no &human_resource.id=XXXX
)
else:
# Default
record_method = "human_resource"
record_label = settings.get_hrm_record_label()
if profile:
# Configure for personal mode
if record_method:
hr_tab = (T(record_label), record_method)
tabs = [(T("Person Details"), None),
(T("User Account"), "user"),
hr_tab,
id_tab,
medical_tab,
description_tab,
address_tab,
]
contacts_tabs = settings.get_pr_contacts_tabs()
if "all" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("all"),
"contacts",
))
if "public" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("public_contacts"),
"public_contacts",
))
if "private" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("private_contacts"),
"private_contacts",
))
tabs += [availability_tab,
education_tab,
trainings_tab,
certificates_tab,
skills_tab,
credentials_tab,
experience_tab,
experience_tab2,
instructor_tab,
teams_tab,
unavailability_tab,
#(T("Assets"), "asset"),
]
#elif current.session.s3.hrm.mode is not None:
# # Configure for personal mode
# tabs = [(T("Person Details"), None),
# id_tab,
# description_tab,
# address_tab,
# ]
# contacts_tabs = settings.get_pr_contacts_tabs()
# if "all" in contacts_tabs:
# tabs.append((settings.get_pr_contacts_tab_label("all"),
# "contacts",
# ))
# if "public" in contacts_tabs:
# tabs.append((settings.get_pr_contacts_tab_label("public_contacts"),
# "public_contacts",
# ))
# if "private" in contacts_tabs:
# tabs.append((settings.get_pr_contacts_tab_label("private_contacts"),
# "private_contacts",
# ))
# if record_method is not None:
# hr_tab = (T("Positions"), "human_resource")
# tabs += [availability_tab,
# trainings_tab,
# certificates_tab,
# skills_tab,
# credentials_tab,
# experience_tab,
# experience_tab2,
# hr_tab,
# teams_tab,
# (T("Assets"), "asset"),
# ]
else:
# Configure for HR manager mode
hr_record = record_label
if group == "staff":
awards_tab = None
elif group == "volunteer":
if settings.get_hrm_use_awards() and not use_cv:
awards_tab = (T("Awards"), "award")
else:
awards_tab = None
if record_method:
hr_tab = (T(hr_record), record_method)
tabs = [(T("Person Details"), None, {"native": True}),
hr_tab,
duplicates_tab,
id_tab,
medical_tab,
description_tab,
address_tab,
]
contacts_tabs = settings.get_pr_contacts_tabs()
if "all" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("all"),
"contacts",
))
if "public" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("public_contacts"),
"public_contacts",
))
if "private" in contacts_tabs:
tabs.append((settings.get_pr_contacts_tab_label("private_contacts"),
"private_contacts",
))
tabs += [availability_tab,
salary_tab,
education_tab,
trainings_tab,
certificates_tab,
skills_tab,
credentials_tab,
experience_tab,
experience_tab2,
instructor_tab,
awards_tab,
teams_tab,
unavailability_tab,
(T("Assets"), "asset"),
]
if settings.get_hrm_roles_tab():
# Add role manager tab if a user record exists
user_id = current.auth.s3_get_user_id(record_id)
if user_id:
tabs.append((T("Roles"), "roles"))
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader_btns = DIV(service_record, card_button,
# @ToDo: Move to CSS
_style = "margin-bottom:10px",
_class = "rheader-btns",
)
rheader = DIV(rheader_btns,
A(s3_avatar_represent(record_id,
"pr_person",
_class = "rheader-avatar",
),
_href = URL(f="person",
args = [record_id, "image", "create"],
vars = get_vars,
),
),
tbl,
rheader_tabs,
)
elif resourcename == "activity":
# Tabs
tabs = [(T("Activity Details"), None),
(T("Hours"), "hours"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
record.name),
TR(TH("%s: " % table.sector_id.label),
table.sector_id.represent(record.sector_id)),
# @ToDo: (ltable)
#TR(TH("%s: " % table.activity_type_id.label),
# table.activity_type_id.represent(record.activity_type_id)),
TR(TH("%s: " % table.location_id.label),
table.location_id.represent(record.location_id)),
TR(TH("%s: " % table.date.label),
table.date.represent(record.date)),
),
rheader_tabs,
)
elif resourcename == "training_event":
settings = current.deployment_settings
# Tabs
if not tabs:
tabs = [(T("Training Event Details"), None),
(T("Participants"), "participant"),
]
if settings.has_module("dc"):
label = settings.get_dc_response_label()
if label == "Survey":
label = T("Surveys")
else:
label = T("Assessments")
tabs.append((label, "target"),)
rheader_tabs = s3_rheader_tabs(r, tabs)
action = ""
if settings.has_module("msg"):
permit = current.auth.permission.has_permission
if permit("update", c="hrm", f="compose") and permit("update", c="msg"):
# @ToDo: Be able to see who has been messaged, whether messages bounced, receive confirmation responses, etc
action = A(T("Message Participants"),
_href = URL(f = "compose",
vars = {"training_event.id": record.id,
"pe_id": record.pe_id,
},
),
_class = "action-btn send"
)
if settings.get_hrm_event_types():
event_type = TR(TH("%s: " % table.event_type_id.label),
table.event_type_id.represent(record.event_type_id))
event_name = TR(TH("%s: " % table.name.label),
record.name)
else:
event_type = ""
event_name = ""
instructors = settings.get_hrm_training_instructors()
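        # Build the instructor row(s) according to the configured instructor mode (internal/external/both/multiple)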
if instructors == "internal":
instructors = TR(TH("%s: " % table.person_id.label),
table.person_id.represent(record.person_id))
elif instructors == "external":
instructors = TR(TH("%s: " % table.instructor.label),
table.instructor.represent(record.instructor))
elif instructors == "both":
instructors = TAG[""](TR(TH("%s: " % table.person_id.label),
table.person_id.represent(record.person_id)),
TR(TH("%s: " % table.instructor.label),
table.instructor.represent(record.instructor)))
elif instructors == "multiple":
itable = current.s3db.hrm_training_event_instructor
pfield = itable.person_id
instructors = current.db(itable.training_event_id == r.id).select(pfield)
represent = pfield.represent
instructors = ",".join([represent(i.person_id) for i in instructors])
instructors = TR(TH("%s: " % T("Instructors")),
instructors)
else:
instructors = ""
rheader = DIV(TABLE(event_type,
event_name,
TR(TH("%s: " % table.organisation_id.label),
table.organisation_id.represent(record.organisation_id)),
TR(TH("%s: " % table.course_id.label),
table.course_id.represent(record.course_id)),
TR(TH("%s: " % table.site_id.label),
table.site_id.represent(record.site_id)),
TR(TH("%s: " % table.start_date.label),
table.start_date.represent(record.start_date)),
instructors,
TR(TH(action,
_colspan = 2,
)),
),
rheader_tabs,
)
elif resourcename == "certificate":
# Tabs
tabs = [(T("Certificate Details"), None),
]
settings = current.deployment_settings
if settings.get_hrm_use_skills() and settings.get_hrm_certificate_skill():
tabs.append((T("Skill Equivalence"), "certificate_skill"))
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
record.name),
),
rheader_tabs,
)
elif resourcename == "certification":
# Tabs
tabs = [(T("Certification Details"), None),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % table.person_id.label),
table.person_id.represent(record.person_id)),
TR(TH("%s: " % table.certificate_id.label),
table.certificate_id.represent(record.certificate_id)),
),
rheader_tabs,
)
elif resourcename == "course":
# Tabs
tabs = [(T("Course Details"), None),
(T("Course Certificates"), "course_certificate"),
(T("Trainees"), "training"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
record.name),
),
rheader_tabs,
)
elif resourcename == "programme":
# Tabs
tabs = [(T("Program Details"), None),
(T("Volunteer Hours"), "person"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
record.name),
),
rheader_tabs,
)
elif resourcename == "shift":
db = current.db
s3db = current.s3db
record_id = r.id
# Look up Site
stable = s3db.org_site_shift
link = db(stable.shift_id == record_id).select(stable.site_id,
limitby = (0, 1),
).first()
if link:
site_id = link.site_id
else:
site_id = None
# Look up Assigned
htable = s3db.hrm_human_resource_shift
link = db(htable.shift_id == record_id).select(htable.human_resource_id,
limitby = (0, 1),
).first()
if link:
human_resource_id = link.human_resource_id
else:
human_resource_id = None
rheader = DIV(TABLE(TR(TH("%s: " % stable.site_id.label),
stable.site_id.represent(site_id),
),
TR(TH("%s: " % table.skill_id.label),
table.skill_id.represent(record.skill_id),
TH("%s: " % table.job_title_id.label),
table.job_title_id.represent(record.job_title_id),
),
TR(TH("%s: " % table.start_date.label),
table.start_date.represent(record.start_date),
TH("%s: " % table.end_date.label),
table.end_date.represent(record.end_date),
),
TR(TH("%s: " % htable.human_resource_id.label),
htable.human_resource_id.represent(human_resource_id),
),
),
)
else:
rheader = None
return rheader
# =============================================================================
def hrm_competency_controller():
"""
RESTful CRUD controller
- used for Searching for people by Skill
- used for Adding/Editing on Profile page
"""
T = current.T
s3db = current.s3db
s3 = current.response.s3
def prep(r):
if r.method in ("create", "create.popup", "update", "update.popup"):
# Coming from Profile page?
table = r.table
get_vars = r.get_vars
person_id = get_vars.get("~.person_id", None)
if person_id:
try:
person_id = int(person_id)
except ValueError:
pass
else:
field = table.person_id
field.default = person_id
field.readable = field.writable = False
# Additional filtering of the profile section by skill type
skill_type_name = get_vars.get("~.skill_id$skill_type_id$name")
if skill_type_name:
ttable = s3db.hrm_skill_type
query = (ttable.name == skill_type_name)
rows = current.db(query).select(ttable.id)
skill_type_ids = [row.id for row in rows]
if skill_type_ids:
field = table.skill_id
requires = field.requires
if isinstance(requires, IS_EMPTY_OR):
requires = requires.other
if hasattr(requires, "set_filter"):
requires.set_filter(filterby = "skill_type_id",
filter_opts = skill_type_ids,
)
elif not r.id:
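            # List view: configure filter widgets for finding people by name, job title, skill or competency rating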
filter_widgets = [
S3TextFilter(["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
"person_id$hrm_human_resource.job_title_id$name",
],
label = T("Search"),
comment = T("You can search by job title or person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons."),
),
S3OptionsFilter("skill_id",
label = T("Skills"),
options = lambda: \
s3_get_filter_opts("hrm_skill", translate=True),
),
S3OptionsFilter("competency_id",
label = T("Competency"),
options = lambda: \
s3_get_filter_opts("hrm_competency_rating", translate=True),
),
]
s3db.configure("hrm_competency",
filter_widgets = filter_widgets,
list_fields = ["person_id",
"skill_id",
"competency_id",
"comments",
],
)
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
# Custom action button to add the member to a team
S3CRUD.action_buttons(r)
args = ["[id]", "group_membership"]
s3.actions.append({"label": str(T("Add to a Team")),
"_class": "action-btn",
"url": URL(f = "person",
args = args),
}
)
return output
s3.postp = postp
return current.rest_controller("hrm", "competency",
# @ToDo: Create these if-required
#csv_stylesheet = ("hrm", "competency.xsl"),
#csv_template = ("hrm", "competency"),
)
# =============================================================================
def hrm_credential_controller():
"""
RESTful CRUD controller
- could be used for Searching for people by Skill
- used for Adding/Editing on Profile page
"""
s3 = current.response.s3
def prep(r):
table = r.table
if r.method in ("create", "create.popup", "update", "update.popup"):
# Coming from Profile page?
person_id = r.get_vars.get("~.person_id", None)
if person_id:
field = table.person_id
field.default = person_id
field.readable = field.writable = False
if r.record:
table.person_id.comment = None
table.person_id.writable = False
return True
s3.prep = prep
return current.rest_controller("hrm", "credential",
# @ToDo: Create these if-required
#csv_stylesheet = ("hrm", "credential.xsl"),
#csv_template = ("hrm", "credential"),
)
# =============================================================================
def hrm_experience_controller():
"""
Experience Controller, defined in the model for use from
multiple controllers for unified menus
- used for Adding/Editing on Profile page
"""
def prep(r):
if r.method in ("create", "update"):
# Coming from Profile page?
field = current.s3db.hrm_experience.person_id
person_id = current.request.get_vars.get("~.person_id", None)
if person_id:
field.default = person_id
field.readable = field.writable = False
elif r.method == "update":
# Workaround until generic solution available:
refresh = r.get_vars.get("refresh")
if refresh and refresh.startswith("profile-list-hrm_experience"):
field.readable = field.writable = False
return True
current.response.s3.prep = prep
return current.rest_controller("hrm", "experience",
# @ToDo: Create these if-required
#csv_stylesheet = ("hrm", "experience.xsl"),
#csv_template = ("hrm", "experience"),
)
# =============================================================================
def hrm_group_controller():
"""
Team controller
- uses the group table from PR
"""
T = current.T
s3db = current.s3db
s3 = current.response.s3
settings = current.deployment_settings
team_name = settings.get_hrm_teams()
tablename = "pr_group"
table = s3db[tablename]
_group_type = table.group_type
if team_name == "Teams":
_group_type.label = T("Team Type")
table.description.label = T("Team Description")
table.name.label = T("Team Name")
# Default anyway
#elif team_name == "Groups":
# _group_type.label = T("Group Type")
# table.description.label = T("Group Description")
# table.name.label = T("Group Name")
# Set Defaults
_group_type.default = 3 # 'Relief Team'
# We use crud_form
#_group_type.readable = _group_type.writable = False
# Only show Relief Teams
# Do not show system groups
s3.filter = (table.system == False) & \
(_group_type == 3)
if team_name == "Teams":
# CRUD Strings
s3.crud_strings[tablename] = Storage(
label_create = T("Add Team"),
title_display = T("Team Details"),
title_list = T("Teams"),
title_update = T("Edit Team"),
label_list_button = T("List Teams"),
label_search_button = T("Search Teams"),
msg_record_created = T("Team added"),
msg_record_modified = T("Team updated"),
msg_record_deleted = T("Team deleted"),
msg_list_empty = T("No Teams currently registered"),
)
# Format for filter_widgets & imports
s3db.add_components("pr_group",
org_organisation_team = "group_id")
# Pre-process
def prep(r):
# Redirect to member list when a new group has been created
create_next = URL(f="group",
args=["[id]", "group_membership"])
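        # If teams are linked to organisations, embed the organisation link(s) in the form, filters and list fields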
teams_orgs = settings.get_hrm_teams_orgs()
if teams_orgs:
if teams_orgs == 1:
multiple = False
else:
multiple = True
ottable = s3db.org_organisation_team
label = ottable.organisation_id.label
ottable.organisation_id.label = ""
crud_form = S3SQLCustomForm("name",
"description",
S3SQLInlineComponent("organisation_team",
label = label,
fields = ["organisation_id"],
multiple = multiple,
),
"comments",
)
filter_widgets = [
S3TextFilter(["name",
"description",
"comments",
"organisation_team.organisation_id$name",
"organisation_team.organisation_id$acronym",
],
label = T("Search"),
comment = T("You can search by by group name, description or comments and by organization name or acronym. You may use % as wildcard. Press 'Search' without input to list all."),
#_class="filter-search",
),
S3OptionsFilter("organisation_team.organisation_id",
label = T("Organization"),
#hidden=True,
),
]
list_fields = ["organisation_team.organisation_id",
"name",
"description",
"comments",
]
s3db.configure("pr_group",
create_next = create_next,
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = list_fields,
)
else:
s3db.configure("pr_group",
create_next = create_next,
)
if r.interactive or r.representation in ("aadata", "xls", "pdf"):
if r.component_name == "group_membership":
hrm_configure_pr_group_membership()
if r.representation == "xls":
# Modify Title of Report to show Team Name
s3.crud_strings.pr_group_membership.title_list = r.record.name
# Make it match Import sheets
tablename = "pr_group_membership"
list_fields = s3db.get_config(tablename, "list_fields")
# Remove "id" as XLS exporter doesn't like this not being first & has complicated skipping routines
try:
list_fields.remove("id")
except ValueError:
pass
# Separate Facility Type from Facility Name
s3db.hrm_human_resource.site_id.represent = s3db.org_SiteRepresent(show_type = False)
i = 0
for f in list_fields:
i += 1
if f == "site_id":
break
list_fields.insert(i,
(T("Facility Type"),
"person_id$human_resource.site_id$instance_type"))
# Split person_id into first/middle/last
try:
list_fields.remove("person_id")
except ValueError:
pass
list_fields = ["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
] + list_fields
s3db.configure(tablename,
list_fields = list_fields,
)
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
if not r.component:
update_url = URL(args=["[id]", "group_membership"])
S3CRUD.action_buttons(r, update_url=update_url)
if current.deployment_settings.has_module("msg") and \
current.auth.permission.has_permission("update", c="hrm",
f="compose"):
s3.actions.append({
"url": URL(f="compose",
vars = {"group_id": "[id]"}),
"_class": "action-btn send",
"label": s3_str(T("Send Message"))})
return output
s3.postp = postp
if team_name == "Team":
label = T("Team Details")
elif team_name == "Group":
label = T("Group Details")
else:
label = T("Basic Details")
tabs = [(label, None),
# Team should be contacted either via the Leader or
# simply by sending a message to the group as a whole.
#(T("Contact Data"), "contact"),
(T("Members"), "group_membership"),
(T("Documents"), "document"),
]
return current.rest_controller("pr", "group",
csv_stylesheet = ("hrm", "group.xsl"),
csv_template = "group",
rheader = lambda r: \
s3db.pr_rheader(r, tabs=tabs),
)
# =============================================================================
def hrm_human_resource_controller(extra_filter = None):
"""
Human Resources Controller, defined in the model for use from
multiple controllers for unified menus
- used for Summary & Profile views, Imports and S3AddPersonWidget
"""
T = current.T
db = current.db
s3db = current.s3db
s3 = current.response.s3
settings = current.deployment_settings
def prep(r):
# Apply extra filter from controller
if extra_filter is not None:
r.resource.add_filter(extra_filter)
c = r.controller
deploy = c == "deploy"
vol = c == "vol"
if deploy:
# Apply availability filter
s3db.deploy_availability_filter(r)
elif settings.get_hrm_unavailability():
# Apply availability filter
s3db.pr_availability_filter(r)
if s3.rtl:
# Ensure that + appears at the beginning of the number
# - using table alias to only apply to filtered component
f = s3db.get_aliased(s3db.pr_contact, "pr_phone_contact").value
f.represent = s3_phone_represent
f.widget = S3PhoneWidget()
method = r.method
if method in ("form", "lookup"):
return True
elif method == "profile":
# Adapt list_fields for pr_address
s3db.table("pr_address") # must load model before get_config
list_fields = s3db.get_config("pr_address", "list_fields")
list_fields.append("comments")
# Show training date without time
s3db.hrm_training.date.represent = lambda d: \
S3DateTime.date_represent(d, utc=True)
# Adapt list_fields for hrm_training
list_fields = ["course_id",
"training_event_id$site_id",
"date",
"hours",
"grade",
"comments",
]
if deploy:
list_fields.append("course_id$course_job_title.job_title_id")
s3db.configure("hrm_training",
list_fields = list_fields,
)
# Adapt list_fields for hrm_experience
s3db.table("hrm_experience") # Load normal model
s3db.configure("hrm_experience",
list_fields = [#"code",
"employment_type",
"activity_type",
"organisation_id",
"organisation",
"job_title_id",
"job_title",
"responsibilities",
"start_date",
"end_date",
"hours",
"location_id",
"supervisor_id",
"comments",
],
)
# Get the person's full name for header, and pe_id for
# context filtering
table = r.table
record = r.record
person_id = record.person_id
ptable = db.pr_person
person = db(ptable.id == person_id).select(ptable.first_name,
ptable.middle_name,
ptable.last_name,
ptable.pe_id,
limitby = (0, 1),
).first()
name = s3_fullname(person)
pe_id = person.pe_id
comments = table.organisation_id.represent(record.organisation_id)
if record.job_title_id:
comments = (SPAN("%s, " % \
s3_str(table.job_title_id.represent(record.job_title_id))),
comments)
# Configure widgets
contacts_widget = {"label": "Contacts",
"label_create": "Add Contact",
"tablename": "pr_contact",
"type": "datalist",
"filter": FS("pe_id") == pe_id,
"icon": "phone",
# Default renderer:
#"list_layout": s3db.pr_render_contact,
"orderby": "priority asc",
# Can't do this as this is the HR perspective, not Person perspective
#"create_controller": c,
#"create_function": "person",
#"create_component": "contact",
}
address_widget = {"label": "Address",
"label_create": "Add Address",
"type": "datalist",
"tablename": "pr_address",
"filter": FS("pe_id") == pe_id,
"icon": "home",
# Default renderer:
#"list_layout": s3db.pr_render_address,
# Can't do this as this is the HR perspective, not Person perspective
#"create_controller": c,
#"create_function": "person",
#"create_component": "address",
}
skills_widget = {"label": "Skills",
"label_create": "Add Skill",
"type": "datalist",
"tablename": "hrm_competency",
"filter": FS("person_id") == person_id,
"icon": "comment-alt",
# Default renderer:
#"list_layout": hrm_competency_list_layout,
"create_controller": c,
# Can't do this as this is the HR perspective, not Person perspective
#"create_function": "person",
#"create_component": "competency",
}
trainings_widget = {"label": "Trainings",
"label_create": "Add Training",
"type": "datalist",
"tablename": "hrm_training",
"filter": FS("person_id") == person_id,
"icon": "wrench",
# Default renderer:
#"list_layout": hrm_training_list_layout,
"create_controller": c,
# Can't do this as this is the HR perspective, not Person perspective
#"create_function": "person",
#"create_component": "training",
}
experience_widget = {"label": "Experience",
"label_create": "Add Experience",
"type": "datalist",
"tablename": "hrm_experience",
"filter": FS("person_id") == person_id,
"icon": "truck",
# Default renderer:
#"list_layout": hrm_experience_list_layout,
"create_controller": c,
# Can't do this as this is the HR perspective, not Person perspective
#"create_function": "person",
#"create_component": "experience",
}
docs_widget = {"label": "Documents",
"label_create": "Add Document",
"type": "datalist",
"tablename": "doc_document",
"filter": FS("doc_id") == record.doc_id,
"icon": "attachment",
# Default renderer:
#"list_layout": s3db.doc_document_list_layout,
}
profile_widgets = [contacts_widget,
address_widget,
skills_widget,
trainings_widget,
experience_widget,
docs_widget,
]
if settings.get_hrm_use_education():
education_widget = {"label": "Education",
"label_create": "Add Education",
"type": "datalist",
"tablename": "pr_education",
"filter": FS("person_id") == person_id,
"icon": "book",
# Can't do this as this is the HR perspective, not Person perspective
#"create_controller": c,
#"create_function": "person",
#"create_component": "education",
}
profile_widgets.insert(-1, education_widget)
if deploy:
credentials_widget = {# @ToDo: deployment_setting for Labels
"label": "Sectors",
"label_create": "Add Sector",
"type": "datalist",
"tablename": "hrm_credential",
"filter": FS("person_id") == person_id,
"icon": "tags",
# Default renderer:
#"list_layout": hrm_credential_list_layout,
"create_controller": c,
# Can't do this as this is the HR perspective, not Person perspective
#"create_function": "person",
#"create_component": "credential",
}
profile_widgets.insert(2, credentials_widget)
# Organizer-widget to record periods of unavailability:
#profile_widgets.append({"label": "Unavailability",
# "type": "organizer",
# "tablename": "deploy_unavailability",
# "master": "pr_person/%s" % person_id,
# "component": "unavailability",
# "icon": "calendar",
# "url": URL(c="deploy", f="person",
# args = [person_id, "unavailability"],
# ),
# })
if settings.get_hrm_unavailability():
unavailability_widget = {"label": "Unavailability",
"type": "organizer",
"tablename": "pr_unavailability",
"master": "pr_person/%s" % person_id,
"component": "unavailability",
"icon": "calendar",
"url": URL(c="pr", f="person",
args = [person_id, "unavailability"],
),
}
profile_widgets.insert(-1, unavailability_widget)
# Configure resource
s3db.configure("hrm_human_resource",
profile_cols = 1,
profile_header = DIV(A(s3_avatar_represent(person_id,
tablename = "pr_person",
_class = "media-object",
),
_class = "pull-left",
#_href = event_url,
),
H2(name),
P(comments),
_class = "profile-header",
),
profile_title = "%s : %s" % (
s3_str(s3.crud_strings["hrm_human_resource"].title_display),
s3_str(name),
),
profile_widgets = profile_widgets,
)
elif method == "summary":
# CRUD Strings
if deploy:
deploy_team = settings.get_deploy_team_label()
s3.crud_strings["hrm_human_resource"]["title_list"] = \
T("%(team)s Members") % {"team": T(deploy_team)}
else:
s3.crud_strings["hrm_human_resource"]["title_list"] = \
T("Staff & Volunteers")
# Filter Widgets
filter_widgets = hrm_human_resource_filters(resource_type = "both",
hrm_type_opts = s3db.hrm_type_opts)
# List Fields
list_fields = ["person_id",
"job_title_id",
"organisation_id",
]
# Report Options
report_fields = ["organisation_id",
"person_id",
"person_id$gender",
"job_title_id",
(T("Training"), "training.course_id"),
]
rappend = report_fields.append
if settings.get_hrm_use_national_id():
list_fields.append((T("National ID"), "person_id$national_id.value"))
use_code = settings.get_hrm_use_code()
if use_code is True or use_code and not vol:
list_fields.append("code")
if vol:
vol_active = settings.get_hrm_vol_active()
if vol_active:
list_fields.append((T("Active"), "details.active"))
rappend((T("Active"), "details.active"))
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
list_fields.append((T("Program"), "person_id$hours.programme_id"))
rappend((T("Program"), "person_id$hours.programme_id"))
elif settings.get_hrm_staff_departments():
list_fields.extend(("department_id",
"site_id"))
report_fields.extend(("site_id",
"department_id"))
else:
list_fields.append("site_id")
rappend("site_id")
list_fields.extend(((T("Email"), "email.value"),
(settings.get_ui_label_mobile_phone(), "phone.value"),
))
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
for level in levels:
rappend("location_id$%s" % level)
if deploy:
rappend((T("Credential"), "credential.job_title_id"))
teams = settings.get_hrm_teams()
if teams:
if teams == "Teams":
teams = "Team"
elif teams == "Groups":
teams = "Group"
rappend((teams, "group_membership.group_id"))
if settings.get_org_regions():
rappend("organisation_id$organisation_region.region_id")
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = report_fields,
defaults = Storage(
rows = "organisation_id",
cols = "training.course_id",
fact = "count(person_id)",
totals = True,
)
)
# Configure resource
s3db.configure("hrm_human_resource",
filter_widgets = filter_widgets,
list_fields = list_fields,
report_options = report_options,
)
# Remove controller filter
#s3.filter = None
#elif r.representation in ("geojson", "plain") or deploy:
# # No filter
# pass
#else:
# if vol:
# # Default to Volunteers
# type_filter = FS("type") == 2
# else:
# # Default to Staff
# type_filter = FS("type") == 1
# r.resource.add_filter(type_filter)
# Others
if r.interactive:
if method == "create" and not r.component:
if not settings.get_hrm_mix_staff():
# Need to either create a Staff or a Volunteer through separate forms
if vol:
c = "vol"
f = "volunteer"
else:
c = "hrm"
f = "staff"
# @ToDo: Forward instead? (see org/site)
redirect(URL(c=c, f=f,
args = r.args,
vars = r.vars,
))
elif method == "delete":
if deploy:
# Delete the Application, not the HR
atable = s3db.deploy_application
app = db(atable.human_resource_id == r.id).select(atable.id,
limitby = (0, 1),
).first()
if not app:
current.session.error = "Cannot find Application to delete!"
redirect(URL(args = "summary"))
redirect(URL(f="application",
args = [app.id, "delete"],
))
else:
# Don't redirect
pass
elif method == "profile":
# Don't redirect
pass
# Now done in s3merge
#elif method == "deduplicate":
# # Don't use AddPersonWidget here
# from gluon.sqlhtml import OptionsWidget
# field = r.table.person_id
# field.requires = IS_ONE_OF(db, "pr_person.id",
# label = field.represent)
# field.widget = OptionsWidget.widget
elif r.id:
# Redirect to person controller
# @ToDo: Forward instead? (see org/site)
if r.record.type == 2:
group = "volunteer"
else:
group = "staff"
if r.function == "trainee":
fn = "trainee_person"
else:
fn = "person"
redirect(URL(f = fn,
args = [method] if method else [],
vars = {"human_resource.id" : r.id,
"group" : group
},
))
elif r.representation == "xls" and not r.component:
hrm_xls_list_fields(r)
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
if not r.component:
if r.controller == "deploy":
# Application is deleted, not HR
deletable = True
# Open Profile page
read_url = URL(args = ["[id]", "profile"])
update_url = URL(args = ["[id]", "profile"])
else:
deletable = settings.get_hrm_deletable()
# Standard CRUD buttons
read_url = None
update_url = None
S3CRUD.action_buttons(r,
deletable = deletable,
read_url = read_url,
update_url = update_url)
if "msg" in settings.modules and \
settings.get_hrm_compose_button() and \
current.auth.permission.has_permission("update",
c="hrm",
f="compose"):
s3.actions.append({"url": URL(f="compose",
vars = {"human_resource.id": "[id]"},
),
"_class": "action-btn send",
"label": s3_str(T("Send Message"))
})
elif r.representation == "plain":
# Map Popups
output = hrm_map_popup(r)
return output
s3.postp = postp
return current.rest_controller("hrm", "human_resource")
# =============================================================================
def hrm_person_controller(**attr):
"""
Persons Controller, defined in the model for use from
multiple controllers for unified menus
- used for access to component Tabs, Personal Profile & Imports
- includes components relevant to HRM
"""
T = current.T
db = current.db
s3db = current.s3db
#auth = current.auth
response = current.response
session = current.session
settings = current.deployment_settings
s3 = response.s3
configure = s3db.configure
set_method = s3db.set_method
# Custom Method(s) for Contacts
contacts_tabs = settings.get_pr_contacts_tabs()
if contacts_tabs:
from .pr import pr_Contacts
if "all" in contacts_tabs:
set_method("pr", "person",
method = "contacts",
action = pr_Contacts,
)
if "public" in contacts_tabs:
set_method("pr", "person",
method = "public_contacts",
action = pr_Contacts,
)
if "private" in contacts_tabs:
set_method("pr", "person",
method = "private_contacts",
action = pr_Contacts,
)
# Custom Method for CV
set_method("pr", "person",
method = "cv",
action = hrm_CV,
)
# Custom Method for Medical
set_method("pr", "person",
method = "medical",
action = hrm_Medical,
)
# Custom Method for HR Record
set_method("pr", "person",
method = "record",
action = hrm_Record,
)
if settings.has_module("asset"):
# Assets as component of people
s3db.add_components("pr_person",
asset_asset = "assigned_to_id",
)
# Edits should always happen via the Asset Log
# @ToDo: Allow this method too, if we can do so safely
configure("asset_asset",
deletable = False,
editable = False,
insertable = False,
)
get_vars = current.request.get_vars
group = get_vars.get("group", "staff")
hr_id = get_vars.get("human_resource.id", None)
if not str(hr_id).isdigit():
hr_id = None
# Configure human resource table
table = s3db.hrm_human_resource
table.type.default = 1
get_vars["xsltmode"] = "staff"
if hr_id:
hr = db(table.id == hr_id).select(table.type,
limitby = (0, 1),
).first()
if hr:
group = "volunteer" if hr.type == 2 else "staff"
# Also inform the back-end of this finding
get_vars["group"] = group
# Configure person table
table = db.pr_person
tablename = "pr_person"
configure(tablename,
deletable = False,
)
#mode = session.s3.hrm.mode
#if mode is not None:
# # Configure for personal mode
# s3.crud_strings[tablename].update(
# title_display = T("Personal Profile"),
# title_update = T("Personal Profile"))
# # People can view their own HR data, but not edit it
# # - over-ride in Template if need to make any elements editable
# configure("hrm_human_resource",
# deletable = False,
# editable = False,
# insertable = False,
# )
# configure("hrm_certification",
# deletable = False,
# editable = False,
# insertable = False,
# )
# configure("hrm_credential",
# deletable = False,
# editable = False,
# insertable = False,
# )
# configure("hrm_competency",
# deletable = False,
# editable = False,
# insertable = True, # Can add unconfirmed
# )
# configure("hrm_training", # Can add but not provide grade
# deletable = False,
# editable = False,
# insertable = True,
# )
# configure("hrm_experience",
# deletable = False,
# editable = False,
# insertable = False,
# )
# configure("pr_group_membership",
# deletable = False,
# editable = False,
# insertable = False,
# )
#else:
# Configure for HR manager mode
if settings.get_hrm_staff_label() == T("Contacts"):
s3.crud_strings[tablename].update(
title_upload = T("Import Contacts"),
title_display = T("Contact Details"),
title_update = T("Contact Details"),
)
elif group == "volunteer":
s3.crud_strings[tablename].update(
title_upload = T("Import Volunteers"),
title_display = T("Volunteer Details"),
title_update = T("Volunteer Details"),
)
else:
s3.crud_strings[tablename].update(
title_upload = T("Import Staff"),
title_display = T("Staff Member Details"),
title_update = T("Staff Member Details"),
)
# Import pre-process
def import_prep(data, group=group):
"""
Deletes all HR records (of the given group) of the
organisation/branch before processing a new data import
"""
if s3.import_replace:
resource, tree = data
if tree is not None:
xml = current.xml
tag = xml.TAG
att = xml.ATTRIBUTE
if group == "staff":
group = 1
elif group == "volunteer":
group = 2
else:
return # don't delete if no group specified
root = tree.getroot()
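                    # Locate the organisation name(s) referenced in the import tree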
expr = "/%s/%s[@%s='org_organisation']/%s[@%s='name']" % \
(tag.root, tag.resource, att.name, tag.data, att.field)
orgs = root.xpath(expr)
for org in orgs:
org_name = org.get("value", None) or org.text
if org_name:
try:
org_name = json.loads(xml.xml_decode(org_name))
except:
pass
if org_name:
htable = s3db.hrm_human_resource
otable = s3db.org_organisation
query = (otable.name == org_name) & \
(htable.organisation_id == otable.id) & \
(htable.type == group)
resource = s3db.resource("hrm_human_resource", filter=query)
# Use cascade=True so that the deletion gets
# rolled back if the import fails:
resource.delete(format = "xml",
cascade = True,
)
s3.import_prep = import_prep
# CRUD pre-process
def prep(r):
# Filter to just those people with an active HR record
r.resource.add_filter(FS("human_resource.id") != None)
# Plug-in role matrix for Admins/OrgAdmins
S3PersonRoleManager.set_method(r, entity="pr_person")
if s3.rtl:
# Ensure that + appears at the beginning of the number
# - using table alias to only apply to filtered component
f = s3db.get_aliased(s3db.pr_contact, "pr_phone_contact").value
f.represent = s3_phone_represent
f.widget = S3PhoneWidget()
method = r.method
if r.representation == "s3json":
current.xml.show_ids = True
elif r.interactive and method != "import":
if not r.component:
table = r.table
table.pe_label.readable = table.pe_label.writable = False
table.age_group.readable = table.age_group.writable = False
                # Assume volunteers are aged between 5 and 120 years
dob = table.date_of_birth
dob.widget = S3CalendarWidget(past_months = 1440,
future_months = -60,
)
person_details_table = s3db.pr_person_details
# No point showing the 'Occupation' field - that's the Job Title in the Staff Record
person_details_table.occupation.readable = person_details_table.occupation.writable = False
# Organisation Dependent Fields
# - deprecated (IFRC template only)
#set_org_dependent_field = settings.set_org_dependent_field
#set_org_dependent_field("pr_person", "middle_name")
#set_org_dependent_field("pr_person_details", "father_name")
#set_org_dependent_field("pr_person_details", "mother_name")
#set_org_dependent_field("pr_person_details", "grandfather_name")
#set_org_dependent_field("pr_person_details", "affiliations")
#set_org_dependent_field("pr_person_details", "company")
else:
component_name = r.component_name
if component_name == "physical_description":
# Hide all but those details that we want
# Lock all the fields
table = r.component.table
for field in table.fields:
table[field].writable = table[field].readable = False
# Now enable those that we want
table.ethnicity.writable = table.ethnicity.readable = True
table.blood_type.writable = table.blood_type.readable = True
table.medical_conditions.writable = table.medical_conditions.readable = True
table.other_details.writable = table.other_details.readable = True
elif component_name == "appraisal":
mission_id = r.get_vars.get("mission_id", None)
if mission_id:
hatable = r.component.table
# Lookup Code
mtable = s3db.deploy_mission
mission = db(mtable.id == mission_id).select(mtable.code,
limitby = (0, 1),
).first()
if mission:
hatable.code.default = mission.code
# Lookup Job Title
atable = db.deploy_assignment
htable = db.hrm_human_resource
query = (atable.mission_id == mission_id) & \
(atable.human_resource_id == htable.id) & \
(htable.person_id == r.id)
assignment = db(query).select(atable.job_title_id,
limitby = (0, 1),
).first()
if assignment:
hatable.job_title_id.default = assignment.job_title_id
elif component_name == "asset":
# Edits should always happen via the Asset Log
# @ToDo: Allow this method too, if we can do so safely
configure("asset_asset",
insertable = False,
editable = False,
deletable = False,
)
elif component_name == "group_membership":
hrm_configure_pr_group_membership()
elif component_name == "image":
if r.method == "create":
# Coming from the rheader...simplify UI
table = s3db.pr_image
f = table.profile
f.default = True
f.readable = f.writable = False
table.image.comment = None
table.type.readable = table.type.writable = False
table.url.readable = table.url.writable = False
table.description.readable = table.description.writable = False
elif component_name == "salary":
hrm_configure_salary(r)
elif component_name == "user":
r.component.configure(deletable = False)
current.auth.configure_user_fields()
utable = db.auth_user
# Don't allow password changes here (doesn't require old password)
utable.password.readable = utable.password.writable = False
# User cannot amend their own Org/Site/Link
f = utable.organisation_id
f.writable = False
f.comment = None
f = utable.site_id
f.writable = False
f.comment = None
f = utable.link_user_to
f.writable = False
f.comment = None
def auth_user_onaccept(form):
language = form.vars.get("language")
if language:
T.force(language)
session.s3.language = language
s3db.configure("auth_user",
onaccept = auth_user_onaccept,
)
if method == "record" or r.component_name == "human_resource":
table = s3db.hrm_human_resource
table.person_id.writable = table.person_id.readable = False
table.site_id.readable = table.site_id.writable = True
#org = session.s3.hrm.org
#f = table.organisation_id
#if org is None:
# f.widget = None
# f.writable = False
#else:
# f.default = org
# f.readable = f.writable = False
# table.site_id.requires = IS_EMPTY_OR(
# IS_ONE_OF(db,
# "org_site.%s" % s3db.super_key(db.org_site),
# s3db.org_site_represent,
# filterby="organisation_id",
# filter_opts=(session.s3.hrm.org,),
# ))
elif method == "cv" or r.component_name == "training":
list_fields = ["course_id",
"grade",
]
if settings.get_hrm_course_pass_marks():
list_fields.append("grade_details")
list_fields.append("date")
s3db.configure("hrm_training",
list_fields = list_fields,
)
resource = r.resource
#if mode is not None:
# resource.build_query(id=auth.s3_logged_in_person())
if method not in ("deduplicate", "search_ac"):
if not r.id and not hr_id:
# pre-action redirect => must retain prior errors
if response.error:
session.error = response.error
redirect(URL(r=r, f="staff"))
if resource.count() == 1:
resource.load()
r.record = resource.records().first()
if r.record:
r.id = r.record.id
if not r.record:
session.error = T("Record not found")
redirect(URL(f="staff"))
if hr_id and r.component_name == "human_resource":
r.component_id = hr_id
configure("hrm_human_resource",
insertable = False,
)
elif r.representation == "aadata":
if r.component_name == "group_membership":
hrm_configure_pr_group_membership()
elif method == "cv" or r.component_name == "training":
list_fields = ["course_id",
"grade",
]
if settings.get_hrm_course_pass_marks():
list_fields.append("grade_details")
list_fields.append("date")
s3db.configure("hrm_training",
list_fields = list_fields,
)
return True
s3.prep = prep
# CRUD post-process
def postp(r, output):
if r.interactive and r.component:
if r.component_name == "asset":
# Provide a link to assign a new Asset
# @ToDo: Proper Widget to do this inline
output["add_btn"] = A(T("Assign Asset"),
_href = URL(c="asset", f="asset"),
_id = "add-btn",
_class = "action-btn",
)
return output
s3.postp = postp
# REST Interface
#orgname = session.s3.hrm.orgname
_attr = {"csv_stylesheet": ("hrm", "person.xsl"),
"csv_template": "staff",
"csv_extra_fields": [{"label": "Type",
"field": s3db.hrm_human_resource.type,
},
],
# Better in the native person controller (but this isn't always accessible):
#"deduplicate": "",
#"orgname": orgname,
"replace_option": T("Remove existing data before import"),
"rheader": hrm_rheader,
}
_attr.update(attr)
return current.rest_controller("pr", "person", **_attr)
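# Illustrative usage (not part of this module): like the other *_controller
# helpers here, hrm_person_controller() is intended to be returned from a thin
# controller action, e.g. in controllers/hrm.py or controllers/vol.py:
#   def person():
#       return s3db.hrm_person_controller()
# The exact wiring depends on the template/controller in use.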
# =============================================================================
def hrm_training_controller():
"""
Training Controller, defined in the model for use from
multiple controllers for unified menus
- used for Searching for Participants
- used for Adding/Editing on Profile page
"""
s3db = current.s3db
def prep(r):
method = r.method
if r.interactive or r.representation == "aadata":
s3db.configure("hrm_training",
#insertable = False,
listadd = False,
)
if method in ("create", "update"):
# Coming from Profile page?
person_id = r.get_vars.get("~.person_id", None)
if person_id:
field = s3db.hrm_training.person_id
field.default = person_id
field.readable = field.writable = False
# @ToDo: Complete
#elif method == "import":
# # Allow course to be populated onaccept from training_event_id
# table = s3db.hrm_training
# s3db.configure("hrm_training",
# onvalidation = hrm_training_onvalidation,
# )
# table.course_id.requires = IS_EMPTY_OR(table.course_id.requires)
# f = table.training_event_id
# training_event_id = r.get_vars.get("~.training_event_id", None)
# if training_event_id:
# f.default = training_event_id
# else:
# f.writable = True
if method == "report":
# Configure virtual fields for reports
s3db.configure("hrm_training", extra_fields=["date"])
table = s3db.hrm_training
table.year = Field.Method("year", hrm_training_year)
table.month = Field.Method("month", hrm_training_month)
# Can't reliably link to persons as these are imported in random order
# - do this onimport if desired (see RMS)
#elif method == "import":
# # If users accounts are created for imported participants
# s3db.configure("auth_user",
# create_onaccept = lambda form: current.auth.s3_approve_user(form.vars),
# )
return True
current.response.s3.prep = prep
return current.rest_controller("hrm", "training",
csv_stylesheet = ("hrm", "training.xsl"),
csv_template = ("hrm", "training"),
csv_extra_fields = [{"label": "Training Event",
"field": s3db.hrm_training.training_event_id,
},
],
)
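# Illustrative usage (assumption, as noted for hrm_person_controller above):
# a controller action would normally just delegate here, e.g.:
#   def training():
#       return s3db.hrm_training_controller()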
# =============================================================================
def hrm_training_event_controller():
"""
Training Event Controller, defined in the model for use from
multiple controllers for unified menus
"""
s3 = current.response.s3
def prep(r):
if r.component_name == "target":
tablename = "dc_target"
# Simplify
table = r.component.table
table.location_id.readable = table.location_id.writable = False
#table.organisation_id.readable = table.organisation_id.writable = False
#table.comments.readable = table.comments.writable = False
# CRUD strings
T = current.T
label = current.deployment_settings.get_dc_response_label()
if label == "Survey":
#label = T("Survey")
s3.crud_strings[tablename] = Storage(
label_create = T("Create Survey"),
title_display = T("Survey Details"),
title_list = T("Surveys"),
title_update = T("Edit Survey"),
title_upload = T("Import Surveys"),
label_list_button = T("List Surveys"),
label_delete_button = T("Delete Survey"),
msg_record_created = T("Survey added"),
msg_record_modified = T("Survey updated"),
msg_record_deleted = T("Survey deleted"),
msg_list_empty = T("No Surveys currently registered"),
)
else:
#label = T("Assessment")
s3.crud_strings[tablename] = Storage(
label_create = T("Create Assessment"),
title_display = T("Assessment Details"),
title_list = T("Assessments"),
title_update = T("Edit Assessment"),
title_upload = T("Import Assessments"),
label_list_button = T("List Assessments"),
label_delete_button = T("Delete Assessment"),
msg_record_created = T("Assessment added"),
msg_record_modified = T("Assessment updated"),
msg_record_deleted = T("Assessment deleted"),
msg_list_empty = T("No Assessments currently registered"),
)
# Open in native controller
current.s3db.configure(tablename,
linkto = lambda record_id: \
URL(c="dc", f="target",
args = [record_id, "read"],
),
linkto_update = lambda record_id: \
URL(c="dc", f="target",
args = [record_id, "update"],
),
)
elif r.component_name == "participant" and \
(r.interactive or \
r.representation in ("aadata", "pdf", "xls")):
# Use appropriate CRUD strings
T = current.T
s3.crud_strings["hrm_training"] = Storage(
label_create = T("Add Participant"),
title_display = T("Participant Details"),
title_list = T("Participants"),
title_update = T("Edit Participant"),
title_upload = T("Import Participants"),
label_list_button = T("List Participants"),
label_delete_button = T("Remove Participant"),
msg_record_created = T("Participant added"),
msg_record_modified = T("Participant updated"),
msg_record_deleted = T("Participant removed"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no Participants registered"),
)
# Hide/default fields which get populated from the Event
record = r.record
s3db = current.s3db
table = s3db.hrm_training
field = table.course_id
field.readable = False
field.writable = False
field.default = record.course_id
field = table.date
field.readable = False
field.writable = False
field.default = record.start_date
field = table.hours
field.readable = False
field.writable = False
field.default = record.hours
# Suitable list_fields
settings = current.deployment_settings
list_fields = ["person_id",
]
if settings.get_hrm_use_job_titles():
list_fields.append((T("Job Title"), "job_title")) # Field.Method
list_fields += [(settings.get_hrm_organisation_label(), "organisation"), # Field.Method
"grade",
]
if settings.get_hrm_course_pass_marks():
list_fields.append("grade_details")
if settings.get_hrm_use_certificates():
list_fields.append("certification_from_training.number")
s3db.configure("hrm_training",
list_fields = list_fields
)
return True
s3.prep = prep
#def postp(r, output):
# if r.interactive:
# # @ToDo: Restore once the other part is working
# if r.component_name == "participant" and \
# isinstance(output, dict):
# showadd_btn = output.get("showadd_btn", None)
# if showadd_btn:
# # Add an Import button
# if s3.crud.formstyle == "bootstrap":
# _class = "s3_modal"
# else:
# _class = "action-btn s3_modal"
# import_btn = crud_button(label = current.T("Import Participants"),
# _class = _class,
# _href = URL(f="training", args="import.popup",
# vars={"~.training_event_id":r.id}),
# )
# output["showadd_btn"] = TAG[""](showadd_btn, import_btn)
# return output
#s3.postp = postp
return current.rest_controller("hrm", "training_event",
rheader = hrm_rheader,
)
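# Illustrative usage (assumption): typically delegated to from a controller
# action, e.g.:
#   def training_event():
#       return s3db.hrm_training_event_controller()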
# =============================================================================
def hrm_xls_list_fields(r, staff=True, vol=True):
"""
Configure Human Resource list_fields for XLS Export
- match the XLS Import
- no l10n of column labels
- simple represents
"""
s3db = current.s3db
settings = current.deployment_settings
table = r.table
table.organisation_id.represent = s3db.org_OrganisationRepresent(acronym = False,
parent = False,
)
table.site_id.represent = s3db.org_SiteRepresent(show_type = False)
ptable = s3db.pr_person
ptable.middle_name.represent = lambda v: v or ""
ptable.last_name.represent = lambda v: v or ""
list_fields = [("First Name", "person_id$first_name"),
("Middle Name", "person_id$middle_name"),
("Last Name", "person_id$last_name"),
]
if staff and vol:
list_fields.insert(0, ("Type", "type"))
if settings.get_hrm_use_code():
list_fields.append(("Staff ID", "code"))
list_fields.append(("Sex", "person_id$gender"))
#if settings.get_hrm_multiple_orgs():
if settings.get_org_branches():
# @ToDo: Smart Handling for emptying the Root if org == root
# @ToDo: Smart Handling for when we have Sub-Branches
list_fields += [(settings.get_hrm_root_organisation_label(), "organisation_id$root_organisation"), # Not imported
("Organisation", "organisation_id"),
]
else:
list_fields.append(("Organisation", "organisation_id"))
if (staff and settings.get_hrm_use_job_titles()) or \
(vol and settings.get_hrm_vol_roles()):
table.job_title_id.represent = S3Represent("hrm_job_title", none="", translate=True) # Need to reinitialise to get the new value for none
list_fields.append(("Job Title", "job_title_id"))
if (staff and settings.get_hrm_staff_departments()) or \
(vol and settings.get_hrm_vol_departments()):
table.department_id.represent = S3Represent("hrm_department", none="") # Need to reinitialise to get the new value for none
list_fields.append(("Department", "department_id"))
if staff or ("site_id" in settings.get_hrm_location_vol()):
list_fields += [("Office", "site_id"),
("Facility Type", "site_id$instance_type"),
]
list_fields += [("Email", "email.value"),
("Mobile Phone", "phone.value"),
("DOB", "person_id$date_of_birth"),
("Start Date", "start_date"),
("End Date", "end_date"), # Not reimported
("Status", "status"),
("Essential", "essential"), # Not reimported
]
gtable = s3db.gis_location
levels = current.gis.get_relevant_hierarchy_levels()
for level in levels:
gtable[level].represent = lambda v: v or ""
if level == "L0":
list_fields.append(("Home Country", "home_address.location_id$%s" % level))
else:
list_fields.append(("Home %s" % level, "home_address.location_id$%s" % level))
gtable.addr_street.represent = lambda v: v or ""
list_fields.append(("Home Address", "home_address.location_id$addr_street"))
if settings.get_gis_postcode_selector():
gtable.addr_postcode.represent = lambda v: v or ""
list_fields.append(("Home Postcode", "home_address.location_id$addr_postcode"))
if settings.get_hrm_use_trainings():
s3db.hrm_training.course_id.represent = S3Represent("hrm_course", none="", translate=True) # Need to reinitialise to get the new value for none
list_fields.append(("Trainings", "person_id$training.course_id"))
if settings.get_hrm_use_certificates():
# @ToDo: Make Importable
s3db.hrm_certification.certificate_id.represent = S3Represent("hrm_certificate", none="") # Need to reinitialise to get the new value for none
list_fields.append(("Certificates", "person_id$certification.certificate_id"))
if settings.get_hrm_use_skills():
s3db.hrm_competency.skill_id.represent = S3Represent("hrm_skill", none="") # Need to reinitialise to get the new value for none
list_fields.append(("Skills", "person_id$competency.skill_id"))
if settings.get_hrm_use_education():
etable = s3db.pr_education
etable.level_id.represent = S3Represent("pr_education_level", none="") # Need to reinitialise to get the new value for none
etable.award.represent = lambda v: v or ""
etable.major.represent = lambda v: v or ""
etable.grade.represent = lambda v: v or ""
etable.year.represent = lambda v: v or ""
etable.institute.represent = lambda v: v or ""
list_fields.extend((("Education Level", "person_id$education.level_id"),
("Degree Name", "person_id$education.award"),
("Major", "person_id$education.major"),
("Grade", "person_id$education.grade"),
("Year", "person_id$education.year"),
("Institute", "person_id$education.institute"),
))
if vol:
if settings.get_hrm_vol_active():
list_fields.append(("Active", "details.active"))
if settings.get_hrm_vol_experience() in ("programme", "both"):
# @ToDo: Make Importable
s3db.hrm_programme_hours.programme_id.represent = S3Represent("hrm_programme", none="") # Need to reinitialise to get the new value for none
list_fields.append(("Programs", "person_id$hours.programme_id"))
if settings.get_hrm_use_awards():
list_fields.append(("Awards", "person_id$award.award_id"))
list_fields.append(("Comments", "comments"))
r.resource.configure(list_fields = list_fields)
return list_fields
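# Illustrative usage (assumption): this helper is meant to be called from a
# controller prep when exporting Human Resources to XLS, so that the export
# columns match the import template, e.g.:
#   if r.representation == "xls":
#       hrm_xls_list_fields(r, staff=True, vol=False)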
# =============================================================================
class hrm_CV(S3Method):
"""
Curriculum Vitae, custom profile page with multiple DataTables:
* Awards
* Education
* Experience
* Training
* Skills
"""
def __init__(self, form=None):
"""
Constructor
@param form: widget config to inject at the top of the CV,
or a callable to produce such a widget config
"""
super(hrm_CV, self).__init__()
self.form = form
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point for REST API
@param r: the S3Request
@param attr: controller arguments
"""
if r.name == "person" and \
r.id and \
not r.component and \
r.representation in ("html", "aadata"):
T = current.T
s3db = current.s3db
get_config = s3db.get_config
settings = current.deployment_settings
tablename = r.tablename
if r.controller == "vol":
controller = "vol"
vol = True
elif r.controller == "deploy":
controller = "deploy"
vol = False
elif r.controller == "member":
controller = "member"
vol = False
else:
controller = "hrm"
vol = False
def dt_row_actions(component, tablename):
def row_actions(r, list_id):
editable = get_config(tablename, "editable")
if editable is None:
editable = True
deletable = get_config(tablename, "deletable")
if deletable is None:
deletable = True
if editable:
# HR Manager
actions = [{"label": T("Open"),
"url": r.url(component = component,
component_id = "[id]",
method = "update.popup",
vars = {"refresh": list_id},
),
"_class": "action-btn edit s3_modal",
},
]
else:
# Typically the User's personal profile
actions = [{"label": T("Open"),
"url": r.url(component = component,
component_id = "[id]",
method = "read.popup",
vars = {"refresh": list_id},
),
"_class": "action-btn edit s3_modal",
},
]
if deletable:
actions.append({"label": T("Delete"),
"_ajaxurl": r.url(component = component,
component_id = "[id]",
method = "delete.json",
),
"_class": "action-btn delete-btn-ajax dt-ajax-delete",
})
return actions
return row_actions
profile_widgets = []
form = self.form
if form:
if callable(form):
form = form(r)
if form is not None:
profile_widgets.append(form)
if vol and settings.get_hrm_use_awards():
tablename = "vol_volunteer_award"
r.customise_resource(tablename)
widget = {# Use CRUD Strings (easier to customise)
#"label": "Awards",
#"label_create": "Add Award",
"type": "datatable",
"actions": dt_row_actions("award", tablename),
"tablename": tablename,
"context": "person",
"create_controller": "vol",
"create_function": "person",
"create_component": "award",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if settings.get_hrm_use_education():
tablename = "pr_education"
widget = {"label": "Education",
"label_create": "Add Education",
"type": "datatable",
"actions": dt_row_actions("education", tablename),
"tablename": tablename,
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "education",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if vol:
vol_experience = settings.get_hrm_vol_experience()
experience = vol_experience in ("both", "experience")
missions = None
else:
staff_experience = settings.get_hrm_staff_experience()
experience = staff_experience in ("both", "experience")
missions = staff_experience in ("both", "missions")
if experience:
tablename = "hrm_experience"
r.customise_resource(tablename)
widget = {# Use CRUD Strings (easier to customise)
#"label": "Experience",
#"label_create": "Add Experience",
"type": "datatable",
"actions": dt_row_actions("experience", tablename),
"tablename": tablename,
"context": "person",
"filter": FS("assignment__link.assignment_id") == None,
"create_controller": controller,
"create_function": "person",
"create_component": "experience",
"pagesize": None, # all records
# Settings suitable for RMS
"list_fields": ["start_date",
"end_date",
"employment_type",
"organisation",
"job_title",
],
}
profile_widgets.append(widget)
if missions:
tablename = "hrm_experience"
widget = {"label": "Missions",
"type": "datatable",
"actions": dt_row_actions("experience", tablename),
"tablename": tablename,
"context": "person",
"filter": FS("assignment__link.assignment_id") != None,
"insert": False,
"pagesize": None, # all records
# Settings suitable for RMS
"list_fields": ["start_date",
"end_date",
"location_id",
#"organisation_id",
"job_title_id",
"job_title",
],
}
profile_widgets.append(widget)
if settings.get_hrm_use_trainings():
tablename = "hrm_training"
if settings.get_hrm_trainings_external():
widget = {"label": "Internal Training",
"label_create": "Add Internal Training",
"type": "datatable",
"actions": dt_row_actions("training", tablename),
"tablename": tablename,
"context": "person",
"filter": FS("course_id$external") == False,
"create_controller": controller,
"create_function": "person",
"create_component": "training",
"pagesize": None, # all records
}
profile_widgets.append(widget)
widget = {"label": "External Training",
"label_create": "Add External Training",
"type": "datatable",
"actions": dt_row_actions("training", tablename),
"tablename": tablename,
"context": "person",
"filter": FS("course_id$external") == True,
"create_controller": controller,
"create_function": "person",
"create_component": "training",
"pagesize": None, # all records
}
profile_widgets.append(widget)
else:
widget = {"label": "Training",
"label_create": "Add Training",
"type": "datatable",
"actions": dt_row_actions("training", tablename),
"tablename": tablename,
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "training",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if settings.get_hrm_use_skills():
tablename = "hrm_competency"
r.customise_resource(tablename)
widget = {# Use CRUD Strings (easier to customise)
#"label": label,
#"label_create": "Add Skill",
"type": "datatable",
"actions": dt_row_actions("competency", tablename),
"tablename": tablename,
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "competency",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if settings.get_hrm_use_certificates():
tablename = "hrm_certification"
widget = {"label": "Certificates",
"label_create": "Add Certificate",
"type": "datatable",
"actions": dt_row_actions("certification", tablename),
"tablename": tablename,
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "certification",
"pagesize": None, # all records
}
profile_widgets.append(widget)
# Person isn't a doc_id
#if settings.has_module("doc"):
# tablename = "doc_document"
# widget = {"label": "Documents",
# "label_create": "Add Document",
# "type": "datatable",
# "actions": dt_row_actions("document", tablename),
# "tablename": tablename,
# "filter": FS("doc_id") == record.doc_id,
# "icon": "attachment",
# "create_controller": controller,
# "create_function": "person",
# "create_component": "document",
# "pagesize": None, # all records
# }
# profile_widgets.append(widget)
if r.representation == "html":
response = current.response
# Maintain normal rheader for consistency
rheader = attr["rheader"]
profile_header = TAG[""](H2(response.s3.crud_strings["pr_person"].title_display),
DIV(rheader(r),
_id = "rheader",
),
)
else:
profile_header = None
s3db.configure(tablename,
profile_cols = 1,
profile_header = profile_header,
profile_widgets = profile_widgets,
)
profile = S3Profile()
profile.tablename = tablename
profile.request = r
output = profile.profile(r, **attr)
if r.representation == "html":
output["title"] = response.title = T("CV")
return output
else:
r.error(405, current.ERROR.BAD_METHOD)
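# Illustrative registration (assumption - the exact set_method signature
# differs between Eden versions): hrm_CV is normally attached to the person
# resource as a custom REST method, e.g.:
#   s3db.set_method("pr", "person",
#                   method = "cv",
#                   action = hrm_CV())
# after which it is reachable as .../person/<id>/cv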
# =============================================================================
class hrm_Medical(S3Method):
"""
HR Medical Tab, custom profile page with multiple elements:
* Physical Description
* Insurance
NB S3SQLCustomForms for these components are expected to be created in
customise_hrm_insurance_resource and
customise_pr_physical_description_resource
"""
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point for REST API
@param r: the S3Request
@param attr: controller arguments
"""
if r.name != "person" or not r.id or r.component:
r.error(405, current.ERROR.BAD_METHOD)
representation = r.representation
if representation not in ("html", "aadata"):
r.error(405, current.ERROR.BAD_METHOD)
T = current.T
s3db = current.s3db
response = current.response
s3 = response.s3
crud_strings = s3.crud_strings
tablename = r.tablename
# Redefine as non-multiple
s3db.add_components("hrm_human_resource",
hrm_insurance = {"joinby": "human_resource_id",
"multiple": False,
},
)
r.customise_resource("hrm_insurance")
r.customise_resource("pr_physical_description")
profile_widgets = [
{"label": "",
"type": "form",
#"tablename": "pr_physical_description",
#"context": "person",
#"filter": FS("pe_id") == r.record.pe_id,
"tablename": "pr_person",
"context": ("id", "id"),
"sqlform": S3SQLCustomForm("physical_description.blood_type",
"physical_description.medical_conditions",
"physical_description.medication",
"physical_description.diseases",
"physical_description.allergic",
"physical_description.allergies",
),
},
{"label": T("Medical Coverage"),
"type": "form",
"tablename": "hrm_human_resource",
"context": "person",
"sqlform": S3SQLCustomForm("insurance.insurance_number",
"insurance.phone",
"insurance.insurer",
),
},
]
if representation == "html":
# Maintain normal rheader for consistency
title = crud_strings["pr_person"].title_display
PROFILE = "profile" in r.get_vars
profile_header = TAG[""](H2(title),
DIV(hrm_rheader(r, profile=PROFILE),
_id = "rheader",
))
s3.jquery_ready.append('''S3.showHidden('%s',%s,'%s')''' % \
("allergic", json.dumps(["allergies"], separators=SEPARATORS), "pr_person_sub_physical_description"))
else:
profile_header = None
s3db.configure(tablename,
profile_cols = 1,
profile_header = profile_header,
profile_widgets = profile_widgets,
)
profile = S3Profile()
profile.tablename = tablename
profile.request = r
output = profile.profile(r, **attr)
if representation == "html":
output["title"] = response.title = title
return output
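# Illustrative registration (assumption, as for hrm_CV above): e.g.
#   s3db.set_method("pr", "person",
#                   method = "medical",
#                   action = hrm_Medical())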
# =============================================================================
class hrm_Record(S3Method):
"""
HR Record, custom profile page with multiple DataTables:
* Human Resource
* Hours (for volunteers)
* Teams
"""
def __init__(self,
salary = False,
awards = False,
disciplinary_record = False,
org_experience = False,
other_experience = False
):
"""
Constructor
@param salary: show a Salary widget
@param awards: show an Awards History widget
@param disciplinary_record: show a Disciplinary Record widget
@param org_experience: show widget with Professional Experience
within registered organisations, can be a
dict with overrides for widget defaults
@param other_experience: show widget with Other Experience, can
be a dict with overrides for widget defaults
"""
super(hrm_Record, self).__init__()
self.salary = salary
self.awards = awards
self.disciplinary_record = disciplinary_record
self.org_experience = org_experience
self.other_experience = other_experience
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point for REST API
@param r: the S3Request
@param attr: controller arguments
"""
if r.name != "person" or not r.id or r.component:
r.error(405, current.ERROR.BAD_METHOD)
representation = r.representation
if representation not in ("html", "aadata"):
r.error(405, current.ERROR.BAD_METHOD)
r.customise_resource("hrm_human_resource")
T = current.T
s3db = current.s3db
response = current.response
crud_strings = response.s3.crud_strings
settings = current.deployment_settings
tablename = r.tablename
if r.controller == "vol":
VOL = True
controller = "vol"
else:
VOL = r.get_vars["group"] == "volunteer"
controller = "hrm"
# @ToDo: Check editable/deletable config if necessary (see hrm_CV)
def dt_row_actions(component):
return lambda r, list_id: [
{"label": T("Open"),
"url": r.url(component = component,
component_id = "[id]",
method = "update.popup",
vars = {"refresh": list_id},
),
"_class": "action-btn edit s3_modal",
},
{"label": T("Delete"),
"_ajaxurl": r.url(component = component,
component_id = "[id]",
method = "delete.json",
),
"_class": "action-btn delete-btn-ajax dt-ajax-delete",
},
]
table = s3db.hrm_human_resource
label = settings.get_hrm_record_label()
code = table.code
if VOL:
widget_filter = FS("type") == 2
if settings.get_hrm_use_code() is True:
code.readable = code.writable = True
#elif controller == "hrm":
else:
#widget_filter = FS("type") == 1
widget_filter = None
if settings.get_hrm_use_code():
code.readable = code.writable = True
profile_widgets = [
{"label": label,
"type": "form",
"tablename": "hrm_human_resource",
"context": "person",
"filter": widget_filter,
},
]
if VOL:
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
ctablename = "hrm_programme_hours"
# Exclude records which are just to link to Programme
filter_ = (FS("hours") != None)
list_fields = ["id",
"date",
]
phtable = s3db.hrm_programme_hours
r.customise_resource(ctablename)
if phtable.programme_id.readable:
list_fields.append("programme_id")
# Exclude Training Hours
filter_ &= (FS("programme_id") != None)
if phtable.place.readable:
# RMS
list_fields += ["place",
"event",
]
if phtable.job_title_id.readable:
list_fields.append("job_title_id")
list_fields.append("hours")
crud_strings_ = crud_strings[ctablename]
hours_widget = {"label": crud_strings_["title_list"],
"label_create": crud_strings_["label_create"],
"type": "datatable",
"actions": dt_row_actions("hours"),
"tablename": ctablename,
"context": "person",
"filter": filter_,
"list_fields": list_fields,
"create_controller": controller,
"create_function": "person",
"create_component": "hours",
"pagesize": None, # all records
}
profile_widgets.append(hours_widget)
elif vol_experience == "activity":
# Exclude records which are just to link to Activity & also Training Hours
#filter_ = (FS("hours") != None) & \
# (FS("activity_id") != None)
list_fields = ["id",
"date",
"activity_id",
"job_title_id",
"hours",
]
#if s3db.vol_activity_hours.job_title_id.readable:
# list_fields.append("job_title_id")
#list_fields.append("hours")
hours_widget = {"label": "Activity Hours",
# Don't add Hours here since it would be very hard to find the right Activity in the list
"insert": False,
#"label_create": "Add Activity Hours",
"type": "datatable",
"actions": dt_row_actions("hours"),
"tablename": "vol_activity_hours",
"context": "person",
#"filter": filter_,
"list_fields": list_fields,
#"create_controller": controller,
#"create_function": "person",
#"create_component": "activity_hours",
"pagesize": None, # all records
}
profile_widgets.append(hours_widget)
teams = settings.get_hrm_teams()
if teams:
hrm_configure_pr_group_membership()
if teams == "Teams":
label_create = "Add Team"
elif teams == "Groups":
label_create = "Add Group"
teams_widget = {"label": teams,
"label_create": label_create,
"type": "datatable",
"actions": dt_row_actions("group_membership"),
"tablename": "pr_group_membership",
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "group_membership",
"pagesize": None, # all records
}
profile_widgets.append(teams_widget)
if controller == "hrm":
org_experience = self.org_experience
if org_experience:
# Use primary hrm/experience controller
# (=> defaults to staff-style experience form)
# Need different action URLs
def experience_row_actions(component):
return lambda r, list_id: [
{"label": T("Open"),
"url": URL(f="experience",
args = ["[id]", "update.popup"],
vars = {"refresh": list_id},
),
"_class": "action-btn edit s3_modal",
},
{"label": T("Delete"),
"_ajaxurl": URL(f="experience",
args = ["[id]", "delete.json"],
),
"_class": "action-btn delete-btn-ajax dt-ajax-delete",
},
]
# Configure widget, apply overrides
widget = {"label": T("Experience"),
"label_create": T("Add Experience"),
"type": "datatable",
"actions": experience_row_actions("experience"),
"tablename": "hrm_experience",
"pagesize": None, # all records
}
if isinstance(org_experience, dict):
widget.update(org_experience)
# Retain the person filter
person_filter = FS("person_id") == r.id
widget_filter = widget.get("filter")
if widget_filter:
widget["filter"] = person_filter & widget_filter
else:
widget["filter"] = person_filter
profile_widgets.append(widget)
other_experience = self.other_experience
if other_experience:
# Use experience component in hrm/person controller
# (=> defaults to vol-style experience form)
# Configure widget and apply overrides
widget = {"label": "Experience",
"label_create": "Add Experience",
"type": "datatable",
"actions": dt_row_actions("experience"),
"tablename": "hrm_experience",
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "experience",
"pagesize": None, # all records
}
if isinstance(other_experience, dict):
widget.update(other_experience)
profile_widgets.append(widget)
if self.awards:
widget = {"label": T("Awards"),
"label_create": T("Add Award"),
"type": "datatable",
"actions": dt_row_actions("staff_award"),
"tablename": "hrm_award",
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "staff_award",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if self.disciplinary_record:
widget = {"label": T("Disciplinary Record"),
"label_create": T("Add Disciplinary Action"),
"type": "datatable",
"actions": dt_row_actions("disciplinary_action"),
"tablename": "hrm_disciplinary_action",
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "disciplinary_action",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if self.salary:
widget = {"label": T("Salary"),
"label_create": T("Add Salary"),
"type": "datatable",
"actions": dt_row_actions("salary"),
"tablename": "hrm_salary",
"context": "person",
"create_controller": controller,
"create_function": "person",
"create_component": "salary",
"pagesize": None, # all records
}
profile_widgets.append(widget)
if representation == "html":
# Maintain normal rheader for consistency
title = crud_strings["pr_person"].title_display
PROFILE = "profile" in r.get_vars
profile_header = TAG[""](H2(title),
DIV(hrm_rheader(r, profile=PROFILE),
_id = "rheader",
))
else:
profile_header = None
s3db.configure(tablename,
profile_cols = 1,
profile_header = profile_header,
profile_widgets = profile_widgets,
)
profile = S3Profile()
profile.tablename = tablename
profile.request = r
output = profile.profile(r, **attr)
if representation == "html":
output["title"] = response.title = title
return output
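# Illustrative registration (assumption, as for hrm_CV above): the constructor
# options control which extra widgets appear, e.g.:
#   s3db.set_method("pr", "person",
#                   method = "record",
#                   action = hrm_Record(salary = True,
#                                       awards = True,
#                                       ))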
# =============================================================================
def hrm_configure_salary(r):
"""
Configure the salary tab
@param r: the S3Request
"""
hr_id = None
multiple = False
# Get all accessible HR records of this person
resource = r.resource
rows = resource.select(["human_resource.id",
"human_resource.type",
], as_rows=True)
# Only staff records, of course
rows = [row for row in rows if row["hrm_human_resource.type"] == 1]
HR_ID = "hrm_human_resource.id"
if len(rows) == 1:
hr_id = rows[0][HR_ID]
multiple = False
else:
hr_id = [row[HR_ID] for row in rows]
multiple = True
component = r.component
ctable = component.table
field = ctable.human_resource_id
list_fields = [fs for fs in component.list_fields() if fs != "person_id"]
if multiple or not hr_id:
# Default to the staff record selected in URL
default_hr_id = hr_id
if "human_resource.id" in r.get_vars:
try:
default_hr_id = int(r.get_vars["human_resource.id"])
except ValueError:
pass
if default_hr_id in hr_id:
field.default = default_hr_id
# Filter field options
field.requires = IS_ONE_OF(current.db, "hrm_human_resource.id",
current.s3db.hrm_human_resource_represent,
sort = True,
filterby = "id",
filter_opts = hr_id,
)
# Show the list_field
if "human_resource_id" not in list_fields:
list_fields.insert(1, "human_resource_id")
else:
# Only one HR record => set as default and make read-only
field.default = hr_id
field.writable = False
# Hiding the field can be confusing if there are mixed single/multi HR
#field.readable = False
# Hide the list field
if "human_resource_id" in list_fields:
list_fields.remove("human_resource_id")
component.configure(list_fields = list_fields)
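# NB hrm_configure_salary() is invoked from the prep of hrm_person_controller
# above when the "salary" component tab is opened; it is not expected to be
# called directly elsewhere.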
# =============================================================================
def hrm_configure_pr_group_membership():
"""
Configures the labels and CRUD Strings of pr_group_membership
"""
T = current.T
s3db = current.s3db
settings = current.deployment_settings
request = current.request
function = request.function
tablename = "pr_group_membership"
table = s3db.pr_group_membership
if settings.get_hrm_teams() == "Teams":
table.group_id.label = T("Team Name")
table.group_head.label = T("Team Leader")
if function == "person":
ADD_MEMBERSHIP = T("Add Membership")
current.response.s3.crud_strings[tablename] = Storage(
label_create = ADD_MEMBERSHIP,
title_display = T("Membership Details"),
title_list = T("Memberships"),
title_update = T("Edit Membership"),
label_list_button = T("List Memberships"),
label_delete_button = T("Delete Membership"),
msg_record_created = T("Added to Team"),
msg_record_modified = T("Membership updated"),
msg_record_deleted = T("Removed from Team"),
msg_list_empty = T("Not yet a Member of any Team"),
)
elif function in ("group", "group_membership"):
ADD_MEMBER = T("Add Team Member")
current.response.s3.crud_strings[tablename] = Storage(
label_create = ADD_MEMBER,
title_display = T("Membership Details"),
title_list = T("Team Members"),
title_update = T("Edit Membership"),
label_list_button = T("List Members"),
label_delete_button = T("Remove Person from Team"),
msg_record_created = T("Person added to Team"),
msg_record_modified = T("Membership updated"),
msg_record_deleted = T("Person removed from Team"),
msg_list_empty = T("This Team has no Members yet"),
)
else:
table.group_head.label = T("Group Leader")
if function in ("group", "group_membership"):
# Don't create Persons here as they need to be HRMs
table.person_id.comment = None
phone_label = settings.get_ui_label_mobile_phone()
site_label = settings.get_org_site_label()
list_fields = ["person_id",
"group_head",
(T("Email"), "person_id$email.value"),
(phone_label, "person_id$phone.value"),
(current.messages.ORGANISATION,
"person_id$human_resource.organisation_id"),
(site_label, "person_id$human_resource.site_id"),
]
name_format = settings.get_pr_name_format()
test = name_format % {"first_name": 1,
"middle_name": 2,
"last_name": 3,
}
test = "".join(ch for ch in test if ch in ("1", "2", "3"))
if test[:1] == "1":
orderby = "pr_person.first_name"
elif test[:1] == "2":
orderby = "pr_person.middle_name"
else:
orderby = "pr_person.last_name"
else:
# Person
list_fields = ["group_id",
"group_head",
"group_id$description",
]
orderby = table.group_id
s3db.configure(tablename,
list_fields = list_fields,
orderby = orderby,
)
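# NB hrm_configure_pr_group_membership() is called from the preps above
# (person/group_membership tabs) and from hrm_Record, i.e. wherever the Teams
# datatable is rendered, so that labels and CRUD strings stay consistent.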
# =============================================================================
def hrm_competency_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Skills on the HRM Profile
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_competency.id"]
item_class = "thumbnail"
raw = record._row
title = record["hrm_competency.skill_id"]
organisation = raw["hrm_competency.organisation_id"] or ""
if organisation:
#org_url = URL(c="org", f="organisation",
# args = [organisation, "profile"],
# )
org_url = URL(c="org", f="organisation",
args = [organisation],
)
organisation = P(ICON("organisation"),
" ",
SPAN(A(record["hrm_competency.organisation_id"],
_href = org_url,
)
),
" ",
_class = "card_1_line",
)
competency = raw["hrm_competency.competency_id"] or ""
if competency:
competency = P(ICON("certificate"),
" ",
SPAN(record["hrm_competency.competency_id"]),
" ",
_class = "card_1_line",
)
comments = raw["hrm_competency.comments"] or ""
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.hrm_competency
if permit("update", table, record_id=record_id):
controller = current.request.controller
if controller not in ("vol", "deploy"):
controller = "hrm"
edit_btn = A(ICON("edit"),
_href = URL(c=controller, f="competency",
args = [record_id, "update.popup"],
vars = {"refresh": list_id,
"record": record_id,
},
),
_class = "s3_modal",
_title = current.T("Edit Skill"),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"),
_class = "dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class = "edit-bar fright",
)
# Render the item
item = DIV(DIV(ICON("icon"),
SPAN(" %s" % title,
_class = "card-title",
),
edit_bar,
_class = "card-header",
),
DIV(DIV(DIV(organisation,
competency,
P(SPAN(comments),
" ",
_class = "card_manylines",
),
_class = "media",
),
_class = "media-body",
),
_class = "media",
),
_class = item_class,
_id = item_id,
)
return item
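# Illustrative usage (assumption): dataList renderers like this are hooked in
# via the resource config or a profile widget, e.g.:
#   s3db.configure("hrm_competency",
#                  list_layout = hrm_competency_list_layout,
#                  )
# The same pattern applies to the credential/experience/training layouts below.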
# =============================================================================
def hrm_credential_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Credentials on the HRM Profile
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_credential.id"]
item_class = "thumbnail"
raw = record["_row"]
start_date = raw["hrm_credential.start_date"]
end_date = raw["hrm_credential.end_date"]
if start_date or end_date:
if start_date and end_date:
dates = "%s - %s" % (record["hrm_credential.start_date"],
record["hrm_credential.end_date"],
)
elif start_date:
dates = "%s - " % record["hrm_credential.start_date"]
else:
dates = " - %s" % record["hrm_credential.end_date"]
date = P(ICON("calendar"),
" ",
SPAN(dates),
" ",
_class = "card_1_line",
)
else:
date = ""
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.hrm_credential
if permit("update", table, record_id=record_id):
controller = current.request.controller
if controller not in ("vol", "deploy"):
controller = "hrm"
edit_btn = A(ICON("edit"),
_href = URL(c=controller, f="credential",
args = [record_id, "update.popup"],
vars = {"refresh": list_id,
"record": record_id,
},
),
_class = "s3_modal",
_title = current.response.s3.crud_strings["hrm_credential"].title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"),
_class = "dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class = "edit-bar fright",
)
# Render the item
item = DIV(DIV(ICON("icon"),
SPAN(" %s" % record["hrm_credential.job_title_id"],
_class = "card-title",
),
edit_bar,
_class = "card-header",
),
DIV(DIV(DIV(date,
_class = "media",
),
_class = "media-body",
),
_class = "media",
),
_class = item_class,
_id = item_id,
)
return item
# =============================================================================
def hrm_experience_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Experience on the HRM Profile
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_experience.id"]
item_class = "thumbnail"
raw = record._row
card_line = lambda icon, item: P(ICON(icon),
SPAN(item),
_class = "card_1_line",
)
# Organisation
colname = "hrm_experience.organisation_id"
organisation_id = raw[colname]
if organisation_id:
org_url = URL(c="org", f="organisation",
args = [organisation_id],
)
organisation = A(record[colname],
_href = org_url,
)
else:
# Try free-text field
organisation = raw["hrm_experience.organisation"]
if organisation:
organisation = card_line("organisation", organisation)
else:
organisation = ""
# Activity Type
colname = "hrm_experience.activity_type"
activity_type = raw[colname]
if activity_type:
activity_type = card_line("activity", record[colname])
else:
activity_type = ""
# Key Responsibilities
colname = "hrm_experience.responsibilities"
responsibilities = raw[colname]
if responsibilities:
responsibilities = card_line("responsibility", record[colname])
else:
responsibilities = ""
# Location
colname = "hrm_experience.location_id"
location_id = raw[colname]
if location_id:
#location_url = URL(c="gis", f="location",
# args = [location_id, "profile"],
# )
location_url = URL(c="gis", f="location",
args = [location_id],
)
location = card_line("location",
A(record[colname],
_href = location_url,
))
else:
location = ""
# Hours
hours = raw["hrm_experience.hours"]
if hours:
hours = card_line("time", hours)
else:
hours = ""
# Start and End Dates
colname_start = "hrm_experience.start_date"
colname_end = "hrm_experience.end_date"
start_date = raw[colname_start]
end_date = raw[colname_end]
if start_date or end_date:
if start_date and end_date:
dates = "%s - %s" % (record[colname_start],
record[colname_end],
)
elif start_date:
dates = "%s - " % record[colname_start]
else:
dates = " - %s" % record[colname_end]
date = card_line("calendar", dates)
else:
date = ""
# Supervisor
colname = "hrm_experience.supervisor_id"
supervisor_id = raw[colname]
if supervisor_id:
#person_url = URL(c="hrm", f="person",
# args = [supervisor_id, "profile"],
# )
person_url = URL(c="hrm", f="person",
args = [supervisor_id],
)
supervisor = card_line("user",
A(record[colname],
_href = person_url,
))
else:
supervisor = ""
# Comments
comments = raw["hrm_experience.comments"] or ""
# Job title as card title, indicate employment type if given
colname = "hrm_experience.job_title_id"
if raw[colname]:
title = record[colname]
job_title = card_line("star", title)
else:
title = ""
job_title = ""
position = raw["hrm_experience.job_title"]
if position:
title = position
else:
job_title = ""
colname = "hrm_experience.employment_type"
if raw[colname]:
employment_type = record[colname]
if title:
title = "%s (%s)" % (title, employment_type)
else:
title = employment_type
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.hrm_experience
if permit("update", table, record_id=record_id):
controller = current.request.controller
if controller not in ("vol", "deploy"):
controller = "hrm"
edit_btn = A(ICON("edit"),
_href = URL(c=controller, f="experience",
args = [record_id, "update.popup"],
vars = {"refresh": list_id,
"record": record_id,
},
),
_class = "s3_modal",
_title = current.T("Edit Experience"),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"),
_class = "dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class = "edit-bar fright",
)
# Render the item
item = DIV(DIV(ICON("icon"),
SPAN(title,
_class = "card-title",
),
edit_bar,
_class = "card-header",
),
DIV(DIV(DIV(organisation,
location,
date,
hours,
supervisor,
activity_type,
job_title,
responsibilities,
P(SPAN(comments),
" ",
_class = "card_manylines",
),
_class = "media",
),
_class = "media-body",
),
_class = "media",
),
_class = item_class,
_id = item_id,
)
return item
# =============================================================================
def hrm_training_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Trainings on the HRM Profile
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_training.id"]
item_class = "thumbnail"
raw = record._row
title = record["hrm_training.course_id"]
date = raw["hrm_training.date"] or ""
if date:
date = P(ICON("calendar"),
" ",
SPAN(record["hrm_training.date"]),
" ",
_class="card_1_line",
)
grade = raw["hrm_training.grade"] or ""
if grade:
grade = P(ICON("certificate"),
" ",
SPAN(record["hrm_training.grade"]),
" ",
_class="card_1_line",
)
hours = raw["hrm_training.hours"] or ""
if hours:
hours = P(ICON("time"),
" ",
SPAN(hours),
" ",
_class="card_1_line",
)
site = raw["hrm_training_event.site_id"] or ""
if site:
#site_id = raw["hrm_training_event.site_id"]
#site_url = URL(c="org", f="site", args=[site_id, "profile"])
site_url = "#"
site = P(ICON("site"),
" ",
SPAN(A(record["hrm_training_event.site_id"],
_href = site_url,
)
),
" ",
_class="card_1_line",
)
job_title = raw["hrm_course_job_title.job_title_id"] or ""
if job_title:
job_title = P(ICON("star"),
" ",
SPAN(record["hrm_course_job_title.job_title_id"],
),
" ",
_class="card_1_line",
)
else:
job_title = ""
comments = raw["hrm_training.comments"] or ""
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.hrm_training
if permit("update", table, record_id=record_id):
controller = current.request.controller
if controller not in ("vol", "deploy"):
controller = "hrm"
edit_btn = A(ICON("edit"),
_href = URL(c=controller, f="training",
args = [record_id, "update.popup"],
vars = {"refresh": list_id,
"record": record_id,
},
),
_class = "s3_modal",
_title = current.T("Edit Training"),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"),
_class = "dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class = "edit-bar fright",
)
# Render the item
item = DIV(DIV(ICON("icon"),
SPAN(" %s" % title,
_class = "card-title",
),
edit_bar,
_class = "card-header",
),
DIV(DIV(DIV(job_title,
site,
date,
hours,
grade,
P(SPAN(comments),
" ",
_class = "card_manylines",
),
_class = "media",
),
_class = "media-body",
),
_class = "media",
),
_class = item_class,
_id = item_id,
)
return item
# =============================================================================
def hrm_human_resource_filters(resource_type = None,
module = None,
hrm_type_opts = None):
"""
Get filter widgets for human resources
@param resource_type: the HR type (staff/volunteer/both) if
pre-determined, otherwise None to render a
filter widget
@param module: the controller prefix of the request to render
module-specific widgets, defaults to
current.request.controller
@param hrm_type_opts: the options for the HR type filter widget
"""
T = current.T
settings = current.deployment_settings
if not module:
module = current.request.controller
text_search_fields = ["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
"person_id$email.value",
#"organisation_id",
]
use_code = settings.get_hrm_use_code()
if use_code is True or (use_code and resource_type != "volunteer"):
text_search_fields.append("code")
if settings.get_hrm_use_national_id():
text_search_fields.append("person_id$national_id.value")
filter_widgets = [S3TextFilter(text_search_fields,
label = T("Search"),
),
]
append_filter = filter_widgets.append
if module == "deploy" and current.auth.s3_has_role("ADMIN"):
dotable = current.s3db.deploy_organisation
deploying_orgs = current.db(dotable.deleted == False).count()
if deploying_orgs > 1:
append_filter(S3OptionsFilter("application.organisation_id",
label = T("Deployment Team"),
))
# Type filter (only if not pre-filtered)
if not resource_type in ("staff", "volunteer"):
append_filter(S3OptionsFilter("type",
label = T("Type"),
options = hrm_type_opts,
cols = 2,
hidden = True,
))
# Region filter (only if using regions in template)
if settings.get_org_regions():
if settings.get_org_regions_hierarchical():
if module == "deploy":
hidden = False
else:
hidden = True
append_filter(S3HierarchyFilter("organisation_id$organisation_region.region_id",
label = T("Region"),
hidden = hidden,
))
else:
append_filter(S3OptionsFilter("organisation_id$organisation_region.region_id",
label = T("Region"),
hidden = True,
))
# Organisation filter
if settings.get_hrm_multiple_orgs():
if settings.get_org_branches():
append_filter(S3HierarchyFilter("organisation_id",
leafonly = False,
))
else:
append_filter(S3OptionsFilter("organisation_id",
search = True,
header = "",
#hidden = True,
))
# Location filter (always)
append_filter(S3LocationFilter("location_id",
label = T("Location"),
hidden = True,
))
# Active / Activity / Programme filters (volunteer only)
if module == "vol" or resource_type in ("both", "volunteer"):
vol_active = settings.get_hrm_vol_active()
if vol_active:
# Active filter
append_filter(S3OptionsFilter("details.active",
label = T("Active?"),
cols = 2, #3,
options = {True: T("Yes"),
False: T("No"),
#None: T("Unknown"),
},
hidden = True,
#none = True,
))
vol_experience = settings.get_hrm_vol_experience()
if vol_experience in ("programme", "both"):
# Programme filter
append_filter(S3OptionsFilter("person_id$hours.programme_id",
label = T("Program"),
#options = lambda: \
# s3_get_filter_opts("hrm_programme",
# org_filter=True),
hidden = True,
))
elif vol_experience == "activity":
# Programme filter
append_filter(S3OptionsFilter("person_id$activity_hours.activity_hours_activity_type.activity_type_id",
label = T("Activity Types"),
hidden = True,
))
if settings.get_hrm_unavailability():
# Availability Filter
append_filter(S3DateFilter("available",
label = T("Available"),
# Use custom selector to prevent automatic
# parsing (which would result in an error)
selector = "available",
hide_time = False,
hidden = True,
))
else:
# Site filter (staff only)
filter_widgets.append(S3OptionsFilter("site_id",
hidden = True,
))
if module == "deploy":
# Deployment-specific filters
# Availability Filter
append_filter(S3DateFilter("available",
label = T("Available for Deployment"),
# Use custom selector to prevent automatic
# parsing (which would result in an error)
selector = "available",
hide_time = True,
hidden = True,
))
# Job title filter
append_filter(S3OptionsFilter("credential.job_title_id",
# @ToDo: deployment_setting for label (this is RDRT-specific)
#label = T("Credential"),
label = T("Sector"),
hidden = True,
))
# Last-deployment-date filter
append_filter(S3DateFilter("human_resource_id:deploy_assignment.start_date",
label = T("Deployed"),
hide_time = True,
hidden = True,
))
# Last-response-date filter
append_filter(S3DateFilter("human_resource_id:deploy_response.created_on",
label = T("Responded"),
hide_time = True,
hidden = True,
))
# Certificate filter
if settings.get_hrm_use_certificates():
append_filter(S3OptionsFilter("certification.certificate_id",
# Better to default (easier to customise/consistency)
#label = T("Certificate"),
hidden = True,
))
# Skills filter
if settings.get_hrm_use_skills():
append_filter(S3OptionsFilter("competency.skill_id",
# Better to default (easier to customise/consistency)
#label = T("Skill"),
hidden = module != "req",
))
# Training filter
if settings.get_hrm_use_trainings():
if settings.get_hrm_training_filter_and():
append_filter(S3OptionsFilter("trainings.course_id",
label = T("Training"),
hidden = True,
operator = "contains",
))
else:
append_filter(S3OptionsFilter("training.course_id",
label = T("Training"),
hidden = True,
))
# Group (team) membership filter
teams = settings.get_hrm_teams()
if teams:
if teams == "Teams":
teams = "Team"
elif teams == "Groups":
teams = "Group"
append_filter(S3OptionsFilter("group_membership.group_id",
label = T(teams),
hidden = True,
))
return filter_widgets
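# Illustrative usage (assumption): the returned widgets are normally passed
# straight into the resource config, e.g.:
#   filter_widgets = hrm_human_resource_filters(resource_type = "staff",
#                                               module = "hrm")
#   s3db.configure("hrm_human_resource",
#                  filter_widgets = filter_widgets,
#                  )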
# END =========================================================================
| 43.058475 | 233 | 0.417007 |
320e343943564411244c0befec2665ee0d6d3ac6 | 1,239 | py | Python | test/neo4j_node_remove_labels_test.py | fabsx00/py2neo | 80f6605499ee4cec4b338f15453e8f509a09468a | ["Apache-2.0"] | null | null | null | test/neo4j_node_remove_labels_test.py | fabsx00/py2neo | 80f6605499ee4cec4b338f15453e8f509a09468a | ["Apache-2.0"] | null | null | null | test/neo4j_node_remove_labels_test.py | fabsx00/py2neo | 80f6605499ee4cec4b338f15453e8f509a09468a | ["Apache-2.0"] | 1 | 2021-10-08T03:41:54.000Z | 2021-10-08T03:41:54.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py2neo import neo4j, node
def test_can_remove_labels_from_node():
graph_db = neo4j.GraphDatabaseService()
if not graph_db.supports_node_labels:
return
alice, = graph_db.create(node(name="Alice"))
alice.add_labels("human", "female")
labels = alice.get_labels()
assert len(labels) == 2
assert labels == set(["human", "female"])
alice.remove_labels("human")
labels = alice.get_labels()
assert labels == set(["female"])
assert labels != set(["human", "female"])
alice.remove_labels("female")
labels = alice.get_labels()
assert labels == set()
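def test_removing_absent_label_is_noop_sketch():
    # Hedged follow-up sketch, not part of the original test module. It reuses
    # only the calls exercised above (node, create, add_labels, get_labels,
    # remove_labels) and assumes the server treats removal of a label that was
    # never added as a no-op.
    graph_db = neo4j.GraphDatabaseService()
    if not graph_db.supports_node_labels:
        return
    bob, = graph_db.create(node(name="Bob"))
    bob.add_labels("human")
    bob.remove_labels("robot")  # "robot" was never added
    assert bob.get_labels() == set(["human"])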
| 32.605263
| 74
| 0.703793
|
1ddd2660c784e9a515eb52da215b8cf0144d4723
| 137
|
py
|
Python
|
sentry-handle-exceptions-django-projects/step1/djsentry/errors/views.py
|
fullstackpython/blog-code-examples
|
a6afcb874e88086686071aa1b2a47548aed5a2b0
|
[
"MIT"
] | 65
|
2017-06-13T01:02:17.000Z
|
2022-01-10T09:58:29.000Z
|
sentry-handle-exceptions-django-projects/step1/djsentry/errors/views.py
|
fullstackpython/blog-code-examples
|
a6afcb874e88086686071aa1b2a47548aed5a2b0
|
[
"MIT"
] | 1
|
2020-06-05T18:07:42.000Z
|
2020-06-05T18:07:42.000Z
|
sentry-handle-exceptions-django-projects/step1/djsentry/errors/views.py
|
fullstackpython/blog-code-examples
|
a6afcb874e88086686071aa1b2a47548aed5a2b0
|
[
"MIT"
] | 50
|
2017-07-01T02:10:19.000Z
|
2022-03-24T17:23:58.000Z
|
# djsentry/errors/views.py
from django.shortcuts import render
def errors_index(request):
return render(request, 'index.html', {})
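# Hedged sketch (illustrative addition, not in the original step1 code): a view
# that raises an unhandled exception on purpose, so that once Sentry's Django
# integration is configured the error shows up in its dashboard. The view name
# is hypothetical and would still need a urls.py entry.
def errors_boom(request):
    result = 1 / 0  # ZeroDivisionError propagates as an unhandled server error
    return render(request, 'index.html', {'result': result})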
| 19.571429
| 44
| 0.744526
|
5bfeb52d5c0d94cc90ee110535b77809fdbdd34d
| 12,263
|
py
|
Python
|
torchreid/models/osnet.py
|
rick-hao/deep-person-reid
|
87fd094a2c679518433e7e92aae1d7a0f954d6cb
|
[
"MIT"
] | null | null | null |
torchreid/models/osnet.py
|
rick-hao/deep-person-reid
|
87fd094a2c679518433e7e92aae1d7a0f954d6cb
|
[
"MIT"
] | null | null | null |
torchreid/models/osnet.py
|
rick-hao/deep-person-reid
|
87fd094a2c679518433e7e92aae1d7a0f954d6cb
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
__all__ = ['osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', 'osnet_ibn_x1_0']
import torch
from torch import nn
from torch.nn import functional as F
import torchvision
##########
# Basic layers
##########
class ConvLayer(nn.Module):
"""Convolution layer (conv + bn + relu)."""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, IN=False):
super(ConvLayer, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride,
padding=padding, bias=False, groups=groups)
if IN:
self.bn = nn.InstanceNorm2d(out_channels, affine=True)
else:
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Conv1x1(nn.Module):
"""1x1 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
super(Conv1x1, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, 1, stride=stride, padding=0,
bias=False, groups=groups)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Conv1x1Linear(nn.Module):
"""1x1 convolution + bn (w/o non-linearity)."""
def __init__(self, in_channels, out_channels, stride=1):
super(Conv1x1Linear, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, 1, stride=stride, padding=0, bias=False)
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class Conv3x3(nn.Module):
"""3x3 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
super(Conv3x3, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, 3, stride=stride, padding=1,
bias=False, groups=groups)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class LightConv3x3(nn.Module):
"""Lightweight 3x3 convolution.
1x1 (linear) + dw 3x3 (nonlinear).
"""
def __init__(self, in_channels, out_channels):
super(LightConv3x3, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, stride=1, padding=1, bias=False, groups=out_channels)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.bn(x)
x = self.relu(x)
return x
##########
# Building blocks for omni-scale feature learning
##########
class ChannelGate(nn.Module):
"""A mini-network that generates channel-wise gates conditioned on input tensor."""
def __init__(self, in_channels, num_gates=None, return_gates=False,
gate_activation='sigmoid', reduction=16, layer_norm=False):
super(ChannelGate, self).__init__()
if num_gates is None:
num_gates = in_channels
self.return_gates = return_gates
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(in_channels, in_channels//reduction, kernel_size=1, bias=True, padding=0)
self.norm1 = None
if layer_norm:
self.norm1 = nn.LayerNorm((in_channels//reduction, 1, 1))
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(in_channels//reduction, num_gates, kernel_size=1, bias=True, padding=0)
if gate_activation == 'sigmoid':
self.gate_activation = nn.Sigmoid()
elif gate_activation == 'relu':
self.gate_activation = nn.ReLU(inplace=True)
elif gate_activation == 'linear':
self.gate_activation = None
else:
raise RuntimeError("Unknown gate activation: {}".format(gate_activation))
def forward(self, x):
input = x
x = self.global_avgpool(x)
x = self.fc1(x)
if self.norm1 is not None:
x = self.norm1(x)
x = self.relu(x)
x = self.fc2(x)
if self.gate_activation is not None:
x = self.gate_activation(x)
if self.return_gates:
return x
return input * x
class OSBlock(nn.Module):
"""Omni-scale feature learning block."""
def __init__(self, in_channels, out_channels, IN=False, bottleneck_reduction=4, **kwargs):
super(OSBlock, self).__init__()
mid_channels = out_channels // bottleneck_reduction
self.conv1 = Conv1x1(in_channels, mid_channels)
self.conv2a = LightConv3x3(mid_channels, mid_channels)
self.conv2b = nn.Sequential(
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
)
self.conv2c = nn.Sequential(
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
)
self.conv2d = nn.Sequential(
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
)
self.gate = ChannelGate(mid_channels)
self.conv3 = Conv1x1Linear(mid_channels, out_channels)
self.downsample = None
if in_channels != out_channels:
self.downsample = Conv1x1Linear(in_channels, out_channels)
self.IN = None
if IN:
self.IN = nn.InstanceNorm2d(out_channels, affine=True)
def forward(self, x):
residual = x
x1 = self.conv1(x)
x2a = self.conv2a(x1)
x2b = self.conv2b(x1)
x2c = self.conv2c(x1)
x2d = self.conv2d(x1)
x2 = self.gate(x2a) + self.gate(x2b) + self.gate(x2c) + self.gate(x2d)
x3 = self.conv3(x2)
if self.downsample is not None:
residual = self.downsample(residual)
out = x3 + residual
if self.IN is not None:
out = self.IN(out)
return F.relu(out)
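# Minimal shape-check sketch (illustrative comment, not part of the original
# module): OSBlock preserves the spatial size and maps in_channels to
# out_channels, adding a 1x1 projection on the residual path when they differ.
#
#   block = OSBlock(in_channels=64, out_channels=256)
#   y = block(torch.randn(2, 64, 32, 32))   # y.shape == torch.Size([2, 256, 32, 32])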
##########
# Network architecture
##########
class OSNet(nn.Module):
"""Omni-Scale Network.
Reference:
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ArXiv preprint, 2019.
https://arxiv.org/abs/1905.00953
"""
def __init__(self, num_classes, blocks, layers, channels, feature_dim=512, loss='softmax', IN=False, **kwargs):
super(OSNet, self).__init__()
num_blocks = len(blocks)
assert num_blocks == len(layers)
assert num_blocks == len(channels) - 1
self.loss = loss
# convolutional backbone
self.conv1 = ConvLayer(3, channels[0], 7, stride=2, padding=3, IN=IN)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.conv2 = self._make_layer(blocks[0], layers[0], channels[0], channels[1], reduce_spatial_size=True, IN=IN)
self.conv3 = self._make_layer(blocks[1], layers[1], channels[1], channels[2], reduce_spatial_size=True)
self.conv4 = self._make_layer(blocks[2], layers[2], channels[2], channels[3], reduce_spatial_size=False)
self.conv5 = Conv1x1(channels[3], channels[3])
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
# fully connected layer
self.fc = self._construct_fc_layer(feature_dim, channels[3], dropout_p=None)
# identity classification layer
self.classifier = nn.Linear(self.feature_dim, num_classes)
self._init_params()
def _make_layer(self, block, layer, in_channels, out_channels, reduce_spatial_size, IN=False):
layers = []
layers.append(block(in_channels, out_channels, IN=IN))
for i in range(1, layer):
layers.append(block(out_channels, out_channels, IN=IN))
if reduce_spatial_size:
layers.append(
nn.Sequential(
Conv1x1(out_channels, out_channels),
nn.AvgPool2d(2, stride=2)
)
)
return nn.Sequential(*layers)
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
if fc_dims is None or fc_dims < 0:
self.feature_dim = input_dim
return None
if isinstance(fc_dims, int):
fc_dims = [fc_dims]
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def featuremaps(self, x):
x = self.conv1(x)
x = self.maxpool(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
return x
def forward(self, x):
x = self.featuremaps(x)
v = self.global_avgpool(x)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
##########
# Instantiation
##########
def osnet_x1_0(num_classes=1000, loss='softmax', **kwargs):
# standard size (width x1.0)
return OSNet(num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2],
channels=[64, 256, 384, 512], loss=loss, **kwargs)
def osnet_x0_75(num_classes=1000, loss='softmax', **kwargs):
# medium size (width x0.75)
return OSNet(num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2],
channels=[48, 192, 288, 384], loss=loss, **kwargs)
def osnet_x0_5(num_classes=1000, loss='softmax', **kwargs):
# tiny size (width x0.5)
return OSNet(num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2],
channels=[32, 128, 192, 256], loss=loss, **kwargs)
def osnet_x0_25(num_classes=1000, loss='softmax', **kwargs):
# very tiny size (width x0.25)
return OSNet(num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2],
channels=[16, 64, 96, 128], loss=loss, **kwargs)
def osnet_ibn_x1_0(num_classes=1000, loss='softmax', **kwargs):
# standard size (width x1.0) + IBN layer
# Ref: Pan et al. Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net. ECCV, 2018.
return OSNet(num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2],
channels=[64, 256, 384, 512], loss=loss, IN=True, **kwargs)
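# Hedged usage sketch (illustration only, not part of the original file):
#
#   model = osnet_x1_0(num_classes=751, loss='softmax')
#   model.eval()
#   with torch.no_grad():
#       feats = model(torch.randn(4, 3, 256, 128))
#   # In eval mode forward() returns the embedding, here of shape [4, 512]
#   # because feature_dim defaults to 512.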
| 35.544928
| 118
| 0.593737
|
fbd7074717242643579d75256625a09a06e786d7
| 10,587
|
py
|
Python
|
colorker/service/bigquery.py
|
jkachika/columbus-worker
|
16222ab876ffbf44e4b556ef5b6ec71ac2097892
|
[
"MIT"
] | null | null | null |
colorker/service/bigquery.py
|
jkachika/columbus-worker
|
16222ab876ffbf44e4b556ef5b6ec71ac2097892
|
[
"MIT"
] | null | null | null |
colorker/service/bigquery.py
|
jkachika/columbus-worker
|
16222ab876ffbf44e4b556ef5b6ec71ac2097892
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# Author: Johnson Kachikaran (johnsoncharles26@gmail.com)
# Date: 19th May 2016
# BigQuery Service Python API:
# https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/index.html
"""
Includes functions to integrate with Google BigQuery. The results and implementation are based on the API
provided by the Google BigQuery API:
https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/index.html
"""
import logging
import traceback
from colorker.security import CredentialManager
logger = logging.getLogger('worker')
def _fetch_projects(user_settings=None):
bq_service = CredentialManager.get_big_query_service(user_settings)
projects = bq_service.projects()
response = projects.list().execute(num_retries=3)
project_list = response["projects"]
result = []
for project in project_list:
result.append(project["projectReference"]["projectId"])
return result
def _fetch_datasets(project_id, user_settings=None):
bq_service = CredentialManager.get_big_query_service(user_settings)
datasets = bq_service.datasets()
response = datasets.list(projectId=project_id).execute(num_retries=3)
dataset_list = response["datasets"]
result = []
for dataset in dataset_list:
result.append(dataset["datasetReference"]["datasetId"])
return result
def _fetch_tables(project_id, dataset_id, user_settings=None):
bq_service = CredentialManager.get_big_query_service(user_settings)
tables = bq_service.tables()
response = tables.list(projectId=project_id, datasetId=dataset_id).execute(num_retries=3)
table_list = response["tables"]
result = []
for table in table_list:
result.append(table["tableReference"]["tableId"])
return result
def _describe_table(table_id, dataset_id, project_id, user_settings=None):
bq_service = CredentialManager.get_big_query_service(user_settings)
tables = bq_service.tables()
response = tables.get(projectId=project_id, datasetId=dataset_id, tableId=table_id).execute(num_retries=3)
return response
def _execute_job(project_id, dataset_id, query, sync=False, user_settings=None):
bq_service = CredentialManager.get_big_query_service(user_settings)
jobs = bq_service.jobs()
body = { # uses queryCache feature by default
"timeoutMs": 45 * 1000,
"defaultDataset": {
"projectId": project_id,
"datasetId": dataset_id
},
"maxResults": 5000,
"query": query
}
job = jobs.query(projectId=project_id, body=body).execute(num_retries=3)
job_id = job["jobReference"]["jobId"]
response = {}
result_rows = []
if sync: # synchronous call. will wait until the job is finished.
while not job["jobComplete"]:
job = jobs.getQueryResults(projectId=project_id, jobId=job_id, timeoutMs=45 * 1000,
maxResults=5000).execute(num_retries=3)
if job["jobComplete"]:
total_rows = int(job["totalRows"])
cached = str(job["cacheHit"])
fields = []
python_types = {"INTEGER": int, "FLOAT": float, "STRING": str}
for field in job["schema"]["fields"]:
fields.append({"name": str(field["name"]), "type": str(field["type"])})
more_results = True
while more_results:
for row in job["rows"]:
result_row = []
for index, field in enumerate(row["f"]):
try:
result_row.append({"v": python_types.get(fields[index]["type"], str)(field["v"])})
except TypeError:
if fields[index]["type"] == 'INTEGER' or fields[index]["type"] == 'FLOAT':
result_row.append({"v": float('NaN')})
else:
result_row.append({"v": 'NaN'})
result_rows.append(result_row)
page_token = job.get("pageToken", None)
more_results = True if page_token else False
if more_results:
job = jobs.getQueryResults(projectId=project_id, jobId=job_id, timeoutMs=45 * 1000,
pageToken=page_token, maxResults=5000).execute(num_retries=3)
response['fields'] = fields
response['rows'] = result_rows
response['total'] = total_rows
response['cached'] = cached
return response
def _parse_table_name(qualified_table_name):
project_index = qualified_table_name.index(':')
dataset_index = qualified_table_name.index('.')
project_id = qualified_table_name[0:project_index]
dataset_id = qualified_table_name[project_index + 1:dataset_index]
table_id = qualified_table_name[dataset_index + 1:]
return dict(tid=table_id, did=dataset_id, pid=project_id)
def get_all_tables(user_settings=None):
"""
Obtains all the table names from all the bigquery projects.
:param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.
If one is not provided, then this method must be invoked by an EngineThread
which defines the settings
:return: `[{project_name:dataset_name : [table_name_1, table_name_2]}]`
:rtype: list(dict)
"""
all_tables = []
projects = _fetch_projects(user_settings)
for project in projects:
datasets = _fetch_datasets(project, user_settings)
for dataset in datasets:
tables = _fetch_tables(project_id=project, dataset_id=dataset, user_settings=user_settings)
group = []
for table in tables:
group.append(str(table))
all_tables.append({str(project + ":" + dataset): group})
return all_tables
def get_features(qualified_table_name, user_settings=None):
"""
Obtains the columns of a bigquery table
:param str qualified_table_name: table name, must be of the form `project_name:dataset_name.table_name`
:param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.
If one is not provided, then this method must be invoked by an EngineThread
which defines the settings
:return: List of key value pairs where key is column name and value is column type
:rtype: list
"""
try:
metadata = _parse_table_name(qualified_table_name)
table = _describe_table(table_id=metadata["tid"], dataset_id=metadata["did"], project_id=metadata["pid"],
user_settings=user_settings)
schema = table.get('schema', None)
if schema is not None:
features = schema.get('fields', None)
if features is not None:
types = {'INTEGER': 1, 'FLOAT': 3, 'STRING': 9}
return [{str(feature['name']): types.get(feature['type'], 9)} for feature in sorted(features)]
except BaseException as e:
logger.error(e.message)
logger.error(traceback.format_exc())
return []
def get_distinct_feature(feature, qualified_table_name, where=None, sync=False, user_settings=None):
table = _parse_table_name(qualified_table_name)
if where is not None:
query = "SELECT " + str(feature) + ", COUNT(" + str(feature) + ") AS count FROM [" + str(
qualified_table_name) + "] WHERE " + where + " GROUP BY " + str(
feature) + " ORDER BY " + str(feature) + " ASC"
else:
query = "SELECT " + str(feature) + ", COUNT(" + str(feature) + ") AS count FROM [" + str(
qualified_table_name) + "] GROUP BY " + str(
feature) + " ORDER BY " + str(feature) + " ASC"
return _execute_job(project_id=table["pid"], dataset_id=table["did"], query=query, sync=sync,
user_settings=user_settings)
def get_count_star(qualified_table_name, where=None, sync=False, user_settings=None):
table = _parse_table_name(qualified_table_name)
if where is not None:
query = "SELECT COUNT(*) AS count FROM [" + str(qualified_table_name) + "] WHERE " + where
else:
query = "SELECT COUNT(*) AS count FROM [" + str(qualified_table_name) + "]"
return _execute_job(project_id=table["pid"], dataset_id=table["did"], query=query, sync=sync,
user_settings=user_settings)
def get_first_feature(feature, qualified_table_name, user_settings=None):
table = _parse_table_name(qualified_table_name)
return _execute_job(project_id=table["pid"], dataset_id=table["did"],
query="SELECT " + str(feature) + " FROM [" + str(qualified_table_name) + "] WHERE " +
str(feature) + " IS NOT NULL LIMIT 1", sync=True, user_settings=user_settings)
def select_star(qualified_table_name, where=None, sync=False, user_settings=None):
table = _parse_table_name(qualified_table_name)
if where is not None:
query = "SELECT * FROM [" + str(qualified_table_name) + "] WHERE " + where
else:
query = "SELECT * FROM [" + str(qualified_table_name) + "]"
return _execute_job(project_id=table["pid"], dataset_id=table["did"], query=query, sync=sync,
user_settings=user_settings)
def get_query_results(qualified_table_name, query, user_settings=None):
"""
Obtains the results of a query. A call to this method will block until the results are obtained
:param str qualified_table_name: table name, must be of the form project-name:dataset-name.table-name
:param str query: A SQL query that conforms to the syntax of Bigquery query
:param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.
If one is not provided, then this method must be invoked by an EngineThread
which defines the settings
:return: `{fields: [{name: column_name_1, type:column_type_1}, ...],`
`rows: [[{v:column_1_value}, {v:column_2_value}, ...], [{v:column_1_value}, {v:column_2_value}, ...]],`
`total: total_number_of_rows,`
`cached: boolean, whether the results returned were obtained from cache}`
:rtype: dict
"""
table = _parse_table_name(qualified_table_name)
return _execute_job(project_id=table["pid"], dataset_id=table["did"],
query=query, sync=True, user_settings=user_settings)
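# Hedged usage sketch (illustrative only; the settings object and the
# project/dataset/table names below are hypothetical):
#
#   tables = get_all_tables(user_settings=settings)
#   results = get_query_results(
#       "my-project:my_dataset.my_table",
#       "SELECT COUNT(*) AS count FROM [my-project:my_dataset.my_table]",
#       user_settings=settings)
#   print(results["total"], results["cached"])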
| 45.051064
| 117
| 0.652309
|
21b6f158c7f38371e55cbc75849371c9f6745c80
| 85
|
py
|
Python
|
runtime/python/Lib/xml/etree/cElementTree.py
|
hwaipy/InteractionFreeNode
|
88642b68430f57b028fd0f276a5709f89279e30d
|
[
"MIT"
] | 207
|
2018-10-01T08:53:01.000Z
|
2022-03-14T12:15:54.000Z
|
lib/assets/Lib/xml/etree/cElementTree.py
|
it56660024/cafe-grader-web
|
e9a1305fd62e79e54f6961f97ddc5cd57bafd73c
|
[
"MIT"
] | 30
|
2019-01-04T10:14:56.000Z
|
2020-10-12T14:00:31.000Z
|
lib/assets/Lib/xml/etree/cElementTree.py
|
it56660024/cafe-grader-web
|
e9a1305fd62e79e54f6961f97ddc5cd57bafd73c
|
[
"MIT"
] | 76
|
2020-03-16T01:47:46.000Z
|
2022-03-21T16:37:07.000Z
|
# Deprecated alias for xml.etree.ElementTree
from xml.etree.ElementTree import *
| 21.25
| 45
| 0.776471
|
4e62484d3dfa7d21d23d57d1dbc9df7d35d7a66e
| 12,053
|
py
|
Python
|
avalanche/evaluation/metric_utils.py
|
lipovsek/avalanche
|
1f06502b12140b39f48adf5a5f3b5de8ec2a930b
|
[
"MIT"
] | null | null | null |
avalanche/evaluation/metric_utils.py
|
lipovsek/avalanche
|
1f06502b12140b39f48adf5a5f3b5de8ec2a930b
|
[
"MIT"
] | null | null | null |
avalanche/evaluation/metric_utils.py
|
lipovsek/avalanche
|
1f06502b12140b39f48adf5a5f3b5de8ec2a930b
|
[
"MIT"
] | null | null | null |
################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 14-12-2020 #
# Author(s): Lorenzo Pellegrini #
# E-mail: contact@continualai.org #
# Website: www.continualai.org #
################################################################################
from typing import Dict, Union, Iterable, Sequence, Tuple, TYPE_CHECKING, List
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.axes import Axes
from numpy import ndarray, arange
from torch import Tensor
if TYPE_CHECKING:
from avalanche.training.templates.supervised import SupervisedTemplate
from avalanche.benchmarks.scenarios import ClassificationExperience
from avalanche.evaluation import PluginMetric
EVAL = "eval"
TRAIN = "train"
def default_cm_image_creator(
confusion_matrix_tensor: Tensor,
display_labels: Sequence = None,
include_values=False,
xticks_rotation=0,
yticks_rotation=0,
values_format=None,
cmap="viridis",
image_title="",
):
"""
The default Confusion Matrix image creator.
Code adapted from
`Scikit learn <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html>`_ # noqa
:param confusion_matrix_tensor: The tensor describing the confusion matrix.
This can be easily obtained through Scikit-learn `confusion_matrix`
utility.
:param display_labels: Target names used for plotting. By default, `labels`
will be used if it is defined, otherwise the values will be inferred by
the matrix tensor.
:param include_values: Includes values in confusion matrix. Defaults to
`False`.
:param xticks_rotation: Rotation of xtick labels. Valid values are
floating point values. Defaults to 0.
:param yticks_rotation: Rotation of ytick labels. Valid values are
floating point values. Defaults to 0.
:param values_format: Format specification for values in confusion matrix.
Defaults to `None`, which means that the format specification is
'd' or '.2g', whichever is shorter.
:param cmap: Must be a str or a Colormap recognized by matplotlib.
Defaults to 'viridis'.
:param image_title: The title of the image. Defaults to an empty string.
:return: The Confusion Matrix as a PIL Image.
"""
fig, ax = plt.subplots()
cm = confusion_matrix_tensor.numpy()
n_classes = cm.shape[0]
im_ = ax.imshow(cm, interpolation="nearest", cmap=cmap)
cmap_min, cmap_max = im_.cmap(0), im_.cmap(256)
if include_values:
text_ = np.empty_like(cm, dtype=object)
# print text with appropriate color depending on background
thresh = (cm.max() + cm.min()) / 2.0
for i in range(n_classes):
for j in range(n_classes):
color = cmap_max if cm[i, j] < thresh else cmap_min
if values_format is None:
text_cm = format(cm[i, j], ".2g")
if cm.dtype.kind != "f":
text_d = format(cm[i, j], "d")
if len(text_d) < len(text_cm):
text_cm = text_d
else:
text_cm = format(cm[i, j], values_format)
text_[i, j] = ax.text(
j, i, text_cm, ha="center", va="center", color=color
)
if display_labels is None:
display_labels = np.arange(n_classes)
fig.colorbar(im_, ax=ax)
ax.set(
xticks=np.arange(n_classes),
yticks=np.arange(n_classes),
xticklabels=display_labels,
yticklabels=display_labels,
ylabel="True label",
xlabel="Predicted label",
)
if image_title != "":
ax.set_title(image_title)
ax.set_ylim((n_classes - 0.5, -0.5))
plt.setp(ax.get_xticklabels(), rotation=xticks_rotation)
plt.setp(ax.get_yticklabels(), rotation=yticks_rotation)
fig.tight_layout()
return fig
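# Hedged usage sketch (illustrative, not part of the original module):
#
#   import torch
#   cm = torch.tensor([[5, 1], [2, 7]])
#   fig = default_cm_image_creator(cm,
#                                  display_labels=["neg", "pos"],
#                                  include_values=True,
#                                  image_title="toy confusion matrix")
#   fig.savefig("cm.png")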
SEABORN_COLORS = (
(0.2980392156862745, 0.4470588235294118, 0.6901960784313725),
(0.8666666666666667, 0.5176470588235295, 0.3215686274509804),
(0.3333333333333333, 0.6588235294117647, 0.40784313725490196),
(0.7686274509803922, 0.3058823529411765, 0.3215686274509804),
(0.5058823529411764, 0.4470588235294118, 0.7019607843137254),
(0.5764705882352941, 0.47058823529411764, 0.3764705882352941),
(0.8549019607843137, 0.5450980392156862, 0.7647058823529411),
(0.5490196078431373, 0.5490196078431373, 0.5490196078431373),
(0.8, 0.7254901960784313, 0.4549019607843137),
(0.39215686274509803, 0.7098039215686275, 0.803921568627451),
)
def repartition_pie_chart_image_creator(
label2counts: Dict[int, List[int]],
counters: List[int],
colors: Union[ndarray, Iterable, int, float] = SEABORN_COLORS,
fmt: str = "%1.1f%%",
):
"""
Create a pie chart representing the labels repartition.
:param label2counts: A dict holding the counts for each label, of the form
{label: [count_at_step_0, count_at_step_1, ...]}. Only the last count of
each label is used here.
:param counters: (unused) The steps the counts were taken at.
:param colors: The colors to use in the chart.
:param fmt: Formatting used to display the text values in the chart.
"""
fig, ax = plt.subplots()
ax: Axes
labels, counts = zip(*((label, c[-1]) for label, c in label2counts.items()))
ax.pie(counts, labels=labels, autopct=fmt, colors=colors)
fig.tight_layout()
return fig
def repartition_bar_chart_image_creator(
label2counts: Dict[int, List[int]],
counters: List[int],
colors: Union[ndarray, Iterable, int, float] = SEABORN_COLORS,
):
"""
Create a bar chart representing the labels repartition.
:param label2counts: A dict holding the counts for each label, of the form
{label: [count_at_step_0, count_at_step_1, ...]}. Only the last count of
each label is used here.
:param counters: (unused) The steps the counts were taken at.
:param colors: The colors to use in the chart.
"""
fig, ax = plt.subplots()
ax: Axes
y = -arange(len(label2counts))
labels, counts = zip(*((label, c[-1]) for label, c in label2counts.items()))
total = sum(counts)
ax.barh(y, width=counts, color=colors)
ax.set_yticks(y)
ax.set_yticklabels(labels)
ax.set_xlabel("Number of exemplars")
ax.set_ylabel("Class")
for i, count in enumerate(counts):
ax.text(count / 2, -i, f"{count/total:.1%}", va="center", ha="center")
fig.tight_layout()
return fig
def default_history_repartition_image_creator(
label2counts: Dict[int, List[int]],
counters: List[int],
colors: Union[ndarray, Iterable, int, float] = SEABORN_COLORS,
):
"""
Create a stack plot representing the labels repartition with their history.
:param label2counts: A dict holding the counts for each label, of the form
{label: [count_at_step_0, count_at_step_1, ...]}.
:param counters: The steps the counts were taken at.
:param colors: The colors to use in the chart.
"""
fig, ax = plt.subplots()
ax: Axes
ax.stackplot(
counters,
label2counts.values(),
labels=label2counts.keys(),
colors=colors,
)
ax.legend(loc="upper left")
ax.set_ylabel("Number of examples")
ax.set_xlabel("step")
fig.tight_layout()
return fig
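# Hedged usage sketch (illustrative): label2counts maps each class label to its
# per-step counts, and counters lists the steps those counts were taken at.
#
#   label2counts = {0: [10, 20, 20], 1: [0, 5, 15]}
#   counters = [0, 1, 2]
#   fig = default_history_repartition_image_creator(label2counts, counters)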
def stream_type(experience: "ClassificationExperience") -> str:
"""
Returns the name of the stream that the experience belongs to,
e.g. the experience can be part of the train or the test stream.
:param experience: the instance of the experience
"""
return experience.origin_stream.name
def phase_and_task(strategy: "SupervisedTemplate") -> Tuple[str, int]:
"""
Returns the current phase name and the associated task label.
The current task label depends on the phase. During the training
phase, the task label is the one defined in the "train_task_label"
field. On the contrary, during the eval phase the task label is the one
defined in the "eval_task_label" field.
:param strategy: The strategy instance to get the task label from.
:return: The current phase name as either "Train" or "Task" and the
associated task label.
"""
if hasattr(strategy.experience, "task_labels"):
task = strategy.experience.task_labels
if len(task) > 1:
task = None # task labels per patterns
else:
task = task[0]
else:
task = None
if strategy.is_eval:
return EVAL, task
else:
return TRAIN, task
def bytes2human(n):
# http://code.activestate.com/recipes/578019
# >>> bytes2human(10000)
# '9.8K'
# >>> bytes2human(100001221)
# '95.4M'
symbols = ("K", "M", "G", "T", "P", "E", "Z", "Y")
prefix = {}
for i, s in enumerate(symbols):
prefix[s] = 1 << (i + 1) * 10
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return "%.1f%s" % (value, s)
return "%sB" % n
def get_metric_name(
metric: "PluginMetric",
strategy: "SupervisedTemplate",
add_experience=False,
add_task=True,
):
"""
Return the complete metric name used to report its current value.
The name is composed by:
metric string representation /phase type/stream type/task id
where metric string representation is a synthetic string
describing the metric, phase type describe if the user
is training (train) or evaluating (eval), stream type describes
the type of stream the current experience belongs to (e.g. train, test)
and task id is the current task label.
:param metric: the metric object for which return the complete name
:param strategy: the current strategy object
:param add_experience: if True, add eval_exp_id to the main metric name.
Default to False.
:param add_task: if True the main metric name will include the task
information. If False, no task label will be displayed.
If an int, that value will be used as task label for the metric name.
"""
phase_name, task_label = phase_and_task(strategy)
stream = stream_type(strategy.experience)
base_name = "{}/{}_phase/{}_stream".format(str(metric), phase_name, stream)
exp_name = "/Exp{:03}".format(strategy.experience.current_experience)
# task label not present - do not print task
if task_label is None and type(add_task) == bool:
add_task = False
else:
# task label is present and printed
if type(add_task) == bool and add_task is True:
task_name = "/Task{:03}".format(task_label)
# print user-defined task label
elif type(add_task) == int:
task_name = "/Task{:03}".format(add_task)
add_task = True
# else case is add_task=False
if add_experience and not add_task:
return base_name + exp_name
elif add_experience and add_task:
return base_name + task_name + exp_name
elif not add_experience and not add_task:
return base_name
elif not add_experience and add_task:
return base_name + task_name
__all__ = [
"default_cm_image_creator",
"phase_and_task",
"get_metric_name",
"stream_type",
"bytes2human",
"default_history_repartition_image_creator",
"repartition_pie_chart_image_creator",
"repartition_bar_chart_image_creator",
]
| 34.83526
| 121
| 0.63171
|
b86fea309653e99df1c4e8e5b24546f9bbd1258c
| 451
|
py
|
Python
|
secret.py
|
wyang2/404lab3
|
4637dfed66c33681f7ebb86a21c75c5cda0efa26
|
[
"Apache-2.0"
] | null | null | null |
secret.py
|
wyang2/404lab3
|
4637dfed66c33681f7ebb86a21c75c5cda0efa26
|
[
"Apache-2.0"
] | null | null | null |
secret.py
|
wyang2/404lab3
|
4637dfed66c33681f7ebb86a21c75c5cda0efa26
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import cgi
import cgitb
cgitb.enable()
class FollowingTheTAsInstructionsError(Exception):
def __init__(self):
Exception.__init__(self, (
"You must edit secret.py to change the username, password, "
"and to delete this error!"
))
# Delete this line:
#raise FollowingTheTAsInstructionsError
# Edit the following two lines:
username = "yang"
password = "yang"
| 22.55
| 72
| 0.674058
|
0656ff5581be5d7cc15d153adce231da8d48e625
| 22,910
|
py
|
Python
|
Lib/test/test_stable_abi_ctypes.py
|
adnull/cpython
|
f2b4e458b3327130e46edb4efe8e1847de09efc5
|
[
"0BSD"
] | null | null | null |
Lib/test/test_stable_abi_ctypes.py
|
adnull/cpython
|
f2b4e458b3327130e46edb4efe8e1847de09efc5
|
[
"0BSD"
] | 4
|
2021-12-01T11:57:28.000Z
|
2022-03-01T20:05:21.000Z
|
Lib/test/test_stable_abi_ctypes.py
|
adnull/cpython
|
f2b4e458b3327130e46edb4efe8e1847de09efc5
|
[
"0BSD"
] | null | null | null |
# Generated by Tools/scripts/stable_abi.py
"""Test that all symbols of the Stable ABI are accessible using ctypes
"""
import unittest
from test.support.import_helper import import_module
ctypes_test = import_module('ctypes')
class TestStableABIAvailability(unittest.TestCase):
def test_available_symbols(self):
for symbol_name in SYMBOL_NAMES:
with self.subTest(symbol_name):
ctypes_test.pythonapi[symbol_name]
SYMBOL_NAMES = (
"PyAIter_Check",
"PyArg_Parse",
"PyArg_ParseTuple",
"PyArg_ParseTupleAndKeywords",
"PyArg_UnpackTuple",
"PyArg_VaParse",
"PyArg_VaParseTupleAndKeywords",
"PyArg_ValidateKeywordArguments",
"PyBaseObject_Type",
"PyBool_FromLong",
"PyBool_Type",
"PyBuffer_FillContiguousStrides",
"PyBuffer_FillInfo",
"PyBuffer_FromContiguous",
"PyBuffer_GetPointer",
"PyBuffer_IsContiguous",
"PyBuffer_Release",
"PyBuffer_SizeFromFormat",
"PyBuffer_ToContiguous",
"PyByteArrayIter_Type",
"PyByteArray_AsString",
"PyByteArray_Concat",
"PyByteArray_FromObject",
"PyByteArray_FromStringAndSize",
"PyByteArray_Resize",
"PyByteArray_Size",
"PyByteArray_Type",
"PyBytesIter_Type",
"PyBytes_AsString",
"PyBytes_AsStringAndSize",
"PyBytes_Concat",
"PyBytes_ConcatAndDel",
"PyBytes_DecodeEscape",
"PyBytes_FromFormat",
"PyBytes_FromFormatV",
"PyBytes_FromObject",
"PyBytes_FromString",
"PyBytes_FromStringAndSize",
"PyBytes_Repr",
"PyBytes_Size",
"PyBytes_Type",
"PyCFunction_Call",
"PyCFunction_GetFlags",
"PyCFunction_GetFunction",
"PyCFunction_GetSelf",
"PyCFunction_New",
"PyCFunction_NewEx",
"PyCFunction_Type",
"PyCMethod_New",
"PyCallIter_New",
"PyCallIter_Type",
"PyCallable_Check",
"PyCapsule_GetContext",
"PyCapsule_GetDestructor",
"PyCapsule_GetName",
"PyCapsule_GetPointer",
"PyCapsule_Import",
"PyCapsule_IsValid",
"PyCapsule_New",
"PyCapsule_SetContext",
"PyCapsule_SetDestructor",
"PyCapsule_SetName",
"PyCapsule_SetPointer",
"PyCapsule_Type",
"PyClassMethodDescr_Type",
"PyCodec_BackslashReplaceErrors",
"PyCodec_Decode",
"PyCodec_Decoder",
"PyCodec_Encode",
"PyCodec_Encoder",
"PyCodec_IgnoreErrors",
"PyCodec_IncrementalDecoder",
"PyCodec_IncrementalEncoder",
"PyCodec_KnownEncoding",
"PyCodec_LookupError",
"PyCodec_NameReplaceErrors",
"PyCodec_Register",
"PyCodec_RegisterError",
"PyCodec_ReplaceErrors",
"PyCodec_StreamReader",
"PyCodec_StreamWriter",
"PyCodec_StrictErrors",
"PyCodec_Unregister",
"PyCodec_XMLCharRefReplaceErrors",
"PyComplex_FromDoubles",
"PyComplex_ImagAsDouble",
"PyComplex_RealAsDouble",
"PyComplex_Type",
"PyDescr_NewClassMethod",
"PyDescr_NewGetSet",
"PyDescr_NewMember",
"PyDescr_NewMethod",
"PyDictItems_Type",
"PyDictIterItem_Type",
"PyDictIterKey_Type",
"PyDictIterValue_Type",
"PyDictKeys_Type",
"PyDictProxy_New",
"PyDictProxy_Type",
"PyDictRevIterItem_Type",
"PyDictRevIterKey_Type",
"PyDictRevIterValue_Type",
"PyDictValues_Type",
"PyDict_Clear",
"PyDict_Contains",
"PyDict_Copy",
"PyDict_DelItem",
"PyDict_DelItemString",
"PyDict_GetItem",
"PyDict_GetItemString",
"PyDict_GetItemWithError",
"PyDict_Items",
"PyDict_Keys",
"PyDict_Merge",
"PyDict_MergeFromSeq2",
"PyDict_New",
"PyDict_Next",
"PyDict_SetItem",
"PyDict_SetItemString",
"PyDict_Size",
"PyDict_Type",
"PyDict_Update",
"PyDict_Values",
"PyEllipsis_Type",
"PyEnum_Type",
"PyErr_BadArgument",
"PyErr_BadInternalCall",
"PyErr_CheckSignals",
"PyErr_Clear",
"PyErr_Display",
"PyErr_ExceptionMatches",
"PyErr_Fetch",
"PyErr_Format",
"PyErr_FormatV",
"PyErr_GetExcInfo",
"PyErr_GetHandledException",
"PyErr_GivenExceptionMatches",
"PyErr_NewException",
"PyErr_NewExceptionWithDoc",
"PyErr_NoMemory",
"PyErr_NormalizeException",
"PyErr_Occurred",
"PyErr_Print",
"PyErr_PrintEx",
"PyErr_ProgramText",
"PyErr_ResourceWarning",
"PyErr_Restore",
"PyErr_SetExcInfo",
"PyErr_SetFromErrno",
"PyErr_SetFromErrnoWithFilename",
"PyErr_SetFromErrnoWithFilenameObject",
"PyErr_SetFromErrnoWithFilenameObjects",
"PyErr_SetHandledException",
"PyErr_SetImportError",
"PyErr_SetImportErrorSubclass",
"PyErr_SetInterrupt",
"PyErr_SetInterruptEx",
"PyErr_SetNone",
"PyErr_SetObject",
"PyErr_SetString",
"PyErr_SyntaxLocation",
"PyErr_SyntaxLocationEx",
"PyErr_WarnEx",
"PyErr_WarnExplicit",
"PyErr_WarnFormat",
"PyErr_WriteUnraisable",
"PyEval_AcquireLock",
"PyEval_AcquireThread",
"PyEval_CallFunction",
"PyEval_CallMethod",
"PyEval_CallObjectWithKeywords",
"PyEval_EvalCode",
"PyEval_EvalCodeEx",
"PyEval_EvalFrame",
"PyEval_EvalFrameEx",
"PyEval_GetBuiltins",
"PyEval_GetFrame",
"PyEval_GetFuncDesc",
"PyEval_GetFuncName",
"PyEval_GetGlobals",
"PyEval_GetLocals",
"PyEval_InitThreads",
"PyEval_ReleaseLock",
"PyEval_ReleaseThread",
"PyEval_RestoreThread",
"PyEval_SaveThread",
"PyEval_ThreadsInitialized",
"PyExc_ArithmeticError",
"PyExc_AssertionError",
"PyExc_AttributeError",
"PyExc_BaseException",
"PyExc_BaseExceptionGroup",
"PyExc_BlockingIOError",
"PyExc_BrokenPipeError",
"PyExc_BufferError",
"PyExc_BytesWarning",
"PyExc_ChildProcessError",
"PyExc_ConnectionAbortedError",
"PyExc_ConnectionError",
"PyExc_ConnectionRefusedError",
"PyExc_ConnectionResetError",
"PyExc_DeprecationWarning",
"PyExc_EOFError",
"PyExc_EncodingWarning",
"PyExc_EnvironmentError",
"PyExc_Exception",
"PyExc_FileExistsError",
"PyExc_FileNotFoundError",
"PyExc_FloatingPointError",
"PyExc_FutureWarning",
"PyExc_GeneratorExit",
"PyExc_IOError",
"PyExc_ImportError",
"PyExc_ImportWarning",
"PyExc_IndentationError",
"PyExc_IndexError",
"PyExc_InterruptedError",
"PyExc_IsADirectoryError",
"PyExc_KeyError",
"PyExc_KeyboardInterrupt",
"PyExc_LookupError",
"PyExc_MemoryError",
"PyExc_ModuleNotFoundError",
"PyExc_NameError",
"PyExc_NotADirectoryError",
"PyExc_NotImplementedError",
"PyExc_OSError",
"PyExc_OverflowError",
"PyExc_PendingDeprecationWarning",
"PyExc_PermissionError",
"PyExc_ProcessLookupError",
"PyExc_RecursionError",
"PyExc_ReferenceError",
"PyExc_ResourceWarning",
"PyExc_RuntimeError",
"PyExc_RuntimeWarning",
"PyExc_StopAsyncIteration",
"PyExc_StopIteration",
"PyExc_SyntaxError",
"PyExc_SyntaxWarning",
"PyExc_SystemError",
"PyExc_SystemExit",
"PyExc_TabError",
"PyExc_TimeoutError",
"PyExc_TypeError",
"PyExc_UnboundLocalError",
"PyExc_UnicodeDecodeError",
"PyExc_UnicodeEncodeError",
"PyExc_UnicodeError",
"PyExc_UnicodeTranslateError",
"PyExc_UnicodeWarning",
"PyExc_UserWarning",
"PyExc_ValueError",
"PyExc_Warning",
"PyExc_ZeroDivisionError",
"PyExceptionClass_Name",
"PyException_GetCause",
"PyException_GetContext",
"PyException_GetTraceback",
"PyException_SetCause",
"PyException_SetContext",
"PyException_SetTraceback",
"PyFile_FromFd",
"PyFile_GetLine",
"PyFile_WriteObject",
"PyFile_WriteString",
"PyFilter_Type",
"PyFloat_AsDouble",
"PyFloat_FromDouble",
"PyFloat_FromString",
"PyFloat_GetInfo",
"PyFloat_GetMax",
"PyFloat_GetMin",
"PyFloat_Type",
"PyFrame_GetCode",
"PyFrame_GetLineNumber",
"PyFrozenSet_New",
"PyFrozenSet_Type",
"PyGC_Collect",
"PyGC_Disable",
"PyGC_Enable",
"PyGC_IsEnabled",
"PyGILState_Ensure",
"PyGILState_GetThisThreadState",
"PyGILState_Release",
"PyGetSetDescr_Type",
"PyImport_AddModule",
"PyImport_AddModuleObject",
"PyImport_AppendInittab",
"PyImport_ExecCodeModule",
"PyImport_ExecCodeModuleEx",
"PyImport_ExecCodeModuleObject",
"PyImport_ExecCodeModuleWithPathnames",
"PyImport_GetImporter",
"PyImport_GetMagicNumber",
"PyImport_GetMagicTag",
"PyImport_GetModule",
"PyImport_GetModuleDict",
"PyImport_Import",
"PyImport_ImportFrozenModule",
"PyImport_ImportFrozenModuleObject",
"PyImport_ImportModule",
"PyImport_ImportModuleLevel",
"PyImport_ImportModuleLevelObject",
"PyImport_ImportModuleNoBlock",
"PyImport_ReloadModule",
"PyIndex_Check",
"PyInterpreterState_Clear",
"PyInterpreterState_Delete",
"PyInterpreterState_Get",
"PyInterpreterState_GetDict",
"PyInterpreterState_GetID",
"PyInterpreterState_New",
"PyIter_Check",
"PyIter_Next",
"PyIter_Send",
"PyListIter_Type",
"PyListRevIter_Type",
"PyList_Append",
"PyList_AsTuple",
"PyList_GetItem",
"PyList_GetSlice",
"PyList_Insert",
"PyList_New",
"PyList_Reverse",
"PyList_SetItem",
"PyList_SetSlice",
"PyList_Size",
"PyList_Sort",
"PyList_Type",
"PyLongRangeIter_Type",
"PyLong_AsDouble",
"PyLong_AsLong",
"PyLong_AsLongAndOverflow",
"PyLong_AsLongLong",
"PyLong_AsLongLongAndOverflow",
"PyLong_AsSize_t",
"PyLong_AsSsize_t",
"PyLong_AsUnsignedLong",
"PyLong_AsUnsignedLongLong",
"PyLong_AsUnsignedLongLongMask",
"PyLong_AsUnsignedLongMask",
"PyLong_AsVoidPtr",
"PyLong_FromDouble",
"PyLong_FromLong",
"PyLong_FromLongLong",
"PyLong_FromSize_t",
"PyLong_FromSsize_t",
"PyLong_FromString",
"PyLong_FromUnsignedLong",
"PyLong_FromUnsignedLongLong",
"PyLong_FromVoidPtr",
"PyLong_GetInfo",
"PyLong_Type",
"PyMap_Type",
"PyMapping_Check",
"PyMapping_GetItemString",
"PyMapping_HasKey",
"PyMapping_HasKeyString",
"PyMapping_Items",
"PyMapping_Keys",
"PyMapping_Length",
"PyMapping_SetItemString",
"PyMapping_Size",
"PyMapping_Values",
"PyMarshal_ReadObjectFromString",
"PyMarshal_WriteObjectToString",
"PyMem_Calloc",
"PyMem_Free",
"PyMem_Malloc",
"PyMem_Realloc",
"PyMemberDescr_Type",
"PyMember_GetOne",
"PyMember_SetOne",
"PyMemoryView_FromBuffer",
"PyMemoryView_FromMemory",
"PyMemoryView_FromObject",
"PyMemoryView_GetContiguous",
"PyMemoryView_Type",
"PyMethodDescr_Type",
"PyModuleDef_Init",
"PyModuleDef_Type",
"PyModule_AddFunctions",
"PyModule_AddIntConstant",
"PyModule_AddObject",
"PyModule_AddObjectRef",
"PyModule_AddStringConstant",
"PyModule_AddType",
"PyModule_ExecDef",
"PyModule_GetDef",
"PyModule_GetDict",
"PyModule_GetFilename",
"PyModule_GetFilenameObject",
"PyModule_GetName",
"PyModule_GetNameObject",
"PyModule_GetState",
"PyModule_New",
"PyModule_NewObject",
"PyModule_SetDocString",
"PyModule_Type",
"PyNumber_Absolute",
"PyNumber_Add",
"PyNumber_And",
"PyNumber_AsSsize_t",
"PyNumber_Check",
"PyNumber_Divmod",
"PyNumber_Float",
"PyNumber_FloorDivide",
"PyNumber_InPlaceAdd",
"PyNumber_InPlaceAnd",
"PyNumber_InPlaceFloorDivide",
"PyNumber_InPlaceLshift",
"PyNumber_InPlaceMatrixMultiply",
"PyNumber_InPlaceMultiply",
"PyNumber_InPlaceOr",
"PyNumber_InPlacePower",
"PyNumber_InPlaceRemainder",
"PyNumber_InPlaceRshift",
"PyNumber_InPlaceSubtract",
"PyNumber_InPlaceTrueDivide",
"PyNumber_InPlaceXor",
"PyNumber_Index",
"PyNumber_Invert",
"PyNumber_Long",
"PyNumber_Lshift",
"PyNumber_MatrixMultiply",
"PyNumber_Multiply",
"PyNumber_Negative",
"PyNumber_Or",
"PyNumber_Positive",
"PyNumber_Power",
"PyNumber_Remainder",
"PyNumber_Rshift",
"PyNumber_Subtract",
"PyNumber_ToBase",
"PyNumber_TrueDivide",
"PyNumber_Xor",
"PyOS_FSPath",
"PyOS_InputHook",
"PyOS_InterruptOccurred",
"PyOS_double_to_string",
"PyOS_getsig",
"PyOS_mystricmp",
"PyOS_mystrnicmp",
"PyOS_setsig",
"PyOS_snprintf",
"PyOS_string_to_double",
"PyOS_strtol",
"PyOS_strtoul",
"PyOS_vsnprintf",
"PyObject_ASCII",
"PyObject_AsCharBuffer",
"PyObject_AsFileDescriptor",
"PyObject_AsReadBuffer",
"PyObject_AsWriteBuffer",
"PyObject_Bytes",
"PyObject_Call",
"PyObject_CallFunction",
"PyObject_CallFunctionObjArgs",
"PyObject_CallMethod",
"PyObject_CallMethodObjArgs",
"PyObject_CallNoArgs",
"PyObject_CallObject",
"PyObject_Calloc",
"PyObject_CheckBuffer",
"PyObject_CheckReadBuffer",
"PyObject_ClearWeakRefs",
"PyObject_CopyData",
"PyObject_DelItem",
"PyObject_DelItemString",
"PyObject_Dir",
"PyObject_Format",
"PyObject_Free",
"PyObject_GC_Del",
"PyObject_GC_IsFinalized",
"PyObject_GC_IsTracked",
"PyObject_GC_Track",
"PyObject_GC_UnTrack",
"PyObject_GenericGetAttr",
"PyObject_GenericGetDict",
"PyObject_GenericSetAttr",
"PyObject_GenericSetDict",
"PyObject_GetAIter",
"PyObject_GetAttr",
"PyObject_GetAttrString",
"PyObject_GetBuffer",
"PyObject_GetItem",
"PyObject_GetIter",
"PyObject_HasAttr",
"PyObject_HasAttrString",
"PyObject_Hash",
"PyObject_HashNotImplemented",
"PyObject_Init",
"PyObject_InitVar",
"PyObject_IsInstance",
"PyObject_IsSubclass",
"PyObject_IsTrue",
"PyObject_Length",
"PyObject_Malloc",
"PyObject_Not",
"PyObject_Realloc",
"PyObject_Repr",
"PyObject_RichCompare",
"PyObject_RichCompareBool",
"PyObject_SelfIter",
"PyObject_SetAttr",
"PyObject_SetAttrString",
"PyObject_SetItem",
"PyObject_Size",
"PyObject_Str",
"PyObject_Type",
"PyProperty_Type",
"PyRangeIter_Type",
"PyRange_Type",
"PyReversed_Type",
"PySeqIter_New",
"PySeqIter_Type",
"PySequence_Check",
"PySequence_Concat",
"PySequence_Contains",
"PySequence_Count",
"PySequence_DelItem",
"PySequence_DelSlice",
"PySequence_Fast",
"PySequence_GetItem",
"PySequence_GetSlice",
"PySequence_In",
"PySequence_InPlaceConcat",
"PySequence_InPlaceRepeat",
"PySequence_Index",
"PySequence_Length",
"PySequence_List",
"PySequence_Repeat",
"PySequence_SetItem",
"PySequence_SetSlice",
"PySequence_Size",
"PySequence_Tuple",
"PySetIter_Type",
"PySet_Add",
"PySet_Clear",
"PySet_Contains",
"PySet_Discard",
"PySet_New",
"PySet_Pop",
"PySet_Size",
"PySet_Type",
"PySlice_AdjustIndices",
"PySlice_GetIndices",
"PySlice_GetIndicesEx",
"PySlice_New",
"PySlice_Type",
"PySlice_Unpack",
"PyState_AddModule",
"PyState_FindModule",
"PyState_RemoveModule",
"PyStructSequence_GetItem",
"PyStructSequence_New",
"PyStructSequence_NewType",
"PyStructSequence_SetItem",
"PyStructSequence_UnnamedField",
"PySuper_Type",
"PySys_AddWarnOption",
"PySys_AddWarnOptionUnicode",
"PySys_AddXOption",
"PySys_FormatStderr",
"PySys_FormatStdout",
"PySys_GetObject",
"PySys_GetXOptions",
"PySys_HasWarnOptions",
"PySys_ResetWarnOptions",
"PySys_SetArgv",
"PySys_SetArgvEx",
"PySys_SetObject",
"PySys_SetPath",
"PySys_WriteStderr",
"PySys_WriteStdout",
"PyThreadState_Clear",
"PyThreadState_Delete",
"PyThreadState_DeleteCurrent",
"PyThreadState_Get",
"PyThreadState_GetDict",
"PyThreadState_GetFrame",
"PyThreadState_GetID",
"PyThreadState_GetInterpreter",
"PyThreadState_New",
"PyThreadState_SetAsyncExc",
"PyThreadState_Swap",
"PyThread_GetInfo",
"PyThread_ReInitTLS",
"PyThread_acquire_lock",
"PyThread_acquire_lock_timed",
"PyThread_allocate_lock",
"PyThread_create_key",
"PyThread_delete_key",
"PyThread_delete_key_value",
"PyThread_exit_thread",
"PyThread_free_lock",
"PyThread_get_key_value",
"PyThread_get_stacksize",
"PyThread_get_thread_ident",
"PyThread_init_thread",
"PyThread_release_lock",
"PyThread_set_key_value",
"PyThread_set_stacksize",
"PyThread_start_new_thread",
"PyThread_tss_alloc",
"PyThread_tss_create",
"PyThread_tss_delete",
"PyThread_tss_free",
"PyThread_tss_get",
"PyThread_tss_is_created",
"PyThread_tss_set",
"PyTraceBack_Here",
"PyTraceBack_Print",
"PyTraceBack_Type",
"PyTupleIter_Type",
"PyTuple_GetItem",
"PyTuple_GetSlice",
"PyTuple_New",
"PyTuple_Pack",
"PyTuple_SetItem",
"PyTuple_Size",
"PyTuple_Type",
"PyType_ClearCache",
"PyType_FromModuleAndSpec",
"PyType_FromSpec",
"PyType_FromSpecWithBases",
"PyType_GenericAlloc",
"PyType_GenericNew",
"PyType_GetFlags",
"PyType_GetModule",
"PyType_GetModuleState",
"PyType_GetName",
"PyType_GetQualName",
"PyType_GetSlot",
"PyType_IsSubtype",
"PyType_Modified",
"PyType_Ready",
"PyType_Type",
"PyUnicodeDecodeError_Create",
"PyUnicodeDecodeError_GetEncoding",
"PyUnicodeDecodeError_GetEnd",
"PyUnicodeDecodeError_GetObject",
"PyUnicodeDecodeError_GetReason",
"PyUnicodeDecodeError_GetStart",
"PyUnicodeDecodeError_SetEnd",
"PyUnicodeDecodeError_SetReason",
"PyUnicodeDecodeError_SetStart",
"PyUnicodeEncodeError_GetEncoding",
"PyUnicodeEncodeError_GetEnd",
"PyUnicodeEncodeError_GetObject",
"PyUnicodeEncodeError_GetReason",
"PyUnicodeEncodeError_GetStart",
"PyUnicodeEncodeError_SetEnd",
"PyUnicodeEncodeError_SetReason",
"PyUnicodeEncodeError_SetStart",
"PyUnicodeIter_Type",
"PyUnicodeTranslateError_GetEnd",
"PyUnicodeTranslateError_GetObject",
"PyUnicodeTranslateError_GetReason",
"PyUnicodeTranslateError_GetStart",
"PyUnicodeTranslateError_SetEnd",
"PyUnicodeTranslateError_SetReason",
"PyUnicodeTranslateError_SetStart",
"PyUnicode_Append",
"PyUnicode_AppendAndDel",
"PyUnicode_AsASCIIString",
"PyUnicode_AsCharmapString",
"PyUnicode_AsDecodedObject",
"PyUnicode_AsDecodedUnicode",
"PyUnicode_AsEncodedObject",
"PyUnicode_AsEncodedString",
"PyUnicode_AsEncodedUnicode",
"PyUnicode_AsLatin1String",
"PyUnicode_AsRawUnicodeEscapeString",
"PyUnicode_AsUCS4",
"PyUnicode_AsUCS4Copy",
"PyUnicode_AsUTF16String",
"PyUnicode_AsUTF32String",
"PyUnicode_AsUTF8AndSize",
"PyUnicode_AsUTF8String",
"PyUnicode_AsUnicodeEscapeString",
"PyUnicode_AsWideChar",
"PyUnicode_AsWideCharString",
"PyUnicode_BuildEncodingMap",
"PyUnicode_Compare",
"PyUnicode_CompareWithASCIIString",
"PyUnicode_Concat",
"PyUnicode_Contains",
"PyUnicode_Count",
"PyUnicode_Decode",
"PyUnicode_DecodeASCII",
"PyUnicode_DecodeCharmap",
"PyUnicode_DecodeFSDefault",
"PyUnicode_DecodeFSDefaultAndSize",
"PyUnicode_DecodeLatin1",
"PyUnicode_DecodeLocale",
"PyUnicode_DecodeLocaleAndSize",
"PyUnicode_DecodeRawUnicodeEscape",
"PyUnicode_DecodeUTF16",
"PyUnicode_DecodeUTF16Stateful",
"PyUnicode_DecodeUTF32",
"PyUnicode_DecodeUTF32Stateful",
"PyUnicode_DecodeUTF7",
"PyUnicode_DecodeUTF7Stateful",
"PyUnicode_DecodeUTF8",
"PyUnicode_DecodeUTF8Stateful",
"PyUnicode_DecodeUnicodeEscape",
"PyUnicode_EncodeFSDefault",
"PyUnicode_EncodeLocale",
"PyUnicode_FSConverter",
"PyUnicode_FSDecoder",
"PyUnicode_Find",
"PyUnicode_FindChar",
"PyUnicode_Format",
"PyUnicode_FromEncodedObject",
"PyUnicode_FromFormat",
"PyUnicode_FromFormatV",
"PyUnicode_FromObject",
"PyUnicode_FromOrdinal",
"PyUnicode_FromString",
"PyUnicode_FromStringAndSize",
"PyUnicode_FromWideChar",
"PyUnicode_GetDefaultEncoding",
"PyUnicode_GetLength",
"PyUnicode_GetSize",
"PyUnicode_InternFromString",
"PyUnicode_InternImmortal",
"PyUnicode_InternInPlace",
"PyUnicode_IsIdentifier",
"PyUnicode_Join",
"PyUnicode_Partition",
"PyUnicode_RPartition",
"PyUnicode_RSplit",
"PyUnicode_ReadChar",
"PyUnicode_Replace",
"PyUnicode_Resize",
"PyUnicode_RichCompare",
"PyUnicode_Split",
"PyUnicode_Splitlines",
"PyUnicode_Substring",
"PyUnicode_Tailmatch",
"PyUnicode_Translate",
"PyUnicode_Type",
"PyUnicode_WriteChar",
"PyWeakref_GetObject",
"PyWeakref_NewProxy",
"PyWeakref_NewRef",
"PyWrapperDescr_Type",
"PyWrapper_New",
"PyZip_Type",
"Py_AddPendingCall",
"Py_AtExit",
"Py_BuildValue",
"Py_BytesMain",
"Py_CompileString",
"Py_DecRef",
"Py_DecodeLocale",
"Py_EncodeLocale",
"Py_EndInterpreter",
"Py_EnterRecursiveCall",
"Py_Exit",
"Py_FatalError",
"Py_FileSystemDefaultEncodeErrors",
"Py_FileSystemDefaultEncoding",
"Py_Finalize",
"Py_FinalizeEx",
"Py_GenericAlias",
"Py_GenericAliasType",
"Py_GetArgcArgv",
"Py_GetBuildInfo",
"Py_GetCompiler",
"Py_GetCopyright",
"Py_GetExecPrefix",
"Py_GetPath",
"Py_GetPlatform",
"Py_GetPrefix",
"Py_GetProgramFullPath",
"Py_GetProgramName",
"Py_GetPythonHome",
"Py_GetRecursionLimit",
"Py_GetVersion",
"Py_HasFileSystemDefaultEncoding",
"Py_IncRef",
"Py_Initialize",
"Py_InitializeEx",
"Py_Is",
"Py_IsFalse",
"Py_IsInitialized",
"Py_IsNone",
"Py_IsTrue",
"Py_LeaveRecursiveCall",
"Py_Main",
"Py_MakePendingCalls",
"Py_NewInterpreter",
"Py_NewRef",
"Py_ReprEnter",
"Py_ReprLeave",
"Py_SetPath",
"Py_SetProgramName",
"Py_SetPythonHome",
"Py_SetRecursionLimit",
"Py_UTF8Mode",
"Py_VaBuildValue",
"Py_Version",
"Py_XNewRef",
"_PyArg_ParseTupleAndKeywords_SizeT",
"_PyArg_ParseTuple_SizeT",
"_PyArg_Parse_SizeT",
"_PyArg_VaParseTupleAndKeywords_SizeT",
"_PyArg_VaParse_SizeT",
"_PyErr_BadInternalCall",
"_PyObject_CallFunction_SizeT",
"_PyObject_CallMethod_SizeT",
"_PyObject_GC_New",
"_PyObject_GC_NewVar",
"_PyObject_GC_Resize",
"_PyObject_New",
"_PyObject_NewVar",
"_PyState_AddModule",
"_PyThreadState_Init",
"_PyThreadState_Prealloc",
"_PyWeakref_CallableProxyType",
"_PyWeakref_ProxyType",
"_PyWeakref_RefType",
"_Py_BuildValue_SizeT",
"_Py_CheckRecursiveCall",
"_Py_Dealloc",
"_Py_DecRef",
"_Py_EllipsisObject",
"_Py_FalseStruct",
"_Py_IncRef",
"_Py_NoneStruct",
"_Py_NotImplementedStruct",
"_Py_SwappedOp",
"_Py_TrueStruct",
"_Py_VaBuildValue_SizeT",
)
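# Hedged illustration (not part of the generated file): each name above is
# resolved exactly as in test_available_symbols, by indexing the CDLL object,
# e.g.
#   import ctypes
#   ctypes.pythonapi["Py_GetVersion"]   # lookup fails on a build missing the symbol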
| 26.701632
| 70
| 0.702357
|
b32067ffea9dda5975dc7108255d676c89f97223
| 8,306
|
py
|
Python
|
SqueezePredictors.py
|
kirill-pinigin/DeepFaceRecognitron
|
7f109328360d42c8955bc787d13f2b97964d3751
|
[
"Apache-2.0"
] | null | null | null |
SqueezePredictors.py
|
kirill-pinigin/DeepFaceRecognitron
|
7f109328360d42c8955bc787d13f2b97964d3751
|
[
"Apache-2.0"
] | null | null | null |
SqueezePredictors.py
|
kirill-pinigin/DeepFaceRecognitron
|
7f109328360d42c8955bc787d13f2b97964d3751
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from torchvision import models
import torch.nn.init as init
from NeuralModels import SILU, Perceptron
from DeepFaceRecognitron import IMAGE_SIZE, DIMENSION, CHANNELS
LATENT_DIM = int(512)
LATENT_DIM_2 = int(LATENT_DIM // 2) if LATENT_DIM > 2 else 1
class FireConvNorm(nn.Module):
def __init__(self, inplanes=128, squeeze_planes=11,
expand1x1_planes=11, expand3x3_planes=11, activation = nn.ReLU()):
super(FireConvNorm, self).__init__()
self.outplanes = int(expand1x1_planes + expand3x3_planes)
self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
self.activation = activation
self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,
kernel_size=1)
self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,
kernel_size=3, padding=1)
self.norm_sq = nn.BatchNorm2d(squeeze_planes)
self.norm1x1 = nn.BatchNorm2d(expand1x1_planes)
self.norm3x3 = nn.BatchNorm2d(expand3x3_planes)
def ConfigureNorm(self):
self.norm_sq = nn.BatchNorm2d(self.squeeze.out_channels)
self.norm1x1 = nn.BatchNorm2d(self.expand1x1.out_channels)
self.norm3x3 = nn.BatchNorm2d(self.expand3x3.out_channels)
def forward(self, x):
x = self.activation(self.norm_sq(self.squeeze(x)))
return torch.cat([
self.activation(self.norm1x1(self.expand1x1(x))),
self.activation(self.norm3x3(self.expand3x3(x)))], 1)
class SqueezeSimplePredictor(nn.Module):
def __init__(self, activation = nn.ReLU(), pretrained = True):
super(SqueezeSimplePredictor, self).__init__()
self.activation = activation
first_norm_layer = nn.BatchNorm2d(96)
final_norm_layer = nn.BatchNorm2d(DIMENSION)
self.conv1 = nn.Conv2d(CHANNELS, 96, kernel_size=7, stride=2)
self.norm1 = first_norm_layer
self.downsample1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.fire1 = FireConvNorm(96, 16, 64, 64, activation=activation)
self.fire2 = FireConvNorm(128, 16, 64, 64, activation=activation)
self.fire3 = FireConvNorm(128, 32, 128, 128, activation=activation)
self.downsample2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.fire4 = FireConvNorm(256, 32, 128, 128, activation=activation)
self.fire5 = FireConvNorm(256, 48, 192, 192, activation=activation)
self.fire6 = FireConvNorm(384, 48, 192, 192, activation=activation)
self.fire7 = FireConvNorm(384, 64, 256, 256, activation=activation)
self.downsample3 = nn.MaxPool2d(kernel_size=3, stride=2)
self.fire8 = FireConvNorm(512, 64, 256, 256, activation=activation)
if pretrained:
model = models.squeezenet1_0(pretrained=True).features
if CHANNELS == 3:
self.conv1 = model[0]
self.fire1.squeeze = model[3].squeeze
self.fire1.expand1x1 = model[3].expand1x1
self.fire1.expand3x3 = model[3].expand3x3
self.fire1.ConfigureNorm()
self.fire2.squeeze = model[4].squeeze
self.fire2.expand1x1 = model[4].expand1x1
self.fire2.expand3x3 = model[4].expand3x3
self.fire2.ConfigureNorm()
self.fire3.squeeze = model[5].squeeze
self.fire3.expand1x1 = model[5].expand1x1
self.fire3.expand3x3 = model[5].expand3x3
self.fire3.ConfigureNorm()
self.fire4.squeeze = model[7].squeeze
self.fire4.expand1x1 = model[7].expand1x1
self.fire4.expand3x3 = model[7].expand3x3
self.fire4.ConfigureNorm()
self.fire5.squeeze = model[8].squeeze
self.fire5.expand1x1 = model[8].expand1x1
self.fire5.expand3x3 = model[8].expand3x3
self.fire5.ConfigureNorm()
self.fire6.squeeze = model[9].squeeze
self.fire6.expand1x1 = model[9].expand1x1
self.fire6.expand3x3 = model[9].expand3x3
self.fire6.ConfigureNorm()
self.fire7.squeeze = model[10].squeeze
self.fire7.expand1x1 = model[10].expand1x1
self.fire7.expand3x3 = model[10].expand3x3
self.fire7.ConfigureNorm()
self.fire8.squeeze = model[12].squeeze
self.fire8.expand1x1 = model[12].expand1x1
self.fire8.expand3x3 = model[12].expand3x3
self.fire8.ConfigureNorm()
else:
for m in self.modules():
if isinstance(m, nn.Conv2d):
                    init.kaiming_uniform_(m.weight)
                    if m.bias is not None:
                        init.constant_(m.bias, 0)
self.predictor = nn.Sequential(
nn.Dropout(p=0),
nn.Conv2d(LATENT_DIM, DIMENSION, kernel_size=1),
final_norm_layer,
activation,
nn.AdaptiveAvgPool2d(1),
)
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.activation(x)
x = self.downsample1(x)
x = self.fire1(x)
x = self.fire2(x)
x = self.fire3(x)
x = self.downsample2(x)
x = self.fire4(x)
x = self.fire5(x)
x = self.fire6(x)
x = self.fire7(x)
x = self.downsample3(x)
x = self.fire8(x)
x = self.predictor(x)
x = x.view(x.size(0), -1)
return x
class SqueezeResidualPredictor(SqueezeSimplePredictor):
def __init__(self, activation=nn.ReLU(), pretrained=True):
super(SqueezeResidualPredictor, self).__init__(activation, pretrained)
final_norm_layer = nn.BatchNorm2d(LATENT_DIM)
self.features = nn.Sequential(
nn.Conv2d(LATENT_DIM, LATENT_DIM, kernel_size=1),
final_norm_layer,
activation,
nn.AdaptiveAvgPool2d(1),
)
reduce_number = int((LATENT_DIM + CHANNELS) / 2.0)
sub_dimension = reduce_number if reduce_number > CHANNELS else (reduce_number + CHANNELS)
self.predictor = nn.Sequential(
Perceptron(LATENT_DIM, sub_dimension),
nn.Dropout(p=0),
activation,
Perceptron(sub_dimension, CHANNELS),
)
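        # For example, with CHANNELS == 3 the hidden width between the two
        # Perceptron layers is int((512 + 3) / 2.0) == 257 (illustrative only).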
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.activation(x)
x = self.downsample1(x)
f1 = self.fire1(x)
x = self.fire2(f1)
x = torch.add(x,f1)
x = self.fire3(x)
d2 = self.downsample2(x)
x = self.fire4(d2)
x = torch.add(x, d2)
f5 = self.fire5(x)
x = self.fire6(f5)
x = torch.add(x, f5)
x = self.fire7(x)
d3 = self.downsample3(x)
x = self.fire8(d3)
x = torch.add(x, d3)
x = self.features(x)
x = self.predictor(x)
return x
class SqueezeShuntPredictor(SqueezeResidualPredictor):
def __init__(self, activation = nn.ReLU(), type_norm = 'batch', pretrained = False):
        super(SqueezeShuntPredictor, self).__init__(activation=activation, pretrained=pretrained)
self.shunt1 = nn.Sequential(nn.ReLU(), nn.Conv2d(96,128, kernel_size=1), nn.Sigmoid())
self.shunt2 = nn.Sequential(nn.ReLU(), nn.Conv2d(128, 256, kernel_size=1), nn.Sigmoid())
self.shunt3 = nn.Sequential(nn.ReLU(), nn.Conv2d(256, 384, kernel_size=1), nn.Sigmoid())
self.shunt4 = nn.Sequential(nn.ReLU(), nn.Conv2d(384, 512, kernel_size=1), nn.Sigmoid())
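        # Each shunt is a sigmoid-gated 1x1 projection of an earlier feature
        # map; in forward() its output is multiplied element-wise onto a later
        # fire block (torch.mul), acting as a simple gating mechanism.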
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.activation(x)
d1 = self.downsample1(x)
f1 = self.fire1(d1)
s1 = self.shunt1(d1)
x = torch.mul(f1, s1)
x = self.fire2(x)
s2 = self.shunt2(x)
x = self.fire3(x)
x = torch.mul(x, s2)
d2 = self.downsample2(x)
x = self.fire4(d2)
s3 = self.shunt3(x)
f5 = self.fire5(x)
x = torch.mul(f5, s3)
x = self.fire6(x)
s4 = self.shunt4(x)
x = self.fire7(x)
x = torch.mul(x, s4)
d3 = self.downsample3(x)
x = self.fire8(d3)
x = self.features(x)
x = self.predictor(x)
return x
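# Minimal smoke test (a sketch, not part of the original module). It assumes
# the IMAGE_SIZE imported from DeepFaceRecognitron is large enough to survive
# the four stride-2 stages of the backbone.
if __name__ == '__main__':
    _model = SqueezeSimplePredictor(pretrained=False)
    _dummy = torch.randn(1, CHANNELS, IMAGE_SIZE, IMAGE_SIZE)
    print(_model(_dummy).shape)  # expected: torch.Size([1, DIMENSION])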
| 37.414414
| 118
| 0.599205
|
af5d70cc6540350b770e8c55d6ae0d130c566b40
| 2,521
|
py
|
Python
|
tests/test_get_environment_variables.py
|
odant/conan-get_vcvars
|
b8ec3c712d18499477c5c44c71b177d1c873e508
|
[
"MIT"
] | null | null | null |
tests/test_get_environment_variables.py
|
odant/conan-get_vcvars
|
b8ec3c712d18499477c5c44c71b177d1c873e508
|
[
"MIT"
] | null | null | null |
tests/test_get_environment_variables.py
|
odant/conan-get_vcvars
|
b8ec3c712d18499477c5c44c71b177d1c873e508
|
[
"MIT"
] | null | null | null |
import unittest
from unittest.mock import patch
import get_vcvars
import subprocess
from conans.errors import ConanException
set_output = r'''
**********************************************************************
** Visual Studio 2017 Developer Command Prompt v15.5.5
** Copyright (c) 2017 Microsoft Corporation
**********************************************************************
[vcvarsall.bat] Environment initialized for: 'x64'
__BEGINS__
INCLUDE=C:\Program Files (x86)\Windows Kits\10\include\10.0.16299.0\ucrt;
LIB=C:\Program Files (x86)\Windows Kits\10\lib\10.0.16299.0\ucrt\x64;
LIBPATH=C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Tools\MSVC\14.12.25827\lib\x64;
Path=C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Tools\MSVC\14.12.25827\bin\HostX64\x64;C:\Program Files (x86)\Windows Kits\10\bin\10.0.16299.0\x64;
'''
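# The fake `set` output above is what get_vcvars presumably parses: everything
# after the __BEGINS__ marker is split into KEY=VALUE pairs and returned as a
# dict with upper-cased keys (note Path -> PATH in the expected result below).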
class Test_get_environment_variables(unittest.TestCase):
@patch("subprocess.check_output")
def test_normal(self, mock_subprocess_check_output):
mock_subprocess_check_output.return_value = set_output.encode()
vcvarsall = "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat"
args = ["amd64", "-vcvars_ver=14.0"]
result = get_vcvars.get_environment_variables(vcvarsall, args)
self.assertEqual(result, {
"INCLUDE": "C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.16299.0\\ucrt;",
"LIB": "C:\\Program Files (x86)\\Windows Kits\\10\\lib\\10.0.16299.0\\ucrt\\x64;",
"LIBPATH": "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Tools\MSVC\\14.12.25827\\lib\\x64;",
"PATH": "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Tools\\MSVC\\14.12.25827\\bin\\HostX64\\x64;C:\\Program Files (x86)\\Windows Kits\\10\\bin\\10.0.16299.0\\x64;"
})
cmd = "call \"" + vcvarsall + "\" amd64 -vcvars_ver=14.0 && echo __BEGINS__ && set"
mock_subprocess_check_output.assert_called_once_with(cmd, shell=True)
    def test_exception(self):
vcvarsall = "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsall.ba_"
args = ["amd64", "-vcvars_ver=14.0"]
with self.assertRaises(ConanException):
get_vcvars.get_environment_variables(vcvarsall, args)
if __name__ == "__main__":
unittest.main()
| 47.566038
| 204
| 0.638239
|
50cedaf8bfddb6f72ac6d90ef2cb2bde92bf99af
| 12,984
|
py
|
Python
|
src/lib/access.py
|
cognibit/Text-Normalization-Demo
|
36355f4a2c5187948fe786b7318259151f9a9db6
|
[
"Apache-2.0"
] | 66
|
2018-06-04T05:19:49.000Z
|
2022-01-08T23:15:13.000Z
|
source/dnc/access.py
|
Octavian-ai/genetic-curriculum
|
c409681be92880793c021586f35f0ac2af5e5003
|
[
"Apache-2.0"
] | 1
|
2019-07-02T14:44:44.000Z
|
2019-07-03T14:54:24.000Z
|
src/lib/access.py
|
cognibit/Text-Normalization-Demo
|
36355f4a2c5187948fe786b7318259151f9a9db6
|
[
"Apache-2.0"
] | 7
|
2018-06-12T14:22:00.000Z
|
2022-02-22T01:18:12.000Z
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DNC access modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import sonnet as snt
import tensorflow as tf
from . import addressing
from . import util
AccessState = collections.namedtuple('AccessState', (
'memory', 'read_weights', 'write_weights', 'linkage', 'usage'))
def _erase_and_write(memory, address, reset_weights, values):
"""Module to erase and write in the external memory.
Erase operation:
M_t'(i) = M_{t-1}(i) * (1 - w_t(i) * e_t)
Add operation:
M_t(i) = M_t'(i) + w_t(i) * a_t
where e are the reset_weights, w the write weights and a the values.
Args:
memory: 3-D tensor of shape `[batch_size, memory_size, word_size]`.
address: 3-D tensor `[batch_size, num_writes, memory_size]`.
reset_weights: 3-D tensor `[batch_size, num_writes, word_size]`.
values: 3-D tensor `[batch_size, num_writes, word_size]`.
Returns:
3-D tensor of shape `[batch_size, num_writes, word_size]`.
"""
with tf.name_scope('erase_memory', values=[memory, address, reset_weights]):
expand_address = tf.expand_dims(address, 3)
reset_weights = tf.expand_dims(reset_weights, 2)
weighted_resets = expand_address * reset_weights
reset_gate = tf.reduce_prod(1 - weighted_resets, [1])
memory *= reset_gate
with tf.name_scope('additive_write', values=[memory, address, values]):
add_matrix = tf.matmul(address, values, adjoint_a=True)
memory += add_matrix
return memory
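# Worked micro-example (illustrative values only): with one memory slot,
# word_size 2, memory M = [[1., 1.]], write address w = [1.], reset weights
# e = [1., 0.] and write values a = [5., 5.]:
#   erase: M' = M * (1 - w * e) = [[0., 1.]]
#   write: M  = M' + w^T a      = [[5., 6.]]
# The first word element is fully erased and overwritten; the second is left
# untouched and simply has the new value added.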
class MemoryAccess(snt.RNNCore):
"""Access module of the Differentiable Neural Computer.
This memory module supports multiple read and write heads. It makes use of:
* `addressing.TemporalLinkage` to track the temporal ordering of writes in
memory for each write head.
* `addressing.FreenessAllocator` for keeping track of memory usage, where
    usage increases when a memory location is written to, and decreases when
    memory is read from, provided the controller indicates that the read
    locations can be freed.
Write-address selection is done by an interpolation between content-based
lookup and using unused memory.
Read-address selection is done by an interpolation of content-based lookup
and following the link graph in the forward or backwards read direction.
"""
def __init__(self,
memory_size=128,
word_size=20,
num_reads=1,
num_writes=1,
name='memory_access'):
"""Creates a MemoryAccess module.
Args:
memory_size: The number of memory slots (N in the DNC paper).
word_size: The width of each memory slot (W in the DNC paper)
num_reads: The number of read heads (R in the DNC paper).
num_writes: The number of write heads (fixed at 1 in the paper).
name: The name of the module.
"""
super(MemoryAccess, self).__init__(name=name)
self._memory_size = memory_size
self._word_size = word_size
self._num_reads = num_reads
self._num_writes = num_writes
self._write_content_weights_mod = addressing.CosineWeights(
num_writes, word_size, name='write_content_weights')
self._read_content_weights_mod = addressing.CosineWeights(
num_reads, word_size, name='read_content_weights')
self._linkage = addressing.TemporalLinkage(memory_size, num_writes)
self._freeness = addressing.Freeness(memory_size)
def _build(self, inputs, prev_state):
"""Connects the MemoryAccess module into the graph.
Args:
inputs: tensor of shape `[batch_size, input_size]`. This is used to
control this access module.
prev_state: Instance of `AccessState` containing the previous state.
Returns:
A tuple `(output, next_state)`, where `output` is a tensor of shape
`[batch_size, num_reads, word_size]`, and `next_state` is the new
`AccessState` named tuple at the current time t.
"""
inputs = self._read_inputs(inputs)
# Update usage using inputs['free_gate'] and previous read & write weights.
usage = self._freeness(
write_weights=prev_state.write_weights,
free_gate=inputs['free_gate'],
read_weights=prev_state.read_weights,
prev_usage=prev_state.usage)
# Write to memory.
write_weights = self._write_weights(inputs, prev_state.memory, usage)
memory = _erase_and_write(
prev_state.memory,
address=write_weights,
reset_weights=inputs['erase_vectors'],
values=inputs['write_vectors'])
linkage_state = self._linkage(write_weights, prev_state.linkage)
# Read from memory.
read_weights = self._read_weights(
inputs,
memory=memory,
prev_read_weights=prev_state.read_weights,
link=linkage_state.link)
read_words = tf.matmul(read_weights, memory)
return (read_words, AccessState(
memory=memory,
read_weights=read_weights,
write_weights=write_weights,
linkage=linkage_state,
usage=usage))
def _read_inputs(self, inputs):
"""Applies transformations to `inputs` to get control for this module."""
def _linear(first_dim, second_dim, name, activation=None):
"""Returns a linear transformation of `inputs`, followed by a reshape."""
linear = snt.Linear(first_dim * second_dim, name=name)(inputs)
if activation is not None:
linear = activation(linear, name=name + '_activation')
return tf.reshape(linear, [-1, first_dim, second_dim])
# v_t^i - The vectors to write to memory, for each write head `i`.
write_vectors = _linear(self._num_writes, self._word_size, 'write_vectors')
# e_t^i - Amount to erase the memory by before writing, for each write head.
erase_vectors = _linear(self._num_writes, self._word_size, 'erase_vectors',
tf.sigmoid)
# f_t^j - Amount that the memory at the locations read from at the previous
# time step can be declared unused, for each read head `j`.
free_gate = tf.sigmoid(
snt.Linear(self._num_reads, name='free_gate')(inputs))
# g_t^{a, i} - Interpolation between writing to unallocated memory and
# content-based lookup, for each write head `i`. Note: `a` is simply used to
# identify this gate with allocation vs writing (as defined below).
allocation_gate = tf.sigmoid(
snt.Linear(self._num_writes, name='allocation_gate')(inputs))
# g_t^{w, i} - Overall gating of write amount for each write head.
write_gate = tf.sigmoid(
snt.Linear(self._num_writes, name='write_gate')(inputs))
# \pi_t^j - Mixing between "backwards" and "forwards" positions (for
# each write head), and content-based lookup, for each read head.
num_read_modes = 1 + 2 * self._num_writes
read_mode = snt.BatchApply(tf.nn.softmax)(
_linear(self._num_reads, num_read_modes, name='read_mode'))
# Parameters for the (read / write) "weights by content matching" modules.
write_keys = _linear(self._num_writes, self._word_size, 'write_keys')
write_strengths = snt.Linear(self._num_writes, name='write_strengths')(
inputs)
read_keys = _linear(self._num_reads, self._word_size, 'read_keys')
read_strengths = snt.Linear(self._num_reads, name='read_strengths')(inputs)
result = {
'read_content_keys': read_keys,
'read_content_strengths': read_strengths,
'write_content_keys': write_keys,
'write_content_strengths': write_strengths,
'write_vectors': write_vectors,
'erase_vectors': erase_vectors,
'free_gate': free_gate,
'allocation_gate': allocation_gate,
'write_gate': write_gate,
'read_mode': read_mode,
}
return result
def _write_weights(self, inputs, memory, usage):
"""Calculates the memory locations to write to.
This uses a combination of content-based lookup and finding an unused
location in memory, for each write head.
Args:
inputs: Collection of inputs to the access module, including controls for
how to chose memory writing, such as the content to look-up and the
weighting between content-based and allocation-based addressing.
memory: A tensor of shape `[batch_size, memory_size, word_size]`
containing the current memory contents.
usage: Current memory usage, which is a tensor of shape `[batch_size,
memory_size]`, used for allocation-based addressing.
Returns:
tensor of shape `[batch_size, num_writes, memory_size]` indicating where
to write to (if anywhere) for each write head.
"""
with tf.name_scope('write_weights', values=[inputs, memory, usage]):
# c_t^{w, i} - The content-based weights for each write head.
write_content_weights = self._write_content_weights_mod(
memory, inputs['write_content_keys'],
inputs['write_content_strengths'])
# a_t^i - The allocation weights for each write head.
write_allocation_weights = self._freeness.write_allocation_weights(
usage=usage,
write_gates=(inputs['allocation_gate'] * inputs['write_gate']),
num_writes=self._num_writes)
# Expands gates over memory locations.
allocation_gate = tf.expand_dims(inputs['allocation_gate'], -1)
write_gate = tf.expand_dims(inputs['write_gate'], -1)
# w_t^{w, i} - The write weightings for each write head.
return write_gate * (allocation_gate * write_allocation_weights +
(1 - allocation_gate) * write_content_weights)
def _read_weights(self, inputs, memory, prev_read_weights, link):
"""Calculates read weights for each read head.
The read weights are a combination of following the link graphs in the
forward or backward directions from the previous read position, and doing
content-based lookup. The interpolation between these different modes is
done by `inputs['read_mode']`.
Args:
inputs: Controls for this access module. This contains the content-based
keys to lookup, and the weightings for the different read modes.
memory: A tensor of shape `[batch_size, memory_size, word_size]`
containing the current memory contents to do content-based lookup.
prev_read_weights: A tensor of shape `[batch_size, num_reads,
memory_size]` containing the previous read locations.
link: A tensor of shape `[batch_size, num_writes, memory_size,
memory_size]` containing the temporal write transition graphs.
Returns:
A tensor of shape `[batch_size, num_reads, memory_size]` containing the
read weights for each read head.
"""
with tf.name_scope(
'read_weights', values=[inputs, memory, prev_read_weights, link]):
# c_t^{r, i} - The content weightings for each read head.
content_weights = self._read_content_weights_mod(
memory, inputs['read_content_keys'], inputs['read_content_strengths'])
# Calculates f_t^i and b_t^i.
forward_weights = self._linkage.directional_read_weights(
link, prev_read_weights, forward=True)
backward_weights = self._linkage.directional_read_weights(
link, prev_read_weights, forward=False)
backward_mode = inputs['read_mode'][:, :, :self._num_writes]
forward_mode = (
inputs['read_mode'][:, :, self._num_writes:2 * self._num_writes])
content_mode = inputs['read_mode'][:, :, 2 * self._num_writes]
read_weights = (
tf.expand_dims(content_mode, 2) * content_weights + tf.reduce_sum(
tf.expand_dims(forward_mode, 3) * forward_weights, 2) +
tf.reduce_sum(tf.expand_dims(backward_mode, 3) * backward_weights, 2))
return read_weights
@property
def state_size(self):
"""Returns a tuple of the shape of the state tensors."""
return AccessState(
memory=tf.TensorShape([self._memory_size, self._word_size]),
read_weights=tf.TensorShape([self._num_reads, self._memory_size]),
write_weights=tf.TensorShape([self._num_writes, self._memory_size]),
linkage=self._linkage.state_size,
usage=self._freeness.state_size)
@property
def output_size(self):
"""Returns the output shape."""
return tf.TensorShape([self._num_reads, self._word_size])
| 40.702194
| 80
| 0.692699
|
300e82f9e89a256d8e5297702d02d2fcc387ff80
| 7,040
|
py
|
Python
|
HW3/deployment/dash_example_web.py
|
qswang1988/data_mining_homework
|
0ebd10f278170af3509ff3ccca60311c5082ffe2
|
[
"MIT"
] | null | null | null |
HW3/deployment/dash_example_web.py
|
qswang1988/data_mining_homework
|
0ebd10f278170af3509ff3ccca60311c5082ffe2
|
[
"MIT"
] | null | null | null |
HW3/deployment/dash_example_web.py
|
qswang1988/data_mining_homework
|
0ebd10f278170af3509ff3ccca60311c5082ffe2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Created By : luis-eduardo@dsv.su.se
# Created Date: 2020/06/30
# =============================================================================
"""
Course: Data Mining for Computer and Systems Sciences
Lab 5: Model Deployment
Creates a web platform to interact with the webserver
"""
# =============================================================================
# Imports
# =============================================================================
import helper_dash_example
import dash
from dash.dependencies import Input, Output, State
from pathlib import Path
import numpy as np
import pandas as pd
import pickle
# =============================================================================
# Main
# =============================================================================
# Relative paths respect to current file
THIS_FILE_PATH = str(Path(__file__).parent.absolute())+"/"
filename_to_load = THIS_FILE_PATH + "model.pickle"
# Variables to create the data structure from the web interface
#['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin','BMI', 'DiabetesPedigreeFunction', 'Age']
#dataset_colnames = ['P', 'G', 'BP', 'S', 'I', 'BMI','D','A']
dataset_colnames = ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin','BMI', 'DiabetesPedigreeFunction', 'Age']
sample = None # DataFrame with the data that the user has input in the webpage
# Load trained model
loaded_model = None
with open(filename_to_load, "rb") as readFile:
loaded_model = pickle.load(readFile)
# Styling for HTML website
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# Create web server
app = dash.Dash("dami_analytics_lab", external_stylesheets=external_stylesheets)
# The additional file `helper_dash_example` contains the entire webpage structure
app.layout = helper_dash_example.app_html_layout
# =============================================================================
# Callbacks to setup the interaction between webpage and controls
# The next syntax is specific from the dash library, documentation can be found
# on https://dash.plotly.com/
# =============================================================================
# Sliders
# Generic function to return the string from a change in the web app
def update_value(value):
return str(value)
'''
#['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness','Insulin','BMI', 'DiabetesPedigreeFunction', 'Age']
# [P,G,BP,S,I,BMI,D,A]
[State('slider-P', 'value'),
State('slider-G', 'value'),
State('slider-BP', 'value'),
State('slider-S', 'value'),
State('slider-I', 'value'),
State('slider-BMI', 'value'),
State('slider-D', 'value'),
State('slider-A', 'value'),
]
'''
@app.callback(
Output(component_id='value-slider-P', component_property='children'),
[Input(component_id='slider-P', component_property='value')]
)
def update_area(value):
return update_value(value)
@app.callback(
Output(component_id='value-slider-G', component_property='children'),
[Input(component_id='slider-G', component_property='value')]
)
def update_perimeter(value):
return update_value(value)
@app.callback(
Output(component_id='value-slider-BP', component_property='children'),
[Input(component_id='slider-BP', component_property='value')]
)
def update_compactness(value):
return update_value(value)
@app.callback(
Output(component_id='value-slider-S', component_property='children'),
[Input(component_id='slider-S', component_property='value')]
)
def update_length_kernel(value):
return update_value(value)
@app.callback(
Output(component_id='value-slider-I', component_property='children'),
[Input(component_id='slider-I', component_property='value')]
)
def update_width_kernel(value):
return update_value(value)
@app.callback(
Output(component_id='value-slider-BMI', component_property='children'),
[Input(component_id='slider-BMI', component_property='value')]
)
def update_asymm_coeff(value):
return update_value(value)
@app.callback(
Output(component_id='value-slider-D', component_property='children'),
[Input(component_id='slider-D', component_property='value')]
)
def update_length_kernel_groove(value):
return update_value(value)
@app.callback(
Output(component_id='value-slider-A', component_property='children'),
[Input(component_id='slider-A', component_property='value')]
)
def update_length_kernel_groove_(value):
return update_value(value)
# Visualization
@app.callback(
Output(component_id='graph-histogram', component_property='figure'),
[Input(component_id='dropdown-histogram', component_property='value'),
Input(component_id='submit', component_property='n_clicks')]
)
def update_histogram(colname, n_clicks):
return helper_dash_example.update_histogram(colname, sample)
@app.callback(
Output(component_id='graph-scatter', component_property='figure'),
[Input(component_id='dropdown-scatter-1', component_property='value'),
Input(component_id='dropdown-scatter-2', component_property='value'),
Input(component_id='submit', component_property='n_clicks')]
)
def update_scatter(col1, col2, n_clicks):
return helper_dash_example.update_scatter(col1, col2, sample)
# Classification Button
#['P', 'G', 'BP', 'S', 'I', 'BMI','D','A']
@app.callback(
Output(component_id='classification-result', component_property='children'),
[Input(component_id='submit', component_property='n_clicks')],
[State('slider-P', 'value'),
State('slider-G', 'value'),
State('slider-BP', 'value'),
State('slider-S', 'value'),
State('slider-I', 'value'),
State('slider-BMI', 'value'),
State('slider-D', 'value'),
State('slider-A', 'value'),
]
)
def execute_classification(n_clicks,Pregnancies,Glucose,BloodPressure,SkinThickness,Insulin,BMI,DiabetesPedigreeFunction,Age):
"""
Main method. Loads the trained model, applies the input data and returns a class
"""
    if n_clicks is None:  # when the application first opens
return "Press below to execute the classification"
else:
# The sliders' values are already parsed to numeric values
# Here we create a DataFrame with the input data
data_from_user = [Pregnancies,Glucose,BloodPressure,SkinThickness,Insulin,BMI,DiabetesPedigreeFunction,Age]
global sample
sample = pd.DataFrame(data=[data_from_user], columns=dataset_colnames)
# Execute the prediction using the loaded trained model.
prediction = loaded_model.predict(sample)
# Return final message
prediction_labels = ["Negative", "Positive"]
return "The predicted class of the input data is: ["+ str(prediction[0]) +":" + prediction_labels[prediction[0]] + "]"
# Run the web server when this script is executed in Python
if __name__ == "__main__":
app.run_server(debug=True)
| 36.102564
| 131
| 0.655114
|
f41fff7cc8d5983f8a3e22fce084ac77a6202b06
| 2,905
|
py
|
Python
|
tests/functional/regressions/test_issue228.py
|
remorses/tartiflette-whl
|
92bed13de130a7a88278d7019314135e01281259
|
[
"MIT"
] | 530
|
2019-06-04T11:45:36.000Z
|
2022-03-31T09:29:56.000Z
|
tests/functional/regressions/test_issue228.py
|
remorses/tartiflette-whl
|
92bed13de130a7a88278d7019314135e01281259
|
[
"MIT"
] | 242
|
2019-06-04T11:53:08.000Z
|
2022-03-28T07:06:27.000Z
|
tests/functional/regressions/test_issue228.py
|
remorses/tartiflette-whl
|
92bed13de130a7a88278d7019314135e01281259
|
[
"MIT"
] | 36
|
2019-06-21T06:40:27.000Z
|
2021-11-04T13:11:16.000Z
|
import pytest
from tartiflette import Directive, Resolver, create_engine
@pytest.mark.asyncio
async def test_issue228_1():
@Resolver("Query.a", schema_name="issue228_1")
async def lol(*_args, **_kwargs):
return {"ninja": "Ohio"}
_engine = await create_engine(
sdl="""
type Query {
a: Lol
}""",
modules=[
{
"name": "tests.functional.regressions.issue228.a_module",
"config": {"val": "Blah!!"},
}
],
schema_name="issue228_1",
)
assert await _engine.execute("query aquery { a { ninja } }") == {
"data": {"a": {"ninja": "Ohio Ninja Blah!!GO !"}}
}
@pytest.mark.asyncio
async def test_issue228_2():
@Resolver("Query.a", schema_name="issue228_2")
async def lol(*_args, **_kwargs):
return {"ninja": "Ohio"}
_engine = await create_engine(
sdl="""
type Query {
a: Lol
}""",
modules=[
{
"name": "tests.functional.regressions.issue228.b_module",
"config": {"val": "Blah!!"},
}
],
schema_name="issue228_2",
)
assert await _engine.execute("query aquery { a { ninja } }") == {
"data": {"a": {"ninja": "Ohio NinjaB BBlah!!GO !B"}}
}
@pytest.mark.asyncio
async def test_issue228_3():
from tartiflette.types.exceptions.tartiflette import GraphQLSchemaError
sdl = """
directive @tartifyMe on FIELD_DEFINITION
"""
@Directive("tartifyMe", schema_name="issue228_3")
class TartifyYourself:
@staticmethod
def on_pre_output_coercion(*_, **_kwargs):
pass
@staticmethod
def on_post_input_coercion(*_, **_kwargs):
pass
@staticmethod
def on_field_execution(*_, **_kwargs):
pass
@staticmethod
def on_argument_execution(*_, **_kwargs):
pass
@staticmethod
def on_introspection(*_, **_kwargs):
pass
def on_schema_subscription(self, *_, **_kwargs):
pass
def on_schema_execution(self, *_, **_kwargs):
pass
with pytest.raises(
GraphQLSchemaError,
match="""
0: Missing Query Type < Query >.
1: Directive tartifyMe Method on_pre_output_coercion is not awaitable.
2: Directive tartifyMe Method on_introspection is not awaitable.
3: Directive tartifyMe Method on_post_input_coercion is not awaitable.
4: Directive tartifyMe Method on_argument_execution is not awaitable.
5: Directive tartifyMe Method on_field_execution is not awaitable.
6: Directive tartifyMe Method on_schema_execution is not awaitable.
7: Directive tartifyMe Method on_schema_subscription is not an Async Generator.""",
):
await create_engine(sdl=sdl, schema_name="issue228_3")
| 26.651376
| 83
| 0.59346
|
766bff75b4bb104f738f25a4dbe937d9509923d6
| 7,213
|
py
|
Python
|
docs/conf.py
|
TomomasaTakatori/metadata
|
e3cb35fe220e54db7a3c5804e0545bf94a01077a
|
[
"Apache-2.0"
] | 147
|
2019-03-27T01:40:34.000Z
|
2022-03-07T08:51:20.000Z
|
docs/conf.py
|
TomomasaTakatori/metadata
|
e3cb35fe220e54db7a3c5804e0545bf94a01077a
|
[
"Apache-2.0"
] | 265
|
2019-03-28T16:54:43.000Z
|
2021-11-19T21:57:33.000Z
|
docs/conf.py
|
TomomasaTakatori/metadata
|
e3cb35fe220e54db7a3c5804e0545bf94a01077a
|
[
"Apache-2.0"
] | 81
|
2019-03-28T16:49:25.000Z
|
2021-11-25T07:43:49.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Initially all the files in docs were generated by the https://www.sphinx-doc.org/en/master/man/sphinx-quickstart.html tool. There is no need to run this tool again.
# The docs can now be generated using the `make html` command which calls the https://www.sphinx-doc.org/en/master/man/sphinx-build.html tool
# Afterwards I made many changes to the generated files
# Changes I made:
# conf.py: Added the package path to sys.path
# conf.py: Added extensions: sphinx.ext.autodoc, sphinx.ext.napoleon
# conf.py: Set the project information
# conf.py: Set the theme to sphinx_rtd_theme
# *.rst: Added ":imported-members:" to all automodule invocations so that the members imported in __init__.py are included
# *.rst: Reworked the files removing empty or unneeded sections
# *.rst: Manually split out some modules and classes into separate pages: kfp.Client, kfp.extensions
# *.rst: Fully reworked the kfp.rst and index.rst pages
# When SDK code changes are submitted to master, the GitHub sends a signal to ReadTheDocs using a webhook.
# RTD automatically pulls the branch and generates the documentation at https://kf-Metadata.readthedocs.io
import os
import sys
import ml_metadata
import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = 'Kubeflow Metadata'
copyright = '2019, Google'
author = 'Google'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx_autodoc_typehints',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'KubeflowMetadatadoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'KubeflowMetadata.tex', 'Kubeflow Metadata Documentation',
'Google', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'kubeflowMetadata', 'Kubeflow Metadata Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'KubeflowMetadata', 'Kubeflow Metadata Documentation',
author, 'KubeflowMetadata', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| 33.087156
| 166
| 0.68224
|
1530752ce21081661790fa77c3461c9a8f60745a
| 2,123
|
py
|
Python
|
pipelines/climate/jobs/impactlab_website/job_bcsd_orig_ir_dummy.py
|
ClimateImpactLab/pipelines
|
aa17823486f92b542e42ed19dec094a80f61fb34
|
[
"MIT"
] | null | null | null |
pipelines/climate/jobs/impactlab_website/job_bcsd_orig_ir_dummy.py
|
ClimateImpactLab/pipelines
|
aa17823486f92b542e42ed19dec094a80f61fb34
|
[
"MIT"
] | 14
|
2017-05-30T02:59:03.000Z
|
2017-09-28T00:49:50.000Z
|
pipelines/climate/jobs/impactlab_website/job_bcsd_orig_ir_dummy.py
|
ClimateImpactLab/pipelines
|
aa17823486f92b542e42ed19dec094a80f61fb34
|
[
"MIT"
] | null | null | null |
'''
Dummy data for use in web development
This data is meant to be used for example purposes only. While the intention
is that this data be representative of the variables presented, it is not final
and should not be used in production.
'''
from __future__ import absolute_import
import os
import pipelines
import pipelines.climate.transformations as trn
from pipelines.climate.toolbox import (
load_climate_data,
weighted_aggregate_grid_to_regions,
bcsd_transform)
__author__ = 'Michael Delgado'
__contact__ = 'mdelgado@rhg.com'
__version__ = '0.0.1a1'
BCSD_orig_files = os.path.join(
'/shares/gcp/sources/BCSD-original/{rcp}/day/atmos/{variable}/r1i1p1/v1.0',
'{variable}_day_BCSD_{rcp}_r1i1p1_{model}_{{year}}.nc')
WRITE_PATH = os.path.join(
'/shares/gcp/outputs/diagnostics/web/gcp/climate',
'{variable}/{variable}_{model}_{pername}.nc')
ADDITIONAL_METADATA = dict(
description=__doc__.strip(),
author=__author__,
contact=__contact__,
version=__version__,
project='gcp',
team='climate',
geography='hierid',
weighting='areawt',
frequency='year_sample')
JOBS = [
dict(variable='tasmax', transformation=trn.tasmax_over_95F),
dict(variable='tasmin', transformation=trn.tasmin_under_32F),
dict(variable='tas', transformation=trn.average_seasonal_temp)]
PERIODS = [
dict(rcp='historical', pername='1986', years=list(range(1996, 1997)))
# dict(rcp='rcp85', pername='2020', years=list(range(2020, 2040))),
# dict(rcp='rcp85', pername='2040', years=list(range(2040, 2060))),
# dict(rcp='rcp85', pername='2080', years=list(range(2080, 2100)))
]
MODELS = [
dict(model='ACCESS1-0'),
dict(model='CESM1-BGC'),
dict(model='GFDL-ESM2M')]
AGGREGATIONS = [{'agglev': 'hierid', 'aggwt': 'areawt'}]
@pipelines.register('bcsd_orig_ir_dummy')
@pipelines.add_metadata(ADDITIONAL_METADATA)
@pipelines.read_patterns(BCSD_orig_files)
@pipelines.write_pattern(WRITE_PATH)
@pipelines.iterate(JOBS, PERIODS, MODELS, AGGREGATIONS)
@pipelines.run(workers=1)
def bcsd_orig_ir_dummy(*args, **kwargs):
return bcsd_transform
| 29.082192
| 79
| 0.723976
|
6a6a7ac62f09998bdcda2e0a44f9a36e15c8cade
| 2,286
|
py
|
Python
|
examples/stateful_lstm.py
|
StefOe/keras
|
a8eb2e97d0c16685dcd4ddf44a63cc2c4e9aa91f
|
[
"MIT"
] | null | null | null |
examples/stateful_lstm.py
|
StefOe/keras
|
a8eb2e97d0c16685dcd4ddf44a63cc2c4e9aa91f
|
[
"MIT"
] | null | null | null |
examples/stateful_lstm.py
|
StefOe/keras
|
a8eb2e97d0c16685dcd4ddf44a63cc2c4e9aa91f
|
[
"MIT"
] | null | null | null |
'''Example script showing how to use stateful RNNs
to model long sequences efficiently.
'''
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, LSTM
# since we are using stateful rnn tsteps can be set to 1
tsteps = 1
batch_size = 25
epochs = 25
# number of elements ahead that are used to make the prediction
lahead = 1
def gen_cosine_amp(amp=100, period=1000, x0=0, xn=50000, step=1, k=0.0001):
"""Generates an absolute cosine time series with the amplitude
exponentially decreasing
Arguments:
amp: amplitude of the cosine function
period: period of the cosine function
x0: initial x of the time series
xn: final x of the time series
step: step of the time series discretization
k: exponential rate
"""
cos = np.zeros(((xn - x0) * step, 1, 1))
for i in range(len(cos)):
idx = x0 + i * step
cos[i, 0, 0] = amp * np.cos(2 * np.pi * idx / period)
cos[i, 0, 0] = cos[i, 0, 0] * np.exp(-k * idx)
return cos
print('Generating Data...')
cos = gen_cosine_amp()
print('Input shape:', cos.shape)
expected_output = np.zeros((len(cos), 1))
for i in range(len(cos) - lahead):
expected_output[i, 0] = np.mean(cos[i + 1:i + lahead + 1])
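# With the default lahead = 1 the target at step i reduces to cos[i + 1], so
# the network is trained for one-step-ahead prediction; for larger lahead it
# would predict the mean of the next `lahead` samples.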
print('Output shape:', expected_output.shape)
print('Creating Model...')
model = Sequential()
model.add(LSTM(50,
input_shape=(tsteps, 1),
batch_size=batch_size,
return_sequences=True,
stateful=True))
model.add(LSTM(50,
return_sequences=False,
stateful=True))
model.add(Dense(1))
model.compile(loss='mse', optimizer='rmsprop')
print('Training')
for i in range(epochs):
print('Epoch', i, '/', epochs)
model.fit(cos,
expected_output,
batch_size=batch_size,
verbose=1,
epochs=1,
shuffle=False)
model.reset_states()
print('Predicting')
predicted_output = model.predict(cos, batch_size=batch_size)
print('Plotting Results')
plt.subplot(2, 1, 1)
plt.plot(expected_output)
plt.title('Expected')
plt.subplot(2, 1, 2)
plt.plot(predicted_output)
plt.title('Predicted')
plt.show()
| 27.214286
| 75
| 0.643482
|
8e3150844476fc4b4a5bffa799fa155ed98a9ec2
| 2,380
|
py
|
Python
|
python/software_engineering/user.py
|
scottwedge/payscale-course-materials
|
14865f9fb9da434a40103004d504136e7d4d5a68
|
[
"Apache-2.0"
] | null | null | null |
python/software_engineering/user.py
|
scottwedge/payscale-course-materials
|
14865f9fb9da434a40103004d504136e7d4d5a68
|
[
"Apache-2.0"
] | 1
|
2020-02-15T18:26:40.000Z
|
2020-02-15T20:18:52.000Z
|
python/software_engineering/user.py
|
scottwedge/payscale-course-materials
|
14865f9fb9da434a40103004d504136e7d4d5a68
|
[
"Apache-2.0"
] | 1
|
2020-02-15T18:19:20.000Z
|
2020-02-15T18:19:20.000Z
|
# from lib import Stack # uncomment when you have a class Stack
# from lib import Queue # uncomment when you have a class Queue
# Stack code
# uncomment when you've implemented "__init__"
# s = Stack()
# print(s._data, "should be []") # note, you don't normally using the _underscore_data
# uncomment when you've implemented "push"
# s.push(5)
# s.push(6)
# print(s._data, "should be [5, 6]")
# uncomment when you've implemented "__len__"
# print(len(s), "should be 2")
# uncomment when you've implemented "__repr__"
# print(s, "should be Stack(5, 6)")
# uncomment when you've implemented "pop"
# x = s.pop()
# uncomment before *** the when you've implemented
# __init__, __repr__, pop, and push
# print(len(s), "should be 1")
# print(s, "should be Stack(5)")
# y = s.pop()
# print(len(s), "should be 0")
# print(s, "should be Stack()")
# print(x, "should be 6")
# print(y, "should be 5")
# ***
# uncomment when you've implemented peek and the methods above
# s.push(1)
# peeked = s.peek()
# print(peeked, "should be 1")
# print(len(s), "should be 1")
# print(s, "should be Stack(1)")
# Queue code
# q = Queue()
# print(q._data, "should be []")
# uncomment when you've implemented enqueue
# q.enqueue(5)
# q.enqueue(4)
# print(q._data, "should be [4, 5]")
# uncomment when you've implemented __repr__
# print(q, "should be Queue(4, 5)")
# uncomment when you've implemented __len__
# print(len(q), "should be 2")
# uncomment when you've implemented dequeue
# x = q.dequeue()
# print(x, "should be 4")
# print(q, "should be Queue(5)")
# y = q.dequeue()
# print(y, "should be 5")
# print(q, "should be Queue()")
# Extra credit
# Use Stack to solve the balanced perenthesis problem
# Note, the "parentheses" to consider are (){}[]
# def parens_balanced(string):
# # insert implementation here
# pass
# text = "sljsfd(sdfjlk)sfkj)"
# result = parens_balanced(text)
# print(result, "should be False")
# text = "s(ljsfd(sdfjlk)sfkj)"
# result = parens_balanced(text)
# print(result, "should be True")
# text = "({})"
# result = parens_balanced(text)
# print(result, "should be True")
# text = "()[]{[()]}{}"
# result = parens_balanced(text)
# print(result, "should be True")
# text = "({)}"
# result = parens_balanced(text)
# print(result, "should be False")
# text = "()[]{[()}{}"
# result = parens_balanced(text)
# print(result, "should be False")
| 23.564356
| 87
| 0.653782
|
c67580a935f9b29368bcbe1e59c6c03707728d35
| 6,489
|
py
|
Python
|
exps/ppyoloe/ppyoloe_crn_s_voc2012.py
|
jie311/miemiedetection
|
b0e7a45717fe6c9cf9bf3c0f47d47a2e6c68b1b6
|
[
"Apache-2.0"
] | 65
|
2021-12-30T03:30:52.000Z
|
2022-03-25T01:44:32.000Z
|
exps/ppyoloe/ppyoloe_crn_s_voc2012.py
|
jie311/miemiedetection
|
b0e7a45717fe6c9cf9bf3c0f47d47a2e6c68b1b6
|
[
"Apache-2.0"
] | 1
|
2021-12-31T01:51:35.000Z
|
2022-01-01T14:42:37.000Z
|
exps/ppyoloe/ppyoloe_crn_s_voc2012.py
|
jie311/miemiedetection
|
b0e7a45717fe6c9cf9bf3c0f47d47a2e6c68b1b6
|
[
"Apache-2.0"
] | 7
|
2021-12-31T09:25:06.000Z
|
2022-03-10T01:25:09.000Z
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @miemie2013
import os
import sys
from mmdet.exp import PPYOLOE_Method_Exp
class Exp(PPYOLOE_Method_Exp):
def __init__(self):
super(Exp, self).__init__()
# custom dataset
self.num_classes = 20
self.data_dir = '../VOCdevkit/VOC2012'
self.cls_names = 'class_names/voc_classes.txt'
self.ann_folder = "annotations2"
self.train_ann = "voc2012_val2.json"
self.val_ann = "voc2012_val2.json"
self.train_ann = "voc2012_train.json"
self.val_ann = "voc2012_val.json"
self.train_image_folder = "JPEGImages"
self.val_image_folder = "JPEGImages"
        # COCO2017 dataset. Used for debugging only.
# self.num_classes = 80
# self.data_dir = '../COCO'
# self.cls_names = 'class_names/coco_classes.txt'
# self.ann_folder = "annotations"
# self.train_ann = "instances_val2017.json"
# self.val_ann = "instances_val2017.json"
# self.train_image_folder = "val2017"
# self.val_image_folder = "val2017"
# ---------------- architecture name(算法名) ---------------- #
self.archi_name = 'PPYOLOE'
# -------------- training config --------------------- #
self.max_epoch = 16
self.ema = True
self.ema_decay = 0.9998
self.weight_decay = 5e-4
self.momentum = 0.9
self.print_interval = 20
self.eval_interval = 2
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
# learning_rate
self.scheduler = "warm_cosinedecay"
self.warmup_epochs = 1
self.cosinedecay_epochs = 20
self.basic_lr_per_img = 0.04 / (8. * 32.0)
self.start_factor = 0.0
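        # The 0.04 / (8 * 32) factor mirrors the reference PPYOLOE schedule
        # (base LR 0.04 at 8 GPUs x 32 images per GPU); the effective LR is
        # presumably basic_lr_per_img scaled by the total batch size used here.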
# ----------------- testing config ------------------ #
self.eval_height = 640
self.eval_width = 640
self.test_size = [self.eval_height, self.eval_width]
# ---------------- model config ---------------- #
self.output_dir = "PPYOLOE_outputs"
self.depth_mult = 0.33
self.width_mult = 0.50
self.backbone_type = 'CSPResNet'
self.backbone = dict(
layers=[3, 6, 6, 3],
channels=[64, 128, 256, 512, 1024],
return_idx=[1, 2, 3],
freeze_at=-1,
use_large_stem=True,
depth_mult=self.depth_mult,
width_mult=self.width_mult,
)
self.fpn_type = 'CustomCSPPAN'
self.fpn = dict(
in_channels=[int(256 * self.width_mult), int(512 * self.width_mult), int(1024 * self.width_mult)],
out_channels=[768, 384, 192],
stage_num=1,
block_num=3,
act='swish',
spp=True,
depth_mult=self.depth_mult,
width_mult=self.width_mult,
)
self.head_type = 'PPYOLOEHead'
self.head = dict(
in_channels=[int(768 * self.width_mult), int(384 * self.width_mult), int(192 * self.width_mult)],
fpn_strides=[32, 16, 8],
grid_cell_scale=5.0,
grid_cell_offset=0.5,
static_assigner_epoch=4,
use_varifocal_loss=True,
num_classes=self.num_classes,
loss_weight={'class': 1.0, 'iou': 2.5, 'dfl': 0.5, },
eval_size=self.test_size,
)
self.static_assigner_type = 'ATSSAssigner'
self.static_assigner = dict(
topk=9,
num_classes=self.num_classes,
)
self.assigner_type = 'TaskAlignedAssigner'
self.assigner = dict(
topk=13,
alpha=1.0,
beta=6.0,
)
self.nms_cfg = dict(
nms_type='multiclass_nms',
score_threshold=0.01,
nms_threshold=0.6,
nms_top_k=1000,
keep_top_k=100,
)
        # ---------------- preprocessing config ---------------- #
self.context = {'fields': ['image', 'gt_bbox', 'gt_class', 'gt_score']}
# DecodeImage
self.decodeImage = dict(
to_rgb=True,
with_mixup=False,
with_cutmix=False,
with_mosaic=False,
)
# ColorDistort
self.colorDistort = dict()
# RandomExpand
self.randomExpand = dict(
fill_value=[123.675, 116.28, 103.53],
)
# RandomCrop
self.randomCrop = dict()
# RandomFlipImage
self.randomFlipImage = dict(
is_normalized=False,
)
# RandomShape
self.randomShape = dict(
sizes=[320, 352, 384, 416, 448, 480, 512, 544, 576, 608, 640, 672, 704, 736, 768],
# sizes=[640],
random_inter=True,
resize_box=True,
)
# NormalizeImage
self.normalizeImage = dict(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
is_scale=True,
is_channel_first=False,
)
# Permute
self.permute = dict(
to_bgr=False,
channel_first=True,
)
# PadGT
self.padGT = dict(
num_max_boxes=200,
)
# ResizeImage
self.resizeImage = dict(
target_size=640,
interp=2,
)
        # Preprocessing order. New augmentations must also be appended here,
        # otherwise train.py effectively skips them!
self.sample_transforms_seq = []
self.sample_transforms_seq.append('decodeImage')
self.sample_transforms_seq.append('colorDistort')
self.sample_transforms_seq.append('randomExpand')
self.sample_transforms_seq.append('randomCrop')
self.sample_transforms_seq.append('randomFlipImage')
self.sample_transforms_seq.append('randomShape')
self.sample_transforms_seq.append('normalizeImage')
self.sample_transforms_seq.append('permute')
self.sample_transforms_seq.append('padGT')
self.batch_transforms_seq = []
# self.batch_transforms_seq.append('padGT')
# ---------------- dataloader config ---------------- #
        # Defaults to 4. If you hit "OSError: [WinError 1455] The paging file
        # is too small for this operation to complete", set it to 2 or 0.
self.data_num_workers = 1
self.eval_data_num_workers = 0
        # Check whether the script is running under a debugger
isDebug = True if sys.gettrace() else False
if isDebug:
print('Debug Mode.')
self.data_dir = '../' + self.data_dir
self.cls_names = '../' + self.cls_names
self.output_dir = '../' + self.output_dir
| 33.448454
| 110
| 0.542456
|
1895bc561d67290cbaeaa852a2fa74aeccca70b2
| 1,031
|
py
|
Python
|
modeling/backbone/__init__.py
|
kyotovision-public/multimodal-material-segmentation
|
2f057e9efca0780a887c97fbca9fcfb49fb4d03a
|
[
"MIT"
] | null | null | null |
modeling/backbone/__init__.py
|
kyotovision-public/multimodal-material-segmentation
|
2f057e9efca0780a887c97fbca9fcfb49fb4d03a
|
[
"MIT"
] | null | null | null |
modeling/backbone/__init__.py
|
kyotovision-public/multimodal-material-segmentation
|
2f057e9efca0780a887c97fbca9fcfb49fb4d03a
|
[
"MIT"
] | null | null | null |
from modeling.backbone import resnet, xception, drn, mobilenet, resnet_adv, resnet_condconv, xception_adv
def build_backbone(backbone, output_stride, BatchNorm, input_dim=3, pretrained=False):
if backbone == 'resnet':
return resnet.ResNet101(output_stride, BatchNorm, pretrained=False)
elif backbone == 'resnet_adv':
return resnet_adv.ResNet101(output_stride, BatchNorm, pretrained=pretrained, input_dim=input_dim)
elif backbone == 'resnet_condconv':
return resnet_condconv.ResNet101(output_stride, BatchNorm, pretrained=pretrained, input_dim=input_dim)
elif backbone == 'xception':
return xception.AlignedXception(output_stride, BatchNorm)
elif backbone == 'xception_adv':
return xception_adv.AlignedXception(output_stride, BatchNorm, pretrained=pretrained, input_dim=input_dim)
elif backbone == 'drn':
return drn.drn_d_54(BatchNorm)
elif backbone == 'mobilenet':
return mobilenet.MobileNetV2(output_stride, BatchNorm)
else:
raise NotImplementedError
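# Example call (illustrative; the norm layer and input_dim value are
# assumptions, not taken from this file):
#   backbone = build_backbone('resnet_adv', output_stride=16,
#                             BatchNorm=torch.nn.BatchNorm2d,
#                             input_dim=4, pretrained=True)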
| 54.263158
| 113
| 0.746848
|
10cb433019b80a025f341ed3c2a9672a17777f99
| 5,648
|
py
|
Python
|
tests/test_provider_conf.py
|
brean/goblin
|
c61177de7f0e59661fbe74531ea4cb58558a1f31
|
[
"Apache-2.0"
] | 82
|
2016-11-17T10:07:55.000Z
|
2021-09-04T12:53:16.000Z
|
tests/test_provider_conf.py
|
brean/goblin
|
c61177de7f0e59661fbe74531ea4cb58558a1f31
|
[
"Apache-2.0"
] | 76
|
2016-07-06T15:01:51.000Z
|
2020-08-26T18:04:27.000Z
|
tests/test_provider_conf.py
|
brean/goblin
|
c61177de7f0e59661fbe74531ea4cb58558a1f31
|
[
"Apache-2.0"
] | 24
|
2017-02-19T05:23:42.000Z
|
2020-06-18T22:51:10.000Z
|
# import asyncio
# import uuid
# from unittest import mock
#
# import json
# import pytest
#
# import aiohttp
# from aiohttp import client_ws
#
# from aiogremlin.gremlin_python.driver import request
#
# import goblin
# from goblin import driver
# from goblin import provider
#
# request_id = uuid.UUID(int=215449331521667564889692237976543325869,
# version=4)
#
#
# # based on this handy tip on SO:
# # http://stackoverflow.com/a/29905620/6691423
# def get_mock_coro(return_value):
# async def mock_coro(*args, **kwargs):
# return return_value
#
# return mock.Mock(wraps=mock_coro)
#
#
# async def mock_receive():
# message = mock.Mock()
# message.tp = aiohttp.MsgType.close
# return message
#
#
# async def mock_ws_connect(*args, **kwargs):
# mock_client = mock.Mock(spec=client_ws.ClientWebSocketResponse)
# mock_client.closed = False
# mock_client.receive = mock.Mock(wraps=mock_receive)
# mock_client.close = get_mock_coro(None)
# return mock_client
#
#
# class TestProvider(provider.Provider):
# DEFAULT_OP_ARGS = {
# 'standard': {
# 'eval': {
# 'fictional_argument': 'fictional_value'
# },
# },
# 'session': {
# 'eval': {
# 'manageTransaction': True
# },
# }
# }
#
# @staticmethod
# def get_hashable_id(val):
# return val
#
#
# def deserialize_json_request(request):
# header_len = request[0] + 1
# payload = request[header_len:]
# return json.loads(payload.decode())
#
#
# @pytest.fixture(params=(driver.GraphSONMessageSerializer,
# driver.GraphSONMessageSerializer))
# def message_serializer(request):
# return request.param
#
#
# @pytest.mark.parametrize(
# 'processor_name,key,value',
# (('standard', 'fictional_argument', 'fictional_value'),
# ('session', 'manageTransaction', True)))
# def test_get_processor_provider_default_args(processor_name, key, value):
# processor = driver.GraphSONMessageSerializer.get_processor(
# TestProvider, processor_name)
# assert processor._default_args == TestProvider.DEFAULT_OP_ARGS[
# processor_name]
# eval_args = processor.get_op_args('eval', {'gremlin': 'g.V()'})
# assert eval_args['gremlin'] == 'g.V()'
# assert eval_args[key] == value
#
#
# @pytest.mark.parametrize('processor,key,value',
# (('', 'fictional_argument', 'fictional_value'),
# ('session', 'manageTransaction', True)))
# def test_serializer_default_op_args(message_serializer, processor, key,
# value):
# g = driver.AsyncGraph().traversal()
# traversal = g.V().hasLabel('stuff').has('foo', 'bar')
# serialized_message = message_serializer.serialize_message(
# TestProvider,
# str(uuid.uuid4()),
# processor=processor,
# op='eval',
# gremlin=traversal.bytecode)
# message = deserialize_json_request(serialized_message)
# assert message['args'][key] == value
#
#
# @pytest.mark.parametrize('processor,key,value',
# (('', 'fictional_argument', 'fictional_value'),
# ('session', 'manageTransaction', True)))
# @pytest.mark.asyncio
# async def test_conn_default_op_args(event_loop, monkeypatch, processor,
# key, value):
# mock_client_session = mock.Mock(spec=aiohttp.ClientSession)
# mock_client_session_instance = mock.Mock(spec=aiohttp.ClientSession)
# mock_client_session.return_value = mock_client_session_instance
# mock_client_session_instance.ws_connect = mock.Mock(
# wraps=mock_ws_connect)
# mock_client_session_instance.close = get_mock_coro(
# None) # otherwise awaiting ws.close is an error
#
# monkeypatch.setattr(aiohttp, 'ClientSession', mock_client_session)
# monkeypatch.setattr(uuid, 'uuid4', mock.Mock(return_value=request_id))
#
# conn = await driver.Connection.open(
# 'some_url',
# event_loop,
# message_serializer=driver.GraphSONMessageSerializer,
# provider=TestProvider)
#
# resp = await conn.submit(
# gremlin='g.V().hasLabel("foo").count()',
# processor=processor,
# op='eval')
#
# submitted_bytes = conn._ws.send_bytes.call_args[0][0]
# submitted_json = submitted_bytes[17:].decode()
# submitted_dict = json.loads(submitted_json)
#
# assert submitted_dict['args'][key] == value
#
# await conn.close()
# resp.close()
#
#
# @pytest.mark.asyncio
# async def test_cluster_conn_provider(event_loop, gremlin_host,
# gremlin_port):
# cluster = await driver.Cluster.open(
# event_loop,
# provider=TestProvider,
# hosts=[gremlin_host],
# port=gremlin_port)
# assert cluster.config['provider'] == TestProvider
#
# pooled_conn = await cluster.get_connection()
# assert pooled_conn._conn._provider == TestProvider
#
# await cluster.close()
#
#
# @pytest.mark.asyncio
# async def test_app_cluster_provider(event_loop):
# app = await goblin.Goblin.open(event_loop, provider=TestProvider)
# assert app._provider is TestProvider
# assert app._cluster.config['provider'] is TestProvider
#
# await app.close()
#
#
# @pytest.mark.asyncio
# async def test_app_provider_hashable_id(event_loop):
# app = await goblin.Goblin.open(event_loop, provider=TestProvider)
# assert app._get_hashable_id is TestProvider.get_hashable_id
#
# await app.close()
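The commented-out tests above lean on a small coroutine-mocking trick (the Stack Overflow tip cited in the file): wrapping an async function in mock.Mock keeps it awaitable while still recording calls. A minimal, self-contained sketch of that pattern, using only the standard library; the names here are illustrative and not taken from goblin:

import asyncio
from unittest import mock


def get_mock_coro(return_value):
    async def mock_coro(*args, **kwargs):
        return return_value
    # mock.Mock(wraps=...) keeps the result awaitable while recording call args.
    return mock.Mock(wraps=mock_coro)


async def demo():
    close = get_mock_coro(None)
    result = await close("conn-1")  # behaves like the real coroutine method
    assert result is None
    close.assert_called_once_with("conn-1")


asyncio.run(demo())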
| 32.274286
| 76
| 0.643591
|
b895de8f8423a73454f377ca36c04bdd4afc57cc
| 1,126
|
py
|
Python
|
kubernetes/test/test_v1_cinder_volume_source.py
|
amanagarwal33/python
|
e31693557f75950805fb4dc5af4cb7434a470e26
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_cinder_volume_source.py
|
amanagarwal33/python
|
e31693557f75950805fb4dc5af4cb7434a470e26
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_cinder_volume_source.py
|
amanagarwal33/python
|
e31693557f75950805fb4dc5af4cb7434a470e26
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
    OpenAPI spec version: v1.15.6
    Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1_cinder_volume_source import V1CinderVolumeSource # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1CinderVolumeSource(unittest.TestCase):
"""V1CinderVolumeSource unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1CinderVolumeSource(self):
"""Test V1CinderVolumeSource"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v1_cinder_volume_source.V1CinderVolumeSource() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
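The FIXME above asks for the model to be constructed with mandatory attributes; a hedged sketch of what that could look like, assuming the generated class accepts the usual Cinder fields (volume_id, fs_type, read_only) as keyword arguments — field names may differ between generated client versions:

from kubernetes.client.models.v1_cinder_volume_source import V1CinderVolumeSource

# Illustrative values only; volume_id is the field the Cinder API requires.
model = V1CinderVolumeSource(
    volume_id="2b1b4f96-ad7b-4c5f-9c2e-6f1f9cbd64f1",
    fs_type="ext4",
    read_only=False,
)
assert model.volume_id == "2b1b4f96-ad7b-4c5f-9c2e-6f1f9cbd64f1"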
| 24.478261
| 124
| 0.71048
|
e59506ec652075f397ffe0eb1fd56a4177978828
| 417
|
py
|
Python
|
environment/Scripts/pip3.7-script.py
|
pumbas600/CriticalPath
|
31889c875dedf733aeb9a4ebeba8bf8930e86176
|
[
"MIT"
] | null | null | null |
environment/Scripts/pip3.7-script.py
|
pumbas600/CriticalPath
|
31889c875dedf733aeb9a4ebeba8bf8930e86176
|
[
"MIT"
] | null | null | null |
environment/Scripts/pip3.7-script.py
|
pumbas600/CriticalPath
|
31889c875dedf733aeb9a4ebeba8bf8930e86176
|
[
"MIT"
] | null | null | null |
#!D:\Python\Saves\CriticalPath3.7\environment\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| 32.076923
| 70
| 0.669065
|
c348992067873a5750d8744e5e3f2f62f7719032
| 2,687
|
py
|
Python
|
rlo/paper/scripts/plot_cost_sequence.py
|
tomjaguarpaw/knossos-ksc
|
8fa75e67c0db8f632b135379740051cd10ff31f2
|
[
"MIT"
] | 31
|
2021-09-09T16:09:55.000Z
|
2022-02-20T02:15:19.000Z
|
rlo/paper/scripts/plot_cost_sequence.py
|
tomjaguarpaw/knossos-ksc
|
8fa75e67c0db8f632b135379740051cd10ff31f2
|
[
"MIT"
] | 40
|
2021-08-06T14:30:08.000Z
|
2022-01-19T08:49:52.000Z
|
rlo/paper/scripts/plot_cost_sequence.py
|
tomjaguarpaw/knossos-ksc
|
8fa75e67c0db8f632b135379740051cd10ff31f2
|
[
"MIT"
] | 5
|
2021-08-06T11:20:31.000Z
|
2022-01-07T19:39:40.000Z
|
# fmt: off
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
def blas():
from blas_result import cost_sequence, annotations
matplotlib.rcParams.update({'font.size': 15})
fig = plt.figure(figsize=(5, 11))
    # Broken y-axis, built from two stacked panels that share the x range.
    # Lower panel: zoomed on the narrow band where the cost curve flattens out.
    ax = fig.add_axes([0.2, 0.92, 0.75, 0.025])
    ts = np.arange(len(cost_sequence), dtype=np.float64)
    ax.semilogy(ts, cost_sequence, linewidth=6)
    ax.set_ylim([7070100, 7200000])
    ax.set_xlabel("Steps", fontsize=16)
    ax.set_ylabel("Cost", fontsize=16)
    ax.set_yticks([7e+6, 7.2e+6])
    ax.set_yticklabels([])
    ax.spines['top'].set_visible(False)  # open the top edge toward the upper panel
    ax.yaxis.set_label_coords(-0.17, 2.0)
    # Upper panel: the initial, much larger costs; hiding the bottom spine
    # (and the top spine of the lower panel) suggests the axis break.
    ax1 = fig.add_axes([0.2, 0.955, 0.75, 0.045])
    ax1.semilogy(ts, cost_sequence, linewidth=6)
    ax1.set_ylim([7200000, max(cost_sequence)])
    ax1.set_xticklabels([])
    ax1.spines['bottom'].set_visible(False)
for i, (t, cost, text) in enumerate(annotations):
ax.annotate(
text,
xy=(t, cost_sequence[t]),
xytext=(10, 320 - 310 * i),
textcoords="figure points",
arrowprops={"width": 2, "headwidth": 10},
bbox={"boxstyle": "round", "facecolor": "white"},
fontsize=11,
)
ax.annotate(
"cost={}".format(cost),
xy=(t, cost_sequence[t]),
xytext=(10, 320 - 310 * i),
textcoords="figure points",
weight="bold",
fontsize=11,
)
# ax1.annotate("$\\approx$", (-0.4, 0), xycoords="axes points")
plt.savefig("cost_sequence_blas.pdf")
def mnist():
from mnist_result import cost_sequence, annotations
matplotlib.rcParams.update({'font.size': 16})
fig = plt.figure(figsize=(15, 9))
ax = fig.add_axes([0.1, 0.7, 0.8, 0.28])
ts = np.arange(len(cost_sequence), dtype=np.float64)
ax.semilogy(ts, cost_sequence, linewidth=10)
ax.set_xlabel("Steps", fontsize=16)
ax.set_ylabel("Cost function", fontsize=16)
for i, (t, cost, text) in enumerate(annotations):
ax.annotate(
text,
xy=(t, cost_sequence[t]),
xytext=(10 + 350 * i, 10),
textcoords="figure points",
arrowprops={"width": 2, "headwidth": 10},
bbox={"boxstyle": "round", "facecolor": "white"},
fontsize=9,
)
ax.annotate(
"cost={}".format(cost),
xy=(t, cost_sequence[t]),
xytext=(10 + 350 * i, 10),
textcoords="figure points",
weight="bold",
fontsize=11,
)
plt.savefig("cost_sequence_mnist.pdf")
if __name__ == "__main__":
blas()
# mnist()
| 33.5875
| 67
| 0.570897
|
5e7864d1a0e7550ea3255f7b3c128478a6e10acc
| 98
|
py
|
Python
|
opentotp.py
|
prevenitylabs/opentotp
|
c2a7016097fd4176bad5835ef1e58050ea8c98ce
|
[
"MIT"
] | 1
|
2021-11-15T09:17:47.000Z
|
2021-11-15T09:17:47.000Z
|
opentotp.py
|
prevenitylabs/opentotp
|
c2a7016097fd4176bad5835ef1e58050ea8c98ce
|
[
"MIT"
] | null | null | null |
opentotp.py
|
prevenitylabs/opentotp
|
c2a7016097fd4176bad5835ef1e58050ea8c98ce
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from opentotp.__main__ import main
if __name__ == '__main__':
main()
| 14
| 34
| 0.693878
|
97c4f90675eda5c2ee821b0dee889e68948b913b
| 243
|
py
|
Python
|
trakt/__init__.py
|
reiniervdwindt/pytrakt
|
c06203ea5e0477a784e66e1bb66d66bf9b4184ea
|
[
"Apache-2.0"
] | null | null | null |
trakt/__init__.py
|
reiniervdwindt/pytrakt
|
c06203ea5e0477a784e66e1bb66d66bf9b4184ea
|
[
"Apache-2.0"
] | null | null | null |
trakt/__init__.py
|
reiniervdwindt/pytrakt
|
c06203ea5e0477a784e66e1bb66d66bf9b4184ea
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""A wrapper for the Trakt.tv REST API"""
try:
from trakt.core import * # NOQA
except ImportError:
pass
version_info = (2, 4, 5)
__author__ = 'Jon Nappi'
__version__ = '.'.join([str(i) for i in version_info])
| 22.090909
| 54
| 0.63786
|
d4e93585dab4a07e5227471b56a642d54332a485
| 6,207
|
py
|
Python
|
fec/version/v7_0/F1.py
|
h4ck3rm1k3/FEC-Field-Documentation
|
c2f1f36e14c67ac3656c09f801b9f595d3e9f92e
|
[
"Unlicense"
] | 1
|
2016-06-13T23:54:31.000Z
|
2016-06-13T23:54:31.000Z
|
fec/version/v8_0/F1.py
|
h4ck3rm1k3/FEC-Field-Documentation
|
c2f1f36e14c67ac3656c09f801b9f595d3e9f92e
|
[
"Unlicense"
] | null | null | null |
fec/version/v8_0/F1.py
|
h4ck3rm1k3/FEC-Field-Documentation
|
c2f1f36e14c67ac3656c09f801b9f595d3e9f92e
|
[
"Unlicense"
] | 1
|
2019-07-03T10:35:19.000Z
|
2019-07-03T10:35:19.000Z
|
import fechbase
class Records(fechbase.RecordsBase):
def __init__(self):
fechbase.RecordsBase.__init__(self)
self.fields = [
{'name': 'FORM TYPE', 'number': '1'},
{'name': 'FILER COMMITTEE ID NUMBER', 'number': '2'},
{'name': 'CHANGE OF COMMITTEE NAME', 'number': '3'},
{'name': 'COMMITTEE NAME', 'number': '4'},
{'name': 'CHANGE OF ADDRESS', 'number': '5'},
{'name': 'STREET 1', 'number': '6'},
{'name': 'STREET 2', 'number': '7'},
{'name': 'CITY', 'number': '8'},
{'name': 'STATE', 'number': '9'},
{'name': 'ZIP', 'number': '10'},
{'name': 'CHANGE OF COMMITTEE EMAIL', 'number': '11'},
{'name': 'COMMITTEE EMAIL', 'number': '12'},
{'name': 'CHANGE OF COMMITTEE WEB URL', 'number': '13'},
{'name': 'COMMITTEE WEB URL', 'number': '14'},
{'name': 'EFFECTIVE DATE', 'number': '15'},
{'name': 'SIGNATURE LAST NAME', 'number': '16'},
{'name': 'SIGNATURE FIRST NAME', 'number': '17'},
{'name': 'SIGNATURE MIDDLE NAME', 'number': '18'},
{'name': 'SIGNATURE PREFIX', 'number': '19'},
{'name': 'SIGNATURE SUFFIX', 'number': '20'},
{'name': 'DATE SIGNED', 'number': '21'},
{'name': 'COMMITTEE TYPE', 'number': '22-5.'},
{'name': 'CANDIDATE ID NUMBER', 'number': '23-5.'},
{'name': 'CANDIDATE LAST NAME', 'number': '24-5.'},
{'name': 'CANDIDATE FIRST NAME', 'number': '25-5.'},
{'name': 'CANDIDATE MIDDLE NAME', 'number': '26-5.'},
{'name': 'CANDIDATE PREFIX', 'number': '27-5.'},
{'name': 'CANDIDATE SUFFIX', 'number': '28-5.'},
{'name': 'CANDIDATE OFFICE', 'number': '29-5.'},
{'name': 'CANDIDATE STATE', 'number': '30-5.'},
{'name': 'CANDIDATE DISTRICT', 'number': '31-5.'},
{'name': 'PARTY CODE', 'number': '32-5.'},
{'name': 'PARTY TYPE', 'number': '33-5.'},
{'name': 'ORGANIZATION TYPE', 'number': '34-5 (e).'},
{'name': 'LOBBYIST/REGISTRANT PAC', 'number': '35-5 (e).'},
{'name': 'LOBBYIST/REGISTRANT PAC', 'number': '36-5 (f).'},
{'name': 'LEADERSHIP PAC', 'number': '37-5 (f).'},
{'name': 'AFFILIATED Committee ID NUM', 'number': '38-6.'},
{'name': 'AFFILIATED Committee NAME', 'number': '39-6.'},
{'name': 'AFFILIATED CANDIDATE ID NUM', 'number': '40-6.'},
{'name': 'AFFILIATED LAST NAME', 'number': '41-6.'},
{'name': 'AFFILIATED FIRST NAME', 'number': '42-6.'},
{'name': 'AFFILIATED MIDDLE NAME', 'number': '43-6.'},
{'name': 'AFFILIATED PREFIX', 'number': '44-6.'},
{'name': 'AFFILIATED SUFFIX', 'number': '45-6.'},
{'name': 'AFFILIATED STREET 1', 'number': '46-6.'},
{'name': 'AFFILIATED STREET 2', 'number': '47-6.'},
{'name': 'AFFILIATED CITY', 'number': '48-6.'},
{'name': 'AFFILIATED STATE', 'number': '49-6.'},
{'name': 'AFFILIATED ZIP', 'number': '50-6.'},
{'name': 'AFFILIATED RELATIONSHIP CODE', 'number': '51-6.'},
{'name': 'CUSTODIAN LAST NAME', 'number': '52-7.'},
{'name': 'CUSTODIAN FIRST NAME', 'number': '53-7.'},
{'name': 'CUSTODIAN MIDDLE NAME', 'number': '54-7.'},
{'name': 'CUSTODIAN PREFIX', 'number': '55-7.'},
{'name': 'CUSTODIAN SUFFIX', 'number': '56-7.'},
{'name': 'CUSTODIAN STREET 1', 'number': '57-7.'},
{'name': 'CUSTODIAN STREET 2', 'number': '58-7.'},
{'name': 'CUSTODIAN CITY', 'number': '59-7.'},
{'name': 'CUSTODIAN STATE', 'number': '60-7.'},
{'name': 'CUSTODIAN ZIP', 'number': '61-7.'},
{'name': 'CUSTODIAN TITLE', 'number': '62-7.'},
{'name': 'CUSTODIAN TELEPHONE', 'number': '63-7.'},
{'name': 'TREASURER LAST NAME', 'number': '64-8.'},
{'name': 'TREASURER FIRST NAME', 'number': '65-8.'},
{'name': 'TREASURER MIDDLE NAME', 'number': '66-8.'},
{'name': 'TREASURER PREFIX', 'number': '67-8.'},
{'name': 'TREASURER SUFFIX', 'number': '68-8.'},
{'name': 'TREASURER STREET 1', 'number': '69-8.'},
{'name': 'TREASURER STREET 2', 'number': '70-8.'},
{'name': 'TREASURER CITY', 'number': '71-8.'},
{'name': 'TREASURER STATE', 'number': '72-8.'},
{'name': 'TREASURER ZIP', 'number': '73-8.'},
{'name': 'TREASURER TITLE', 'number': '74-8.'},
{'name': 'TREASURER TELEPHONE', 'number': '75-8.'},
{'name': 'AGENT LAST NAME', 'number': '76-8.'},
{'name': 'AGENT FIRST NAME', 'number': '77-8.'},
{'name': 'AGENT MIDDLE NAME', 'number': '78-8.'},
{'name': 'AGENT PREFIX', 'number': '79-8.'},
{'name': 'AGENT SUFFIX', 'number': '80-8.'},
{'name': 'AGENT STREET 1', 'number': '81-8.'},
{'name': 'AGENT STREET 2', 'number': '82-8.'},
{'name': 'AGENT CITY', 'number': '83-8.'},
{'name': 'AGENT STATE', 'number': '84-8.'},
{'name': 'AGENT ZIP', 'number': '85-8.'},
{'name': 'AGENT TITLE', 'number': '86-8.'},
{'name': 'AGENT TELEPHONE', 'number': '87-8.'},
{'name': 'BANK NAME', 'number': '88-9. a)'},
{'name': 'BANK STREET 1', 'number': '89-9. a)'},
{'name': 'BANK STREET 2', 'number': '90-9. a)'},
{'name': 'BANK CITY', 'number': '91-9. a)'},
{'name': 'BANK STATE', 'number': '92-9. a)'},
{'name': 'BANK ZIP', 'number': '93-9. a)'},
{'name': 'BANK NAME', 'number': '94-9. b)'},
{'name': 'BANK STREET 1', 'number': '95-9. b)'},
{'name': 'BANK STREET 2', 'number': '96-9. b)'},
{'name': 'BANK CITY', 'number': '97-9. b)'},
{'name': 'BANK STATE', 'number': '98-9. b)'},
{'name': 'BANK ZIP', 'number': '99-9. b)'},
]
self.fields_names = self.hash_names(self.fields)
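A hypothetical usage sketch for the field map above (it relies only on the fields list declared in this class, not on fechbase internals, and assumes the fechbase package is importable; the sample row and delimiter are invented for illustration):

# Pair the declared field names with one delimited F1 record.
names = [field['name'] for field in Records().fields]
raw = "F1,C00123456,,FRIENDS OF EXAMPLE,X,123 MAIN ST,,SPRINGFIELD,IL,62704"
record = dict(zip(names, raw.split(",")))
print(record['COMMITTEE NAME'])  # -> FRIENDS OF EXAMPLE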
| 58.009346
| 72
| 0.456259
|
51b97d2d13ea2161647cbb5fda104c20c118dc08
| 15,482
|
py
|
Python
|
certbot/certbot/interfaces.py
|
sbraz/certbot
|
3058b6e748d085f53dd5c550be2806b26e7195eb
|
[
"Apache-2.0"
] | null | null | null |
certbot/certbot/interfaces.py
|
sbraz/certbot
|
3058b6e748d085f53dd5c550be2806b26e7195eb
|
[
"Apache-2.0"
] | null | null | null |
certbot/certbot/interfaces.py
|
sbraz/certbot
|
3058b6e748d085f53dd5c550be2806b26e7195eb
|
[
"Apache-2.0"
] | null | null | null |
"""Certbot client interfaces."""
from abc import ABCMeta
from abc import abstractmethod
from argparse import ArgumentParser
from typing import Iterable
from typing import List
from typing import Optional
import zope.interface
from acme.challenges import Challenge
from acme.challenges import ChallengeResponse
from certbot.achallenges import AnnotatedChallenge
from certbot import configuration
class AccountStorage(metaclass=ABCMeta):
"""Accounts storage interface."""
@abstractmethod
def find_all(self): # pragma: no cover
"""Find all accounts.
:returns: All found accounts.
:rtype: list
"""
raise NotImplementedError()
@abstractmethod
def load(self, account_id): # pragma: no cover
"""Load an account by its id.
:raises .AccountNotFound: if account could not be found
:raises .AccountStorageError: if account could not be loaded
"""
raise NotImplementedError()
@abstractmethod
def save(self, account, client): # pragma: no cover
"""Save account.
:raises .AccountStorageError: if account could not be saved
"""
raise NotImplementedError()
class IConfig(zope.interface.Interface): # pylint: disable=inherit-non-class
"""Deprecated, use certbot.configuration.NamespaceConfig instead."""
class IPluginFactory(zope.interface.Interface): # pylint: disable=inherit-non-class
"""Deprecated, use certbot.interfaces.Plugin as ABC instead."""
class IPlugin(zope.interface.Interface): # pylint: disable=inherit-non-class
"""Deprecated, use certbot.interfaces.Plugin as ABC instead."""
class Plugin(metaclass=ABCMeta):
"""Certbot plugin.
Objects providing this interface will be called without satisfying
any entry point "extras" (extra dependencies) you might have defined
for your plugin, e.g (excerpt from ``setup.py`` script)::
setup(
...
entry_points={
'certbot.plugins': [
'name=example_project.plugin[plugin_deps]',
],
},
extras_require={
'plugin_deps': ['dep1', 'dep2'],
}
)
Therefore, make sure such objects are importable and usable without
extras. This is necessary, because CLI does the following operations
(in order):
- loads an entry point,
- calls `inject_parser_options`,
- requires an entry point,
- creates plugin instance (`__call__`).
"""
description: str = NotImplemented
"""Short plugin description"""
@abstractmethod
def __init__(self, config: configuration.NamespaceConfig, name: str):
"""Create a new `Plugin`.
:param configuration.NamespaceConfig config: Configuration.
:param str name: Unique plugin name.
"""
super().__init__()
@abstractmethod
def prepare(self) -> None:
"""Prepare the plugin.
Finish up any additional initialization.
:raises .PluginError:
when full initialization cannot be completed.
:raises .MisconfigurationError:
when full initialization cannot be completed. Plugin will
be displayed on a list of available plugins.
:raises .NoInstallationError:
when the necessary programs/files cannot be located. Plugin
will NOT be displayed on a list of available plugins.
:raises .NotSupportedError:
when the installation is recognized, but the version is not
currently supported.
"""
@abstractmethod
def more_info(self) -> str:
"""Human-readable string to help the user.
Should describe the steps taken and any relevant info to help the user
decide which plugin to use.
:rtype str:
"""
@classmethod
@abstractmethod
def inject_parser_options(cls, parser: ArgumentParser, name: str) -> None:
"""Inject argument parser options (flags).
1. Be nice and prepend all options and destinations with
`~.common.option_namespace` and `~common.dest_namespace`.
2. Inject options (flags) only. Positional arguments are not
allowed, as this would break the CLI.
:param ArgumentParser parser: (Almost) top-level CLI parser.
:param str name: Unique plugin name.
"""
class IAuthenticator(IPlugin): # pylint: disable=inherit-non-class
"""Deprecated, use certbot.interfaces.Authenticator as ABC instead."""
class Authenticator(Plugin):
"""Generic Certbot Authenticator.
    Class represents all possible tools and processes that have the
    ability to perform challenges and attain a certificate.
"""
@abstractmethod
def get_chall_pref(self, domain: str) -> Iterable[Challenge]:
"""Return `collections.Iterable` of challenge preferences.
:param str domain: Domain for which challenge preferences are sought.
:returns: `collections.Iterable` of challenge types (subclasses of
:class:`acme.challenges.Challenge`) with the most
preferred challenges first. If a type is not specified, it means the
Authenticator cannot perform the challenge.
:rtype: `collections.Iterable`
"""
@abstractmethod
def perform(self, achalls: List[AnnotatedChallenge]) -> Iterable[ChallengeResponse]:
"""Perform the given challenge.
:param list achalls: Non-empty (guaranteed) list of
:class:`~certbot.achallenges.AnnotatedChallenge`
instances, such that it contains types found within
:func:`get_chall_pref` only.
:returns: `collections.Iterable` of ACME
:class:`~acme.challenges.ChallengeResponse` instances corresponding to each provided
:class:`~acme.challenges.Challenge`.
:rtype: :class:`collections.Iterable` of
:class:`acme.challenges.ChallengeResponse`,
where responses are required to be returned in
the same order as corresponding input challenges
:raises .PluginError: If some or all challenges cannot be performed
"""
@abstractmethod
def cleanup(self, achalls: List[AnnotatedChallenge]) -> None:
"""Revert changes and shutdown after challenges complete.
This method should be able to revert all changes made by
perform, even if perform exited abnormally.
:param list achalls: Non-empty (guaranteed) list of
:class:`~certbot.achallenges.AnnotatedChallenge`
instances, a subset of those previously passed to :func:`perform`.
:raises PluginError: if original configuration cannot be restored
"""
class IInstaller(IPlugin): # pylint: disable=inherit-non-class
"""Deprecated, use certbot.interfaces.Installer as ABC instead."""
class Installer(Plugin):
"""Generic Certbot Installer Interface.
    Represents any server on which an X509 certificate can be placed.
It is assumed that :func:`save` is the only method that finalizes a
checkpoint. This is important to ensure that checkpoints are
restored in a consistent manner if requested by the user or in case
of an error.
Using :class:`certbot.reverter.Reverter` to implement checkpoints,
rollback, and recovery can dramatically simplify plugin development.
"""
@abstractmethod
def get_all_names(self) -> Iterable[str]:
"""Returns all names that may be authenticated.
:rtype: `collections.Iterable` of `str`
"""
@abstractmethod
def deploy_cert(self, domain: str, cert_path: str, key_path: str,
chain_path: str, fullchain_path: str) -> None:
"""Deploy certificate.
:param str domain: domain to deploy certificate file
:param str cert_path: absolute path to the certificate file
:param str key_path: absolute path to the private key file
:param str chain_path: absolute path to the certificate chain file
:param str fullchain_path: absolute path to the certificate fullchain
file (cert plus chain)
:raises .PluginError: when cert cannot be deployed
"""
@abstractmethod
def enhance(self, domain: str, enhancement: str, options: Optional[List[str]] = None) -> None:
"""Perform a configuration enhancement.
:param str domain: domain for which to provide enhancement
:param str enhancement: An enhancement as defined in
:const:`~certbot.plugins.enhancements.ENHANCEMENTS`
:param options: Flexible options parameter for enhancement.
Check documentation of
:const:`~certbot.plugins.enhancements.ENHANCEMENTS`
for expected options for each enhancement.
:raises .PluginError: If Enhancement is not supported, or if
an error occurs during the enhancement.
"""
@abstractmethod
def supported_enhancements(self) -> List[str]:
"""Returns a `collections.Iterable` of supported enhancements.
:returns: supported enhancements which should be a subset of
:const:`~certbot.plugins.enhancements.ENHANCEMENTS`
:rtype: :class:`collections.Iterable` of :class:`str`
"""
@abstractmethod
def save(self, title: Optional[str] = None, temporary: bool = False) -> None:
"""Saves all changes to the configuration files.
Both title and temporary are needed because a save may be
intended to be permanent, but the save is not ready to be a full
checkpoint.
It is assumed that at most one checkpoint is finalized by this
method. Additionally, if an exception is raised, it is assumed a
new checkpoint was not finalized.
:param str title: The title of the save. If a title is given, the
configuration will be saved as a new checkpoint and put in a
timestamped directory. `title` has no effect if temporary is true.
:param bool temporary: Indicates whether the changes made will
be quickly reversed in the future (challenges)
:raises .PluginError: when save is unsuccessful
"""
@abstractmethod
def rollback_checkpoints(self, rollback: int = 1) -> None:
"""Revert `rollback` number of configuration checkpoints.
:raises .PluginError: when configuration cannot be fully reverted
"""
@abstractmethod
def recovery_routine(self) -> None:
"""Revert configuration to most recent finalized checkpoint.
Remove all changes (temporary and permanent) that have not been
finalized. This is useful to protect against crashes and other
execution interruptions.
:raises .errors.PluginError: If unable to recover the configuration
"""
@abstractmethod
def config_test(self) -> None:
"""Make sure the configuration is valid.
:raises .MisconfigurationError: when the config is not in a usable state
"""
@abstractmethod
def restart(self) -> None:
"""Restart or refresh the server content.
:raises .PluginError: when server cannot be restarted
"""
class IDisplay(zope.interface.Interface): # pylint: disable=inherit-non-class
"""Deprecated, use your own Display implementation instead."""
class IReporter(zope.interface.Interface): # pylint: disable=inherit-non-class
"""Deprecated, use your own Reporter implementation instead."""
class RenewableCert(metaclass=ABCMeta):
"""Interface to a certificate lineage."""
@property
@abstractmethod
def cert_path(self):
"""Path to the certificate file.
:rtype: str
"""
@property
@abstractmethod
def key_path(self):
"""Path to the private key file.
:rtype: str
"""
@property
@abstractmethod
def chain_path(self):
"""Path to the certificate chain file.
:rtype: str
"""
@property
@abstractmethod
def fullchain_path(self):
"""Path to the full chain file.
The full chain is the certificate file plus the chain file.
:rtype: str
"""
@property
@abstractmethod
def lineagename(self):
"""Name given to the certificate lineage.
:rtype: str
"""
@abstractmethod
def names(self):
"""What are the subject names of this certificate?
:returns: the subject names
:rtype: `list` of `str`
:raises .CertStorageError: if could not find cert file.
"""
# Updater interfaces
#
# When "certbot renew" is run, Certbot will iterate over each lineage and check
# if the selected installer for that lineage is a subclass of each updater
# class. If it is and the update of that type is configured to be run for that
# lineage, the relevant update function will be called for it. These functions
# are never called for other subcommands, so if an installer wants to perform
# an update during the run or install subcommand, it should do so when
# :func:`IInstaller.deploy_cert` is called.
class GenericUpdater(metaclass=ABCMeta):
"""Interface for update types not currently specified by Certbot.
This class allows plugins to perform types of updates that Certbot hasn't
defined (yet).
To make use of this interface, the installer should implement the interface
methods, and interfaces.GenericUpdater.register(InstallerClass) should
be called from the installer code.
    The plugins implementing this enhancement are responsible for handling
the saving of configuration checkpoints as well as other calls to
interface methods of `interfaces.IInstaller` such as prepare() and restart()
"""
@abstractmethod
def generic_updates(self, lineage, *args, **kwargs):
"""Perform any update types defined by the installer.
If an installer is a subclass of the class containing this method, this
function will always be called when "certbot renew" is run. If the
update defined by the installer should be run conditionally, the
installer needs to handle checking the conditions itself.
This method is called once for each lineage.
:param lineage: Certificate lineage object
:type lineage: RenewableCert
"""
class RenewDeployer(metaclass=ABCMeta):
"""Interface for update types run when a lineage is renewed
This class allows plugins to perform types of updates that need to run at
lineage renewal that Certbot hasn't defined (yet).
To make use of this interface, the installer should implement the interface
methods, and interfaces.RenewDeployer.register(InstallerClass) should
be called from the installer code.
"""
@abstractmethod
def renew_deploy(self, lineage, *args, **kwargs):
"""Perform updates defined by installer when a certificate has been renewed
If an installer is a subclass of the class containing this method, this
    function will always be called when a certificate has been renewed by
running "certbot renew". For example if a plugin needs to copy a
certificate over, or change configuration based on the new certificate.
This method is called once for each lineage renewed
:param lineage: Certificate lineage object
:type lineage: RenewableCert
"""
| 32.05383
| 98
| 0.673233
|