Dataset schema (one row per source file):
hexsha: string (length 40)
size: int64 (5 to 2.06M)
ext: string (10 distinct values)
lang: string (1 distinct value)
max_stars_repo_path: string (length 3 to 248)
max_stars_repo_name: string (length 5 to 125)
max_stars_repo_head_hexsha: string (length 40 to 78)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k, nullable)
max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
max_issues_repo_path: string (length 3 to 248)
max_issues_repo_name: string (length 5 to 125)
max_issues_repo_head_hexsha: string (length 40 to 78)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 67k, nullable)
max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
max_forks_repo_path: string (length 3 to 248)
max_forks_repo_name: string (length 5 to 125)
max_forks_repo_head_hexsha: string (length 40 to 78)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k, nullable)
max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
content: string (length 5 to 2.06M)
avg_line_length: float64 (1 to 1.02M)
max_line_length: int64 (3 to 1.03M)
alphanum_fraction: float64 (0 to 1)
count_classes: int64 (0 to 1.6M)
score_classes: float64 (0 to 1)
count_generators: int64 (0 to 651k)
score_generators: float64 (0 to 1)
count_decorators: int64 (0 to 990k)
score_decorators: float64 (0 to 1)
count_async_functions: int64 (0 to 235k)
score_async_functions: float64 (0 to 1)
count_documentation: int64 (0 to 1.04M)
score_documentation: float64 (0 to 1)

hexsha: 836a1f95f9bc7256c74547e4b46165f7f107b034 | size: 286 | ext: py | lang: Python
max_stars: path=test_service.py, repo=jgawrilo/qcr_ci, head=bd4c192444e03a551e3c5f4f0a275a4c029294de, licenses=["Apache-2.0"], count=1, first=2020-03-05T13:27:39.000Z, last=2020-03-05T13:27:39.000Z
max_issues: path=test_service.py, repo=jgawrilo/qcr_ci, head=bd4c192444e03a551e3c5f4f0a275a4c029294de, licenses=["Apache-2.0"], count=null, first=null, last=null
max_forks: path=test_service.py, repo=jgawrilo/qcr_ci, head=bd4c192444e03a551e3c5f4f0a275a4c029294de, licenses=["Apache-2.0"], count=null, first=null, last=null
content:
import requests
import json
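# Post the sample payload from test_input2.json to the local impact API and pretty-print the JSON response.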
headers = {'Content-Type': 'application/json'}
data = json.load(open("./test_input2.json"))
url = "http://localhost:5001/api/impact"
response = requests.post(url,data=json.dumps({"data":data}),headers=headers)
print(json.dumps(response.json(), indent=2))
avg_line_length: 22 | max_line_length: 76 | alphanum_fraction: 0.727273
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 92 | score_documentation: 0.321678

hexsha: 836a92d066a5c850634a4179920f5c67049059c7 | size: 16,969 | ext: py | lang: Python
max_stars: path=google/appengine/ext/datastore_admin/backup_pb2.py, repo=vladushakov987/appengine_python3, head=0dd481c73e2537a50ee10f1b79cd65938087e555, licenses=["Apache-2.0"], count=null, first=null, last=null
max_issues: path=google/appengine/ext/datastore_admin/backup_pb2.py, repo=vladushakov987/appengine_python3, head=0dd481c73e2537a50ee10f1b79cd65938087e555, licenses=["Apache-2.0"], count=null, first=null, last=null
max_forks: path=google/appengine/ext/datastore_admin/backup_pb2.py, repo=vladushakov987/appengine_python3, head=0dd481c73e2537a50ee10f1b79cd65938087e555, licenses=["Apache-2.0"], count=null, first=null, last=null
content:
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
import google
from google.net.proto2.python.public import descriptor as _descriptor
from google.net.proto2.python.public import message as _message
from google.net.proto2.python.public import reflection as _reflection
from google.net.proto2.python.public import symbol_database as _symbol_database
from google.net.proto2.proto import descriptor_pb2
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='apphosting/ext/datastore_admin/backup.proto',
package='apphosting.ext.datastore_admin',
serialized_pb=_b('\n+apphosting/ext/datastore_admin/backup.proto\x12\x1e\x61pphosting.ext.datastore_admin\"\x8c\x01\n\x06\x42\x61\x63kup\x12?\n\x0b\x62\x61\x63kup_info\x18\x01 \x01(\x0b\x32*.apphosting.ext.datastore_admin.BackupInfo\x12\x41\n\tkind_info\x18\x02 \x03(\x0b\x32..apphosting.ext.datastore_admin.KindBackupInfo\"Q\n\nBackupInfo\x12\x13\n\x0b\x62\x61\x63kup_name\x18\x01 \x01(\t\x12\x17\n\x0fstart_timestamp\x18\x02 \x01(\x03\x12\x15\n\rend_timestamp\x18\x03 \x01(\x03\"\x8c\x01\n\x0eKindBackupInfo\x12\x0c\n\x04kind\x18\x01 \x02(\t\x12\x0c\n\x04\x66ile\x18\x02 \x03(\t\x12\x43\n\rentity_schema\x18\x03 \x01(\x0b\x32,.apphosting.ext.datastore_admin.EntitySchema\x12\x19\n\nis_partial\x18\x04 \x01(\x08:\x05\x66\x61lse\"\x90\x05\n\x0c\x45ntitySchema\x12\x0c\n\x04kind\x18\x01 \x01(\t\x12\x41\n\x05\x66ield\x18\x02 \x03(\x0b\x32\x32.apphosting.ext.datastore_admin.EntitySchema.Field\x1a\xb2\x01\n\x04Type\x12\x0f\n\x07is_list\x18\x01 \x01(\x08\x12R\n\x0eprimitive_type\x18\x02 \x03(\x0e\x32:.apphosting.ext.datastore_admin.EntitySchema.PrimitiveType\x12\x45\n\x0f\x65mbedded_schema\x18\x03 \x03(\x0b\x32,.apphosting.ext.datastore_admin.EntitySchema\x1aj\n\x05\x46ield\x12\x0c\n\x04name\x18\x01 \x02(\t\x12?\n\x04type\x18\x02 \x03(\x0b\x32\x31.apphosting.ext.datastore_admin.EntitySchema.Type\x12\x12\n\nfield_name\x18\x03 \x01(\t\"\x8d\x02\n\rPrimitiveType\x12\t\n\x05\x46LOAT\x10\x00\x12\x0b\n\x07INTEGER\x10\x01\x12\x0b\n\x07\x42OOLEAN\x10\x02\x12\n\n\x06STRING\x10\x03\x12\r\n\tDATE_TIME\x10\x04\x12\n\n\x06RATING\x10\x05\x12\x08\n\x04LINK\x10\x06\x12\x0c\n\x08\x43\x41TEGORY\x10\x07\x12\x10\n\x0cPHONE_NUMBER\x10\x08\x12\x12\n\x0ePOSTAL_ADDRESS\x10\t\x12\t\n\x05\x45MAIL\x10\n\x12\r\n\tIM_HANDLE\x10\x0b\x12\x0c\n\x08\x42LOB_KEY\x10\x0c\x12\x08\n\x04TEXT\x10\r\x12\x08\n\x04\x42LOB\x10\x0e\x12\x0e\n\nSHORT_BLOB\x10\x0f\x12\x08\n\x04USER\x10\x10\x12\r\n\tGEO_POINT\x10\x11\x12\r\n\tREFERENCE\x10\x12\x42\x14\x10\x02 \x02(\x02\x42\x0c\x42\x61\x63kupProtos')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ENTITYSCHEMA_PRIMITIVETYPE = _descriptor.EnumDescriptor(
name='PrimitiveType',
full_name='apphosting.ext.datastore_admin.EntitySchema.PrimitiveType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FLOAT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTEGER', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BOOLEAN', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STRING', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATE_TIME', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RATING', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LINK', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CATEGORY', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PHONE_NUMBER', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POSTAL_ADDRESS', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EMAIL', index=10, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IM_HANDLE', index=11, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BLOB_KEY', index=12, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TEXT', index=13, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BLOB', index=14, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SHORT_BLOB', index=15, number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='USER', index=16, number=16,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GEO_POINT', index=17, number=17,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REFERENCE', index=18, number=18,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=836,
serialized_end=1105,
)
_sym_db.RegisterEnumDescriptor(_ENTITYSCHEMA_PRIMITIVETYPE)
_BACKUP = _descriptor.Descriptor(
name='Backup',
full_name='apphosting.ext.datastore_admin.Backup',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='backup_info', full_name='apphosting.ext.datastore_admin.Backup.backup_info', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kind_info', full_name='apphosting.ext.datastore_admin.Backup.kind_info', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=80,
serialized_end=220,
)
_BACKUPINFO = _descriptor.Descriptor(
name='BackupInfo',
full_name='apphosting.ext.datastore_admin.BackupInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='backup_name', full_name='apphosting.ext.datastore_admin.BackupInfo.backup_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='start_timestamp', full_name='apphosting.ext.datastore_admin.BackupInfo.start_timestamp', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='end_timestamp', full_name='apphosting.ext.datastore_admin.BackupInfo.end_timestamp', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=222,
serialized_end=303,
)
_KINDBACKUPINFO = _descriptor.Descriptor(
name='KindBackupInfo',
full_name='apphosting.ext.datastore_admin.KindBackupInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='kind', full_name='apphosting.ext.datastore_admin.KindBackupInfo.kind', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='file', full_name='apphosting.ext.datastore_admin.KindBackupInfo.file', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='entity_schema', full_name='apphosting.ext.datastore_admin.KindBackupInfo.entity_schema', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_partial', full_name='apphosting.ext.datastore_admin.KindBackupInfo.is_partial', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=306,
serialized_end=446,
)
_ENTITYSCHEMA_TYPE = _descriptor.Descriptor(
name='Type',
full_name='apphosting.ext.datastore_admin.EntitySchema.Type',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='is_list', full_name='apphosting.ext.datastore_admin.EntitySchema.Type.is_list', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='primitive_type', full_name='apphosting.ext.datastore_admin.EntitySchema.Type.primitive_type', index=1,
number=2, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='embedded_schema', full_name='apphosting.ext.datastore_admin.EntitySchema.Type.embedded_schema', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=547,
serialized_end=725,
)
_ENTITYSCHEMA_FIELD = _descriptor.Descriptor(
name='Field',
full_name='apphosting.ext.datastore_admin.EntitySchema.Field',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='apphosting.ext.datastore_admin.EntitySchema.Field.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='apphosting.ext.datastore_admin.EntitySchema.Field.type', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='field_name', full_name='apphosting.ext.datastore_admin.EntitySchema.Field.field_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=727,
serialized_end=833,
)
_ENTITYSCHEMA = _descriptor.Descriptor(
name='EntitySchema',
full_name='apphosting.ext.datastore_admin.EntitySchema',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='kind', full_name='apphosting.ext.datastore_admin.EntitySchema.kind', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='field', full_name='apphosting.ext.datastore_admin.EntitySchema.field', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_ENTITYSCHEMA_TYPE, _ENTITYSCHEMA_FIELD, ],
enum_types=[
_ENTITYSCHEMA_PRIMITIVETYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=449,
serialized_end=1105,
)
_BACKUP.fields_by_name['backup_info'].message_type = _BACKUPINFO
_BACKUP.fields_by_name['kind_info'].message_type = _KINDBACKUPINFO
_KINDBACKUPINFO.fields_by_name['entity_schema'].message_type = _ENTITYSCHEMA
_ENTITYSCHEMA_TYPE.fields_by_name['primitive_type'].enum_type = _ENTITYSCHEMA_PRIMITIVETYPE
_ENTITYSCHEMA_TYPE.fields_by_name['embedded_schema'].message_type = _ENTITYSCHEMA
_ENTITYSCHEMA_TYPE.containing_type = _ENTITYSCHEMA
_ENTITYSCHEMA_FIELD.fields_by_name['type'].message_type = _ENTITYSCHEMA_TYPE
_ENTITYSCHEMA_FIELD.containing_type = _ENTITYSCHEMA
_ENTITYSCHEMA.fields_by_name['field'].message_type = _ENTITYSCHEMA_FIELD
_ENTITYSCHEMA_PRIMITIVETYPE.containing_type = _ENTITYSCHEMA
DESCRIPTOR.message_types_by_name['Backup'] = _BACKUP
DESCRIPTOR.message_types_by_name['BackupInfo'] = _BACKUPINFO
DESCRIPTOR.message_types_by_name['KindBackupInfo'] = _KINDBACKUPINFO
DESCRIPTOR.message_types_by_name['EntitySchema'] = _ENTITYSCHEMA
Backup = _reflection.GeneratedProtocolMessageType('Backup', (_message.Message,), dict(
DESCRIPTOR = _BACKUP,
__module__ = 'google.appengine.ext.datastore_admin.backup_pb2'
))
_sym_db.RegisterMessage(Backup)
BackupInfo = _reflection.GeneratedProtocolMessageType('BackupInfo', (_message.Message,), dict(
DESCRIPTOR = _BACKUPINFO,
__module__ = 'google.appengine.ext.datastore_admin.backup_pb2'
))
_sym_db.RegisterMessage(BackupInfo)
KindBackupInfo = _reflection.GeneratedProtocolMessageType('KindBackupInfo', (_message.Message,), dict(
DESCRIPTOR = _KINDBACKUPINFO,
__module__ = 'google.appengine.ext.datastore_admin.backup_pb2'
))
_sym_db.RegisterMessage(KindBackupInfo)
EntitySchema = _reflection.GeneratedProtocolMessageType('EntitySchema', (_message.Message,), dict(
Type = _reflection.GeneratedProtocolMessageType('Type', (_message.Message,), dict(
DESCRIPTOR = _ENTITYSCHEMA_TYPE,
__module__ = 'google.appengine.ext.datastore_admin.backup_pb2'
))
,
Field = _reflection.GeneratedProtocolMessageType('Field', (_message.Message,), dict(
DESCRIPTOR = _ENTITYSCHEMA_FIELD,
__module__ = 'google.appengine.ext.datastore_admin.backup_pb2'
))
,
DESCRIPTOR = _ENTITYSCHEMA,
__module__ = 'google.appengine.ext.datastore_admin.backup_pb2'
))
_sym_db.RegisterMessage(EntitySchema)
_sym_db.RegisterMessage(EntitySchema.Type)
_sym_db.RegisterMessage(EntitySchema.Field)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\020\002 \002(\002B\014BackupProtos'))
avg_line_length: 37.376652 | max_line_length: 1,971 | alphanum_fraction: 0.736755
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 4,930 | score_documentation: 0.29053

hexsha: 836acb8a4b706f8933f3b1012b5068f029201a8e | size: 11,254 | ext: py | lang: Python
max_stars: path=PSBChart_support.py, repo=georgepruitt/PSBChart, head=ee31497ffb12f818bab7ec750425f9fc7259c0f8, licenses=["Apache-2.0"], count=1, first=2019-08-02T06:36:05.000Z, last=2019-08-02T06:36:05.000Z
max_issues: path=PSBChart_support.py, repo=schkr/PSBChart, head=bf19c2632491f18ba6ee6b3337bcb118350b9b3e, licenses=["Apache-2.0"], count=1, first=2018-02-07T21:20:43.000Z, last=2018-02-07T21:20:43.000Z
max_forks: path=PSBChart_support.py, repo=schkr/PSBChart, head=bf19c2632491f18ba6ee6b3337bcb118350b9b3e, licenses=["Apache-2.0"], count=1, first=2019-08-02T06:35:30.000Z, last=2019-08-02T06:35:30.000Z
content:
#! /usr/bin/env python
#
# Support module generated by PAGE version 4.10
# In conjunction with Tcl version 8.6
# Jan 12, 2018 04:09:34 PM
import turtle
from turtle import TurtleScreen, RawTurtle, TK
from tkinter.filedialog import askopenfilename
import tkinter as tk
import os.path
import datetime
import csv
import sys
from PSBChart import ManageTrades
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import ttk
py3 = 0
except ImportError:
import tkinter.ttk as ttk
py3 = 1
d = list()
dt = list()
o = list()
h = list()
l = list()
c = list()
v = list()
oi = list()
tradeDate = list()
tradeVal1 = list()
tradeType = list()
tradeSize = list()
tradeNtryOrXit = list()
tradePrice = list()
highestHigh = 0
lowestLow = 99999999
root = tk.Tk()
#root.withdraw()
##s = tk.ScrollBar(root)
T = tk.Text(root,height=10,width=50)
##s.pack(side=tk.RIGHT, fill = tk.Y)
T.pack(side=tk.RIGHT, fill = tk.Y)
##s.config(command=T.yview)
##T.config(yscrollcommand.set)
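# Load a trade list from a user-selected CSV into the module-level trade arrays, then redraw the chart with the trades overlaid.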
def manageTrades(trades,indicatorList):
if trades.load:
cnt = 0
file = askopenfilename(filetypes=(('CSV files', '*.csv'),
('TXT files', '*.txt'),('POR files', '*.por')),
title='Select Markets or Ports. To Test- CSV format only!')
with open(file) as f:
f_csv = csv.reader(f)
numDecs = 0
for row in f_csv:
numCols = len(row)
cnt += 1
tradeDate.append(int(row[0]))
# dt.append(datetime.datetime.strptime(row[0],'%Y%m%d'))
tradeVal1.append(int(row[1]))
tradeType.append(row[2])
tradeSize.append(int(row[3]))
tradePrice.append(float(row[4]))
print("Trades ",tradeDate[-1]," ",tradePrice[-1])
tradeCnt = cnt
trades.setLoadDraw(False,True)
w.Button5.configure(state = "disable")
loadAndDraw(False,True,indicatorList,trades)
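# Optionally load OHLC price data from CSV (load=True) and/or draw the bar chart, moving averages, and trade markers on the turtle canvas (draw=True).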
def loadAndDraw(load,draw,indicatorList,trades):
def get_mouse_click_coor(x, y):
print(x, y)
barNumber = round(x/10)
barNumber = max(1,barNumber)
print("Bar Number: ",barNumber," ",d[startPt+barNumber-1]," ",o[startPt+barNumber-1]," ",highestHigh)
# tkMessageBox("Information",str(barNumber)
# # trtl.write('Vivax Solutions', font=("Arial", 20, "bold")) # chosing the font
## trtl.goto(10,highestHigh-.05*(highestHigh - lowestLow))
## trtl.pendown()
indexVal =startPt+barNumber-1
outPutStr = str(d[indexVal]) + " " +str(o[indexVal])+ " " +str(h[indexVal])+ " " +str(l[indexVal])+ " " + str(c[indexVal]) # chosing the font
root.focus_set()
T.focus_set( )
T.insert(tk.END,outPutStr+"\n")
## trtl.goto(20,highestHigh-60)
## trtl.write(str(o[50-(50-barNumber)]), font=("Arial", 8, "bold")) # chosing the font
## trtl.goto(20,highestHigh-80)
## trtl.write(str(h[50-(50-barNumber)]), font=("Arial", 8, "bold")) # chosing the font
## trtl.goto(20,highestHigh-100)
## trtl.write(str(l[50-(50-barNumber)]), font=("Arial", 8, "bold")) # chosing the font
## trtl.goto(20,highestHigh-120)
## trtl.write(str(c[50-(50-barNumber)]), font=("Arial", 8, "bold")) # chosing the font
##
## #root.withdraw()
if load == True:
cnt = 0
file = askopenfilename(filetypes=(('CSV files', '*.csv'),
('TXT files', '*.txt'),('POR files', '*.por')),
title='Select Markets or Ports. To Test- CSV format only!')
with open(file) as f:
f_csv = csv.reader(f)
numDecs = 0
for row in f_csv:
numCols = len(row)
cnt += 1
d.append(int(row[0]))
dt.append(datetime.datetime.strptime(row[0],'%Y%m%d'))
o.append(float(row[1]))
h.append(float(row[2]))
l.append(float(row[3]))
c.append(float(row[4]))
v.append(float(row[5]))
oi.append(float(row[6]))
oString= str(o[-1])
if '.' in oString:
decLoc = oString.index('.')
numDecs = max(numDecs,len(oString) - decLoc - 1)
xDate = list()
yVal = list()
zVal = list()
w.Button5.configure(state = "normal")
w.Entry1.insert(0,str(d[-1]))
if draw == True:
startDrawDateStr = w.Entry1.get()
startDrawDate = int(startDrawDateStr)
cnt = -1
for x in range(0,len(d)):
cnt+=1
if startDrawDate >= d[x]: startPt = x
numBarsPlot = 60
if startPt + numBarsPlot > len(d): startPt = len(d) - (numBarsPlot + 1)
print(startPt," ",len(d)," ",numBarsPlot);
indicCnt = 0
screen = TurtleScreen(w.Canvas1)
trtl = RawTurtle(screen)
screen.tracer(False)
screen.bgcolor('white')
clr=['red','green','blue','yellow','purple']
trtl.pensize(6)
trtl.penup()
trtl.color("black")
highestHigh = 0
lowestLow = 99999999
# scaleMult = 10**numDecs
scaleMult = 1
for days in range(startPt,startPt+numBarsPlot):
if h[days]*scaleMult > highestHigh: highestHigh = h[days]*scaleMult
if l[days]*scaleMult < lowestLow: lowestLow = l[days]*scaleMult
hhllDiffScale= (highestHigh - lowestLow) /1.65
hhllDiff = highestHigh - lowestLow
botOfChart = lowestLow
screen.setworldcoordinates(-10,highestHigh-hhllDiffScale,673,highestHigh)
print(highestHigh," ",lowestLow)
m=0
trtl.setheading(0)
trtl.penup()
for i in range(startPt,startPt+numBarsPlot+1):
m=m+1
trtl.goto(m*10,h[i]*scaleMult)
trtl.pendown()
trtl.goto(m*10,l[i]*scaleMult)
trtl.penup()
trtl.goto(m*10,c[i]*scaleMult)
trtl.pendown()
trtl.goto(m*10+5,c[i]*scaleMult)
trtl.penup()
trtl.goto(m*10,o[i]*scaleMult)
trtl.pendown()
trtl.goto(m*10-5,o[i]*scaleMult)
trtl.penup()
trtl.goto(10,highestHigh)
print("Indicator List: ",indicatorList)
if len(indicatorList)!=0:
movAvgParams = list([])
if "movAvg" in indicatorList:
movAvgVal = 0
movAvgParamIndexVal = indicatorList.index("movAvg")
movAvgParams.append(indicatorList[movAvgParamIndexVal + 1])
movAvgParams.append(indicatorList[movAvgParamIndexVal + 2])
movAvgParams.append(indicatorList[movAvgParamIndexVal + 3])
for j in range(0,3):
n = 0
trtl.penup()
if j == 0 : trtl.color("red")
if j == 1 : trtl.color("green")
if j == 2 : trtl.color("blue")
for i in range(startPt,startPt+numBarsPlot):
n = n + 1
movAvgVal = 0
for k in range(i-movAvgParams[j],i):
movAvgVal = movAvgVal + c[k] * scaleMult
if movAvgParams[j] !=0 :
movAvgVal = movAvgVal/movAvgParams[j]
if i == startPt : trtl.goto(n*10,movAvgVal)
trtl.pendown()
trtl.goto(n*10,movAvgVal)
trtl.penup()
# print("PlotTrades : ",plotTrades)
if trades.draw:
debugTradeDate = tradeDate[0]
debugDate = d[startPt]
n = 0
while debugTradeDate <= debugDate:
n +=1
debugTradeDate = tradeDate[n]
m = 0
for i in range(startPt,startPt+numBarsPlot):
m = m + 1
debugDate = d[i]
if debugDate == debugTradeDate:
trtl.penup()
tradeValue = tradePrice[n]
if tradeType[n] == "buy":
trtl.color("Green")
trtl.goto(m*10-5,tradeValue - hhllDiff *.03)
trtl.pensize(3)
trtl.pendown()
trtl.goto(m*10,tradeValue)
trtl.goto(m*10+5,tradeValue - hhllDiff *.03)
trtl.penup()
if tradeType[n] == "sell":
trtl.color("Red")
trtl.goto(m*10-5,tradeValue + hhllDiff *.03)
trtl.pensize(3)
trtl.pendown()
trtl.goto(m*10,tradeValue)
trtl.goto(m*10+5,tradeValue + hhllDiff *.03)
trtl.penup()
if tradeType[n] == "longLiq":
trtl.color("Blue")
trtl.penup()
trtl.goto(m*10-5, tradeValue)
trtl.pensize(3)
trtl.pendown()
trtl.goto(m*10+5, tradeValue)
trtl.penup()
trtl.pensize(1)
print("Found a trade: ",tradeValue," ",debugTradeDate," m= ",m," ",tradeValue-hhllDiff*.05)
n+=1
if n < len(tradeDate): debugTradeDate = tradeDate[n]
trtl.color("black")
trtl.goto(-10,botOfChart)
trtl.pendown()
trtl.goto(673,botOfChart)
trtl.penup()
trtl.goto(-10,botOfChart)
m = 0
for i in range(startPt,startPt+numBarsPlot):
if i % 10 == 0 :
m = m + 1
trtl.pendown()
trtl.write(str(d[i]), font=("Arial", 8, "bold")) # chosing the font
trtl.penup()
trtl.goto(m*100,botOfChart)
trtl.penup()
trtl.goto(628,highestHigh)
trtl.pendown()
trtl.goto(628,botOfChart)
trtl.penup()
m = 0
vertIncrement = hhllDiff/10
for i in range(0,11):
trtl.goto(630,highestHigh - m*vertIncrement)
trtl.pendown()
trtl.write(str(highestHigh - m * vertIncrement),font=("Arial", 8, "bold"))
trtl.penup()
m +=1
# turtle.done()
screen.onscreenclick(get_mouse_click_coor)
## turtle.mainloop()
def init(top, gui, *args, **kwargs):
global w, top_level, root
w = gui
top_level = top
root = top
def destroy_window():
# Function which closes the window.
global top_level
top_level.destroy()
top_level = None
if __name__ == '__main__':
import PSBChart
PSBChart.vp_start_gui()
avg_line_length: 34.521472 | max_line_length: 150 | alphanum_fraction: 0.494846
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 1,829 | score_documentation: 0.16252

hexsha: 55c1580b2b075823f72830e0bcd2511007db68b9 | size: 9,790 | ext: py | lang: Python
max_stars: path=test/low_use_test/test_reporter.py, repo=KeithWhitley/LUAU, head=d7df6836e7c9c0ddc4099b9a17f7e0727eeeb179, licenses=["Apache-2.0"], count=1, first=2020-10-16T13:02:36.000Z, last=2020-10-16T13:02:36.000Z
max_issues: path=test/low_use_test/test_reporter.py, repo=KeithWhitley/LUAU, head=d7df6836e7c9c0ddc4099b9a17f7e0727eeeb179, licenses=["Apache-2.0"], count=3, first=2019-02-04T11:44:06.000Z, last=2019-02-05T14:09:04.000Z
max_forks: path=test/low_use_test/test_reporter.py, repo=KeithWhitley/LUAU, head=d7df6836e7c9c0ddc4099b9a17f7e0727eeeb179, licenses=["Apache-2.0"], count=1, first=2021-05-26T12:00:06.000Z, last=2021-05-26T12:00:06.000Z
content:
import unittest
import boto3
from moto import mock_dynamodb2, mock_ec2
from low_use.reporter import LowUseReporter
from util.aws import EC2Wrapper, DynamoWrapper
import os
class TestLowUseReporter(unittest.TestCase):
@mock_dynamodb2
@mock_ec2
def setUp(self):
self.session = boto3.Session(region_name='us-west-2')
self.wrapper = EC2Wrapper(self.session)
self.dynamo = DynamoWrapper(self.session)
os.environ['AWS_REGION'] = 'us-west-2'
self.reporter = LowUseReporter(None, None)
self.maxDiff = None
self.dynamo_resource = boto3.resource(
'dynamodb', region_name='us-west-2')
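    # Helper that creates the mocked Whitelist and LowUse DynamoDB tables used by the tests below.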
@mock_dynamodb2
def create_tables(self):
self.whitelist_table = self.dynamo_resource.create_table(
TableName='Whitelist',
KeySchema=[
{
'AttributeName': 'InstanceID',
'KeyType': 'HASH' # Partition key
}
],
AttributeDefinitions=[
{
'AttributeName': 'InstanceID',
'AttributeType': 'S'
},
{
'AttributeName': 'Creator',
'AttributeType': 'S'
},
{
'AttributeName': 'Reason',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 1,
'WriteCapacityUnits': 1
}
)
self.lowuse_table = self.dynamo_resource.create_table(
TableName='LowUse',
KeySchema=[
{
'AttributeName': 'InstanceID',
'KeyType': 'HASH' # Partition key
}
],
AttributeDefinitions=[
{
'AttributeName': 'InstanceID',
'AttributeType': 'S'
},
{
'AttributeName': 'Creator',
'AttributeType': 'S'
},
{
'AttributeName': 'Scheduled For Deletion',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 1,
'WriteCapacityUnits': 1
}
)
self.lowuse_table.meta.client.get_waiter(
'table_exists').wait(TableName='LowUse')
self.whitelist_table.meta.client.get_waiter(
'table_exists').wait(TableName='Whitelist')
self.dynamo.low_use = self.dynamo_resource.Table('LowUse')
self.dynamo.whitelist = self.dynamo_resource.Table('Whitelist')
def test_sync(self):
pass
@mock_dynamodb2
def test_sync_whitelist(self):
self.create_tables()
test_item = {
'InstanceID': 'test_id',
'Creator': 'test_creator',
'Reason': 'test_reason',
'EmailSent': False
}
self.reporter.whitelist.append(test_item)
self.reporter.sync_whitelist()
item = self.whitelist_table.get_item(
Key={'InstanceID': 'test_id'})['Item']
self.assertDictEqual(test_item, item)
@mock_dynamodb2
@mock_ec2
def test_sync_low_use_instances(self):
self.create_tables()
instance = self.wrapper.ec2.run_instances(MaxCount=1, MinCount=1)[
'Instances'][0]['InstanceId']
test_item = {
'InstanceID': instance,
'Creator': 'test_creator',
'Scheduled For Deletion': False,
'EmailSent': False
}
self.reporter.low_use_instances.append(test_item)
self.reporter.sync_low_use_instances()
item = self.lowuse_table.get_item(Key={'InstanceID': instance})['Item']
self.assertDictEqual(test_item, item)
self.assertTrue(self.wrapper.is_low_use(instance))
@mock_dynamodb2
@mock_ec2
def test_sync_instances_scheduled_for_deletion(self):
self.create_tables()
instance = self.wrapper.ec2.run_instances(MaxCount=1, MinCount=1)[
'Instances'][0]['InstanceId']
test_item = {
'InstanceID': instance,
'Creator': 'test_creator',
'Scheduled For Deletion': True,
}
self.reporter.instances_scheduled_for_deletion.append(test_item)
self.reporter.sync_instances_scheduled_for_deletion()
item = self.lowuse_table.get_item(Key={'InstanceID': instance})['Item']
self.assertDictEqual(test_item, item)
self.assertTrue(self.wrapper.is_scheduled_for_deletion(instance))
@mock_dynamodb2
@mock_ec2
def test_flag_instances_as_low_use(self):
self.create_tables()
instance = self.wrapper.ec2.run_instances(MaxCount=1, MinCount=1)[
'Instances'][0]['InstanceId']
test_item = {
'InstanceID': instance,
'Creator': 'test_creator',
'Scheduled For Deletion': False,
'EmailSent': False
}
self.reporter.flag_instance_as_low_use(instance, 'test_creator')
item = self.lowuse_table.get_item(Key={'InstanceID': instance})['Item']
self.assertDictEqual(test_item, item)
self.assertTrue(self.wrapper.is_low_use(instance))
@mock_dynamodb2
@mock_ec2
def test_flag_instance_for_deletion(self):
self.create_tables()
instance = self.wrapper.ec2.run_instances(MaxCount=1, MinCount=1)[
'Instances'][0]['InstanceId']
test_item = {
'InstanceID': instance,
'Creator': 'test_creator',
'Scheduled For Deletion': True,
}
self.reporter.flag_instance_for_deletion(instance, 'test_creator')
item = self.lowuse_table.get_item(Key={'InstanceID': instance})['Item']
self.assertDictEqual(test_item, item)
self.assertTrue(self.wrapper.is_scheduled_for_deletion(instance))
@mock_ec2
def test_sort_instances(self):
whitelist_instance = self.wrapper.ec2.run_instances(
MaxCount=1, MinCount=1)['Instances'][0]['InstanceId']
instance_to_stop = self.wrapper.ec2.run_instances(MaxCount=1, MinCount=1)[
'Instances'][0]['InstanceId']
low_use_instance = self.wrapper.ec2.run_instances(MaxCount=1, MinCount=1)[
'Instances'][0]['InstanceId']
schedule_to_delete_instance = self.wrapper.ec2.run_instances(
MaxCount=1, MinCount=1)['Instances'][0]['InstanceId']
self.wrapper.tag_as_low_use(schedule_to_delete_instance)
self.wrapper.tag_as_whitelisted(whitelist_instance)
self.wrapper.tag_for_deletion(instance_to_stop)
list_of_instances = [{
'instance_id': whitelist_instance
}, {
'instance_id': low_use_instance
}, {
'instance_id': instance_to_stop
}, {
'instance_id': schedule_to_delete_instance
},
]
expected_whitelist = [
{
'InstanceID': whitelist_instance,
'Creator': 'Unknown',
'Reason': None
}
]
expected_instances_to_stop = [instance_to_stop]
expected_low_use_list = [{
'InstanceID': low_use_instance,
'Creator': 'Unknown',
'Cost': 'Unknown',
'AverageCpuUsage': 'Unknown',
'AverageNetworkUsage': 'Unknown'
}]
expected_delete_list = [{
'InstanceID': schedule_to_delete_instance,
'Creator': 'Unknown',
'Cost': 'Unknown',
'AverageCpuUsage': 'Unknown',
'AverageNetworkUsage': 'Unknown'
}]
self.reporter.sort_instances(list_of_instances)
self.assertEqual(expected_whitelist, self.reporter.whitelist)
self.assertEqual(expected_low_use_list,
self.reporter.low_use_instances)
self.assertEqual(expected_delete_list,
self.reporter.instances_scheduled_for_deletion)
self.assertEqual(expected_instances_to_stop,
self.reporter.instances_to_stop)
def test_get_creator_report(self):
self.reporter.low_use_instances = [
{
'Creator': 'test1',
'InstanceID': 'test_id_1'
},
{
'Creator': 'test2',
'InstanceID': 'test_id_2'
}
]
self.reporter.instances_scheduled_for_deletion = [
{
'Creator': 'test1',
'InstanceID': 'test_id_1_delete'
},
{
'Creator': 'test2',
'InstanceID': 'test_id_2_delete'
}
]
expected_creator_reports = [
{
'creator': 'test1',
'low_use': [{
'Creator': 'test1',
'InstanceID': 'test_id_1'
}],
'scheduled_for_deletion': [{
'Creator': 'test1',
'InstanceID': 'test_id_1_delete'
}]},
{
'creator': 'test2',
'low_use': [{
'Creator': 'test2',
'InstanceID': 'test_id_2'
}],
'scheduled_for_deletion': [{
'Creator': 'test2',
'InstanceID': 'test_id_2_delete'
}]}
]
result = list(self.reporter.get_creator_report())
self.assertCountEqual(expected_creator_reports, result)
def test_start(self):
pass
avg_line_length: 33.758621 | max_line_length: 82 | alphanum_fraction: 0.544637
count_classes: 9,615 | score_classes: 0.982125 | count_generators: 0 | score_generators: 0 | count_decorators: 7,982 | score_decorators: 0.815322 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 1,979 | score_documentation: 0.202145

hexsha: 55c1a75a2d6e9fa1c5acdea024449b58927aff23 | size: 1,009 | ext: py | lang: Python
max_stars: path=splot/tests/test_viz_libpysal_mpl.py, repo=renanxcortes/splot, head=c29e9b5cc92be4c4deee0358c1f462b60b0fe9f7, licenses=["BSD-3-Clause"], count=null, first=null, last=null
max_issues: path=splot/tests/test_viz_libpysal_mpl.py, repo=renanxcortes/splot, head=c29e9b5cc92be4c4deee0358c1f462b60b0fe9f7, licenses=["BSD-3-Clause"], count=null, first=null, last=null
max_forks: path=splot/tests/test_viz_libpysal_mpl.py, repo=renanxcortes/splot, head=c29e9b5cc92be4c4deee0358c1f462b60b0fe9f7, licenses=["BSD-3-Clause"], count=null, first=null, last=null
content:
from libpysal.weights.contiguity import Queen
import libpysal
from libpysal import examples
import matplotlib.pyplot as plt
import geopandas as gpd
from splot.libpysal import plot_spatial_weights
def test_plot_spatial_weights():
# get data
gdf = gpd.read_file(examples.get_path('43MUE250GC_SIR.shp'))
gdf.head()
# calculate weights
weights = Queen.from_dataframe(gdf)
# plot weights
fig, _ = plot_spatial_weights(weights, gdf)
plt.close(fig)
# calculate nonplanar_joins
wnp = libpysal.weights.util.nonplanar_neighbors(weights, gdf)
# plot new joins
fig2, _ = plot_spatial_weights(wnp, gdf)
plt.close(fig2)
#customize
fig3, _ = plot_spatial_weights(wnp, gdf, nonplanar_edge_kws=dict(color='#4393c3'))
plt.close(fig3)
# uses a column as the index for spatial weights object
weights_index = Queen.from_dataframe(gdf, idVariable="CD_GEOCMU")
fig, _ = plot_spatial_weights(weights_index, gdf, indexed_on="CD_GEOCMU")
plt.close(fig)
avg_line_length: 33.633333 | max_line_length: 86 | alphanum_fraction: 0.737364
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 202 | score_documentation: 0.200198

hexsha: 55c1a9520e720c583feab19a26044ebc037a17c8 | size: 17,245 | ext: py | lang: Python
max_stars: path=dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/tests/test_ir.py, repo=BadDevCode/lumberyard, head=3d688932f919dbf5821f0cb8a210ce24abe39e9e, licenses=["AML"], count=1,738, first=2017-09-21T10:59:12.000Z, last=2022-03-31T21:05:46.000Z
max_issues: path=dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/tests/test_ir.py, repo=olivier-be/lumberyard, head=3d688932f919dbf5821f0cb8a210ce24abe39e9e, licenses=["AML"], count=427, first=2017-09-29T22:54:36.000Z, last=2022-02-15T19:26:50.000Z
max_forks: path=dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/tests/test_ir.py, repo=olivier-be/lumberyard, head=3d688932f919dbf5821f0cb8a210ce24abe39e9e, licenses=["AML"], count=671, first=2017-09-21T08:04:01.000Z, last=2022-03-29T14:30:07.000Z
content:
from __future__ import print_function
import numba.unittest_support as unittest
from numba import compiler, ir, objmode
import numpy as np
class TestIR(unittest.TestCase):
def test_IRScope(self):
filename = "<?>"
top = ir.Scope(parent=None, loc=ir.Loc(filename=filename, line=1))
local = ir.Scope(parent=top, loc=ir.Loc(filename=filename, line=2))
apple = local.define('apple', loc=ir.Loc(filename=filename, line=3))
self.assertIs(local.get('apple'), apple)
self.assertEqual(len(local.localvars), 1)
orange = top.define('orange', loc=ir.Loc(filename=filename, line=4))
self.assertEqual(len(local.localvars), 1)
self.assertEqual(len(top.localvars), 1)
self.assertIs(top.get('orange'), orange)
self.assertIs(local.get('orange'), orange)
more_orange = local.define('orange', loc=ir.Loc(filename=filename,
line=5))
self.assertIs(top.get('orange'), orange)
self.assertIsNot(local.get('orange'), not orange)
self.assertIs(local.get('orange'), more_orange)
try:
local.define('orange', loc=ir.Loc(filename=filename, line=5))
except ir.RedefinedError:
pass
else:
self.fail("Expecting an %s" % ir.RedefinedError)
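# Shared helper class: check() asserts that `base` compares equal to every item in `same` and unequal to every item in `different`.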
class CheckEquality(unittest.TestCase):
var_a = ir.Var(None, 'a', ir.unknown_loc)
var_b = ir.Var(None, 'b', ir.unknown_loc)
var_c = ir.Var(None, 'c', ir.unknown_loc)
var_d = ir.Var(None, 'd', ir.unknown_loc)
var_e = ir.Var(None, 'e', ir.unknown_loc)
loc1 = ir.Loc('mock', 1, 0)
loc2 = ir.Loc('mock', 2, 0)
loc3 = ir.Loc('mock', 3, 0)
def check(self, base, same=[], different=[]):
for s in same:
self.assertTrue(base == s)
for d in different:
self.assertTrue(base != d)
class TestIRMeta(CheckEquality):
"""
Tests IR node meta, like Loc and Scope
"""
def test_loc(self):
a = ir.Loc('file', 1, 0)
b = ir.Loc('file', 1, 0)
c = ir.Loc('pile', 1, 0)
d = ir.Loc('file', 2, 0)
e = ir.Loc('file', 1, 1)
self.check(a, same=[b,], different=[c, d, e])
f = ir.Loc('file', 1, 0, maybe_decorator=False)
g = ir.Loc('file', 1, 0, maybe_decorator=True)
self.check(a, same=[f, g])
def test_scope(self):
parent1 = ir.Scope(None, self.loc1)
parent2 = ir.Scope(None, self.loc1)
parent3 = ir.Scope(None, self.loc2)
self.check(parent1, same=[parent2, parent3,])
a = ir.Scope(parent1, self.loc1)
b = ir.Scope(parent1, self.loc1)
c = ir.Scope(parent1, self.loc2)
d = ir.Scope(parent3, self.loc1)
self.check(a, same=[b, c, d])
# parent1 and parent2 are equal, so children referring to either parent
# should be equal
e = ir.Scope(parent2, self.loc1)
self.check(a, same=[e,])
class TestIRNodes(CheckEquality):
"""
Tests IR nodes
"""
def test_terminator(self):
# terminator base class inst should always be equal
t1 = ir.Terminator()
t2 = ir.Terminator()
self.check(t1, same=[t2])
def test_jump(self):
a = ir.Jump(1, self.loc1)
b = ir.Jump(1, self.loc1)
c = ir.Jump(1, self.loc2)
d = ir.Jump(2, self.loc1)
self.check(a, same=[b, c], different=[d])
def test_return(self):
a = ir.Return(self.var_a, self.loc1)
b = ir.Return(self.var_a, self.loc1)
c = ir.Return(self.var_a, self.loc2)
d = ir.Return(self.var_b, self.loc1)
self.check(a, same=[b, c], different=[d])
def test_raise(self):
a = ir.Raise(self.var_a, self.loc1)
b = ir.Raise(self.var_a, self.loc1)
c = ir.Raise(self.var_a, self.loc2)
d = ir.Raise(self.var_b, self.loc1)
self.check(a, same=[b, c], different=[d])
def test_staticraise(self):
a = ir.StaticRaise(AssertionError, None, self.loc1)
b = ir.StaticRaise(AssertionError, None, self.loc1)
c = ir.StaticRaise(AssertionError, None, self.loc2)
e = ir.StaticRaise(AssertionError, ("str",), self.loc1)
d = ir.StaticRaise(RuntimeError, None, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_branch(self):
a = ir.Branch(self.var_a, 1, 2, self.loc1)
b = ir.Branch(self.var_a, 1, 2, self.loc1)
c = ir.Branch(self.var_a, 1, 2, self.loc2)
d = ir.Branch(self.var_b, 1, 2, self.loc1)
e = ir.Branch(self.var_a, 2, 2, self.loc1)
f = ir.Branch(self.var_a, 1, 3, self.loc1)
self.check(a, same=[b, c], different=[d, e, f])
def test_expr(self):
a = ir.Expr('some_op', self.loc1)
b = ir.Expr('some_op', self.loc1)
c = ir.Expr('some_op', self.loc2)
d = ir.Expr('some_other_op', self.loc1)
self.check(a, same=[b, c], different=[d])
def test_setitem(self):
a = ir.SetItem(self.var_a, self.var_b, self.var_c, self.loc1)
b = ir.SetItem(self.var_a, self.var_b, self.var_c, self.loc1)
c = ir.SetItem(self.var_a, self.var_b, self.var_c, self.loc2)
d = ir.SetItem(self.var_d, self.var_b, self.var_c, self.loc1)
e = ir.SetItem(self.var_a, self.var_d, self.var_c, self.loc1)
f = ir.SetItem(self.var_a, self.var_b, self.var_d, self.loc1)
self.check(a, same=[b, c], different=[d, e, f])
def test_staticsetitem(self):
a = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_c, self.loc1)
b = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_c, self.loc1)
c = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_c, self.loc2)
d = ir.StaticSetItem(self.var_d, 1, self.var_b, self.var_c, self.loc1)
e = ir.StaticSetItem(self.var_a, 2, self.var_b, self.var_c, self.loc1)
f = ir.StaticSetItem(self.var_a, 1, self.var_d, self.var_c, self.loc1)
g = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_d, self.loc1)
self.check(a, same=[b, c], different=[d, e, f, g])
def test_delitem(self):
a = ir.DelItem(self.var_a, self.var_b, self.loc1)
b = ir.DelItem(self.var_a, self.var_b, self.loc1)
c = ir.DelItem(self.var_a, self.var_b, self.loc2)
d = ir.DelItem(self.var_c, self.var_b, self.loc1)
e = ir.DelItem(self.var_a, self.var_c, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_del(self):
a = ir.Del(self.var_a.name, self.loc1)
b = ir.Del(self.var_a.name, self.loc1)
c = ir.Del(self.var_a.name, self.loc2)
d = ir.Del(self.var_b.name, self.loc1)
self.check(a, same=[b, c], different=[d])
def test_setattr(self):
a = ir.SetAttr(self.var_a, 'foo', self.var_b, self.loc1)
b = ir.SetAttr(self.var_a, 'foo', self.var_b, self.loc1)
c = ir.SetAttr(self.var_a, 'foo', self.var_b, self.loc2)
d = ir.SetAttr(self.var_c, 'foo', self.var_b, self.loc1)
e = ir.SetAttr(self.var_a, 'bar', self.var_b, self.loc1)
f = ir.SetAttr(self.var_a, 'foo', self.var_c, self.loc1)
self.check(a, same=[b, c], different=[d, e, f])
def test_delattr(self):
a = ir.DelAttr(self.var_a, 'foo', self.loc1)
b = ir.DelAttr(self.var_a, 'foo', self.loc1)
c = ir.DelAttr(self.var_a, 'foo', self.loc2)
d = ir.DelAttr(self.var_c, 'foo', self.loc1)
e = ir.DelAttr(self.var_a, 'bar', self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_assign(self):
a = ir.Assign(self.var_a, self.var_b, self.loc1)
b = ir.Assign(self.var_a, self.var_b, self.loc1)
c = ir.Assign(self.var_a, self.var_b, self.loc2)
d = ir.Assign(self.var_c, self.var_b, self.loc1)
e = ir.Assign(self.var_a, self.var_c, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_print(self):
a = ir.Print((self.var_a,), self.var_b, self.loc1)
b = ir.Print((self.var_a,), self.var_b, self.loc1)
c = ir.Print((self.var_a,), self.var_b, self.loc2)
d = ir.Print((self.var_c,), self.var_b, self.loc1)
e = ir.Print((self.var_a,), self.var_c, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_storemap(self):
a = ir.StoreMap(self.var_a, self.var_b, self.var_c, self.loc1)
b = ir.StoreMap(self.var_a, self.var_b, self.var_c, self.loc1)
c = ir.StoreMap(self.var_a, self.var_b, self.var_c, self.loc2)
d = ir.StoreMap(self.var_d, self.var_b, self.var_c, self.loc1)
e = ir.StoreMap(self.var_a, self.var_d, self.var_c, self.loc1)
f = ir.StoreMap(self.var_a, self.var_b, self.var_d, self.loc1)
self.check(a, same=[b, c], different=[d, e, f])
def test_yield(self):
a = ir.Yield(self.var_a, self.loc1, 0)
b = ir.Yield(self.var_a, self.loc1, 0)
c = ir.Yield(self.var_a, self.loc2, 0)
d = ir.Yield(self.var_b, self.loc1, 0)
e = ir.Yield(self.var_a, self.loc1, 1)
self.check(a, same=[b, c], different=[d, e])
def test_enterwith(self):
a = ir.EnterWith(self.var_a, 0, 1, self.loc1)
b = ir.EnterWith(self.var_a, 0, 1, self.loc1)
c = ir.EnterWith(self.var_a, 0, 1, self.loc2)
d = ir.EnterWith(self.var_b, 0, 1, self.loc1)
e = ir.EnterWith(self.var_a, 1, 1, self.loc1)
f = ir.EnterWith(self.var_a, 0, 2, self.loc1)
self.check(a, same=[b, c], different=[d, e, f])
def test_arg(self):
a = ir.Arg('foo', 0, self.loc1)
b = ir.Arg('foo', 0, self.loc1)
c = ir.Arg('foo', 0, self.loc2)
d = ir.Arg('bar', 0, self.loc1)
e = ir.Arg('foo', 1, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_const(self):
a = ir.Const(1, self.loc1)
b = ir.Const(1, self.loc1)
c = ir.Const(1, self.loc2)
d = ir.Const(2, self.loc1)
self.check(a, same=[b, c], different=[d])
def test_global(self):
a = ir.Global('foo', 0, self.loc1)
b = ir.Global('foo', 0, self.loc1)
c = ir.Global('foo', 0, self.loc2)
d = ir.Global('bar', 0, self.loc1)
e = ir.Global('foo', 1, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_var(self):
a = ir.Var(None, 'foo', self.loc1)
b = ir.Var(None, 'foo', self.loc1)
c = ir.Var(None, 'foo', self.loc2)
d = ir.Var(ir.Scope(None, ir.unknown_loc), 'foo', self.loc1)
e = ir.Var(None, 'bar', self.loc1)
self.check(a, same=[b, c, d], different=[e])
def test_intrinsic(self):
a = ir.Intrinsic('foo', 'bar', (0,), self.loc1)
b = ir.Intrinsic('foo', 'bar', (0,), self.loc1)
c = ir.Intrinsic('foo', 'bar', (0,), self.loc2)
d = ir.Intrinsic('baz', 'bar', (0,), self.loc1)
e = ir.Intrinsic('foo', 'baz', (0,), self.loc1)
f = ir.Intrinsic('foo', 'bar', (1,), self.loc1)
self.check(a, same=[b, c], different=[d, e, f])
def test_undefinedtype(self):
a = ir.UndefinedType()
b = ir.UndefinedType()
self.check(a, same=[b])
def test_loop(self):
a = ir.Loop(1, 3)
b = ir.Loop(1, 3)
c = ir.Loop(2, 3)
d = ir.Loop(1, 4)
self.check(a, same=[b], different=[c, d])
def test_with(self):
a = ir.With(1, 3)
b = ir.With(1, 3)
c = ir.With(2, 3)
d = ir.With(1, 4)
self.check(a, same=[b], different=[c, d])
# used later
_GLOBAL = 1234
class TestIRCompounds(CheckEquality):
"""
Tests IR concepts that have state
"""
def test_varmap(self):
a = ir.VarMap()
a.define(self.var_a, 'foo')
a.define(self.var_b, 'bar')
b = ir.VarMap()
b.define(self.var_a, 'foo')
b.define(self.var_b, 'bar')
c = ir.VarMap()
c.define(self.var_a, 'foo')
c.define(self.var_c, 'bar')
self.check(a, same=[b], different=[c])
def test_block(self):
def gen_block():
parent = ir.Scope(None, self.loc1)
tmp = ir.Block(parent, self.loc2)
assign1 = ir.Assign(self.var_a, self.var_b, self.loc3)
assign2 = ir.Assign(self.var_a, self.var_c, self.loc3)
assign3 = ir.Assign(self.var_c, self.var_b, self.loc3)
tmp.append(assign1)
tmp.append(assign2)
tmp.append(assign3)
return tmp
a = gen_block()
b = gen_block()
c = gen_block().append(ir.Assign(self.var_a, self.var_b, self.loc3))
self.check(a, same=[b], different=[c])
def test_functionir(self):
# this creates a function full of all sorts of things to ensure the IR
# is pretty involved, it then compares two instances of the compiled
# function IR to check the IR is the same invariant of objects, and then
# a tiny mutation is made to the IR in the second function and detection
# of this change is checked.
def gen():
_FREEVAR = 0xCAFE
def foo(a, b, c=12, d=1j, e=None):
f = a + b
a += _FREEVAR
g = np.zeros(c, dtype=np.complex64)
h = f + g
i = 1j / d
if np.abs(i) > 0:
k = h / i
l = np.arange(1, c + 1)
with objmode():
print(e, k)
m = np.sqrt(l - g)
if np.abs(m[0]) < 1:
n = 0
for o in range(a):
n += 0
if np.abs(n) < 3:
break
n += m[2]
p = g / l
q = []
for r in range(len(p)):
q.append(p[r])
if r > 4 + 1:
with objmode(s='intp', t='complex128'):
s = 123
t = 5
if s > 122:
t += s
t += q[0] + _GLOBAL
return f + o + r + t + r + a + n
return foo
x = gen()
y = gen()
x_ir = compiler.run_frontend(x)
y_ir = compiler.run_frontend(y)
self.assertTrue(x_ir.equal_ir(y_ir))
def check_diffstr(string, pointing_at=[]):
lines = string.splitlines()
for item in pointing_at:
for l in lines:
if l.startswith('->'):
if item in l:
break
else:
raise AssertionError("Could not find %s " % item)
self.assertIn("IR is considered equivalent", x_ir.diff_str(y_ir))
# minor mutation, simply switch branch targets on last branch
for label in reversed(list(y_ir.blocks.keys())):
blk = y_ir.blocks[label]
if isinstance(blk.body[-1], ir.Branch):
ref = blk.body[-1]
ref.truebr, ref.falsebr = ref.falsebr, ref.truebr
break
check_diffstr(x_ir.diff_str(y_ir), ['branch'])
z = gen()
self.assertFalse(x_ir.equal_ir(y_ir))
z_ir = compiler.run_frontend(z)
change_set = set()
for label in reversed(list(z_ir.blocks.keys())):
blk = z_ir.blocks[label]
ref = blk.body[:-1]
idx = None
for i in range(len(ref)):
# look for two adjacent Del
if (isinstance(ref[i], ir.Del) and
isinstance(ref[i + 1], ir.Del)):
idx = i
break
if idx is not None:
b = blk.body
change_set.add(str(b[idx + 1]))
change_set.add(str(b[idx]))
b[idx], b[idx + 1] = b[idx + 1], b[idx]
break
self.assertFalse(x_ir.equal_ir(z_ir))
self.assertEqual(len(change_set), 2)
for item in change_set:
self.assertTrue(item.startswith('del '))
check_diffstr(x_ir.diff_str(z_ir), change_set)
def foo(a, b):
c = a * 2
d = c + b
e = np.sqrt(d)
return e
def bar(a, b): # same as foo
c = a * 2
d = c + b
e = np.sqrt(d)
return e
def baz(a, b):
c = a * 2
d = b + c
e = np.sqrt(d + 1)
return e
foo_ir = compiler.run_frontend(foo)
bar_ir = compiler.run_frontend(bar)
self.assertTrue(foo_ir.equal_ir(bar_ir))
self.assertIn("IR is considered equivalent", foo_ir.diff_str(bar_ir))
baz_ir = compiler.run_frontend(baz)
self.assertFalse(foo_ir.equal_ir(baz_ir))
tmp = foo_ir.diff_str(baz_ir)
self.assertIn("Other block contains more statements", tmp)
check_diffstr(tmp, ["c + b", "b + c"])
if __name__ == '__main__':
unittest.main()
avg_line_length: 36.458774 | max_line_length: 80 | alphanum_fraction: 0.534126
count_classes: 17,011 | score_classes: 0.986431 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 1,313 | score_documentation: 0.076138

hexsha: 55c37842e6305ac81f748c98ec0be9fc4a30c176 | size: 13,629 | ext: py | lang: Python
max_stars: path=pyannote/audio/applications/base.py, repo=Ruslanmlnkv/pyannote-audio, head=b678920057ace936c8900c62d2975e958903fae2, licenses=["MIT"], count=2, first=2018-10-25T19:32:27.000Z, last=2021-06-19T15:14:16.000Z
max_issues: path=pyannote/audio/applications/base.py, repo=Ruslanmlnkv/pyannote-audio, head=b678920057ace936c8900c62d2975e958903fae2, licenses=["MIT"], count=null, first=null, last=null
max_forks: path=pyannote/audio/applications/base.py, repo=Ruslanmlnkv/pyannote-audio, head=b678920057ace936c8900c62d2975e958903fae2, licenses=["MIT"], count=null, first=null, last=null
content:
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2017 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
import time
import yaml
from os.path import dirname, basename
import numpy as np
from tqdm import tqdm
from glob import glob
from pyannote.database import FileFinder
from pyannote.database import get_protocol
from pyannote.audio.util import mkdir_p
from sortedcontainers import SortedDict
import tensorboardX
from functools import partial
class Application(object):
CONFIG_YML = '{experiment_dir}/config.yml'
TRAIN_DIR = '{experiment_dir}/train/{protocol}.{subset}'
WEIGHTS_PT = '{train_dir}/weights/{epoch:04d}.pt'
# created by "validate" mode
VALIDATE_DIR = '{train_dir}/validate/{protocol}.{subset}'
@classmethod
def from_train_dir(cls, train_dir, db_yml=None):
experiment_dir = dirname(dirname(train_dir))
app = cls(experiment_dir, db_yml=db_yml)
app.train_dir_ = train_dir
return app
@classmethod
def from_validate_txt(cls, validate_txt, db_yml=None):
train_dir = dirname(dirname(dirname(validate_txt)))
app = cls.from_train_dir(train_dir, db_yml=db_yml)
app.validate_txt_ = validate_txt
return app
@classmethod
def from_model_pt(cls, model_pt, db_yml=None):
train_dir = dirname(dirname(model_pt))
app = cls.from_train_dir(train_dir, db_yml=db_yml)
app.model_pt_ = model_pt
epoch = int(basename(app.model_pt_)[:-3])
app.model_ = app.load_model(epoch, train_dir=train_dir)
return app
def __init__(self, experiment_dir, db_yml=None):
super(Application, self).__init__()
self.db_yml = db_yml
self.preprocessors_ = {'audio': FileFinder(self.db_yml)}
self.experiment_dir = experiment_dir
# load configuration
config_yml = self.CONFIG_YML.format(experiment_dir=self.experiment_dir)
with open(config_yml, 'r') as fp:
self.config_ = yaml.load(fp)
# scheduler
SCHEDULER_DEFAULT = {'name': 'DavisKingScheduler',
'params': {'learning_rate': 'auto'}}
scheduler_cfg = self.config_.get('scheduler', SCHEDULER_DEFAULT)
scheduler_name = scheduler_cfg['name']
schedulers = __import__('pyannote.audio.train.schedulers',
fromlist=[scheduler_name])
Scheduler = getattr(schedulers, scheduler_name)
scheduler_params = scheduler_cfg.get('params', {})
self.get_scheduler_ = partial(Scheduler, **scheduler_params)
self.learning_rate_ = scheduler_params.get('learning_rate', 'auto')
# optimizer
OPTIMIZER_DEFAULT = {
'name': 'SGD',
'params': {'momentum': 0.9, 'dampening': 0, 'weight_decay': 0,
'nesterov': True}}
optimizer_cfg = self.config_.get('optimizer', OPTIMIZER_DEFAULT)
optimizer_name = optimizer_cfg['name']
optimizers = __import__('torch.optim',
fromlist=[optimizer_name])
Optimizer = getattr(optimizers, optimizer_name)
optimizer_params = optimizer_cfg.get('params', {})
self.get_optimizer_ = partial(Optimizer, **optimizer_params)
# feature extraction
if 'feature_extraction' in self.config_:
extraction_name = self.config_['feature_extraction']['name']
features = __import__('pyannote.audio.features',
fromlist=[extraction_name])
FeatureExtraction = getattr(features, extraction_name)
self.feature_extraction_ = FeatureExtraction(
**self.config_['feature_extraction'].get('params', {}))
def train(self, protocol_name, subset='train', restart=None, epochs=1000):
train_dir = self.TRAIN_DIR.format(
experiment_dir=self.experiment_dir,
protocol=protocol_name,
subset=subset)
protocol = get_protocol(protocol_name, progress=True,
preprocessors=self.preprocessors_)
self.task_.fit(
self.model_, self.feature_extraction_,
protocol, subset=subset, restart=restart, epochs=epochs,
get_optimizer=self.get_optimizer_,
get_scheduler=self.get_scheduler_,
learning_rate=self.learning_rate_,
log_dir=train_dir, device=self.device)
def load_model(self, epoch, train_dir=None):
"""Load pretrained model
Parameters
----------
epoch : int
Which epoch to load.
train_dir : str, optional
Path to train directory. Defaults to self.train_dir_.
"""
if train_dir is None:
train_dir = self.train_dir_
import torch
weights_pt = self.WEIGHTS_PT.format(
train_dir=train_dir, epoch=epoch)
self.model_.load_state_dict(torch.load(weights_pt))
return self.model_
def get_number_of_epochs(self, train_dir=None, return_first=False):
"""Get information about completed epochs
Parameters
----------
train_dir : str, optional
Training directory. Defaults to self.train_dir_
return_first : bool, optional
Defaults (False) to return number of epochs.
Set to True to also return index of first epoch.
"""
if train_dir is None:
train_dir = self.train_dir_
directory = self.WEIGHTS_PT.format(train_dir=train_dir, epoch=0)[:-7]
weights = sorted(glob(directory + '*[0-9][0-9][0-9][0-9].pt'))
if not weights:
number_of_epochs = 0
first_epoch = None
else:
number_of_epochs = int(basename(weights[-1])[:-3]) + 1
first_epoch = int(basename(weights[0])[:-3])
return (number_of_epochs, first_epoch) if return_first \
else number_of_epochs
def validate_init(self, protocol_name, subset='development'):
pass
def validate_epoch(self, epoch, protocol_name, subset='development',
validation_data=None):
raise NotImplementedError('')
def validate(self, protocol_name, subset='development',
every=1, start=0, end=None, in_order=False, **kwargs):
minimize, values, best_epoch, best_value = {}, {}, {}, {}
validate_dir = self.VALIDATE_DIR.format(train_dir=self.train_dir_,
protocol=protocol_name,
subset=subset)
mkdir_p(validate_dir)
writer = tensorboardX.SummaryWriter(log_dir=validate_dir)
validation_data = self.validate_init(protocol_name, subset=subset,
**kwargs)
progress_bar = tqdm(unit='epoch')
for i, epoch in enumerate(
self.validate_iter(start=start, end=end, step=every,
in_order=in_order)):
# {'metric1': {'minimize': True, 'value': 0.2},
# 'metric2': {'minimize': False, 'value': 0.9}}
metrics = self.validate_epoch(epoch, protocol_name, subset=subset,
validation_data=validation_data)
if i == 0:
for metric, details in metrics.items():
minimize[metric] = details.get('minimize', True)
values[metric] = SortedDict()
description = 'Epoch #{epoch}'.format(epoch=epoch)
for metric, details in sorted(metrics.items()):
value = details['value']
values[metric][epoch] = value
writer.add_scalar(
f'validate/{protocol_name}.{subset}/{metric}',
values[metric][epoch], global_step=epoch)
# keep track of best epoch so far
if minimize[metric] == 'NA':
best_value = 'NA'
elif minimize[metric]:
best_epoch = \
values[metric].iloc[np.argmin(values[metric].values())]
best_value = values[metric][best_epoch]
else:
best_epoch = \
values[metric].iloc[np.argmax(values[metric].values())]
best_value = values[metric][best_epoch]
if best_value == 'NA':
continue
if abs(best_value) < 1:
addon = (' : {metric} = {value:.3f}% '
'[{best_value:.3f}%, #{best_epoch}]')
description += addon.format(metric=metric, value=100 * value,
best_value=100 * best_value,
best_epoch=best_epoch)
else:
addon = (' : {metric} = {value:.3f} '
'[{best_value:.3f}, #{best_epoch}]')
description += addon.format(metric=metric, value=value,
best_value=best_value,
best_epoch=best_epoch)
progress_bar.set_description(description)
progress_bar.update(1)
def validate_iter(self, start=None, end=None, step=1, sleep=10,
in_order=False):
"""Continuously watches `train_dir` for newly completed epochs
and yields them for validation
Note that epochs will not necessarily be yielded in order.
The very last completed epoch will always be first on the list.
Parameters
----------
start : int, optional
Start validating after `start` epochs. Defaults to 0.
end : int, optional
Stop validating after epoch `end`. Defaults to never stop.
step : int, optional
Validate every `step`th epoch. Defaults to 1.
        sleep : int, optional
            Number of seconds to wait between two checks for newly
            completed epochs. Defaults to 10.
in_order : bool, optional
Force chronological validation.
Usage
-----
>>> for epoch in app.validate_iter():
... app.validate(epoch)
"""
if end is None:
end = np.inf
if start is None:
start = 0
validated_epochs = set()
next_epoch_to_validate_in_order = start
while next_epoch_to_validate_in_order < end:
# wait for first epoch to complete
_, first_epoch = self.get_number_of_epochs(return_first=True)
if first_epoch is None:
print('waiting for first epoch to complete...')
time.sleep(sleep)
continue
# corner case: make sure this does not wait forever
# for epoch 'start' as it might never happen, in case
# training is started after n pre-existing epochs
if next_epoch_to_validate_in_order < first_epoch:
next_epoch_to_validate_in_order = first_epoch
# first epoch has completed
break
while True:
# check last completed epoch
last_completed_epoch = self.get_number_of_epochs() - 1
# if last completed epoch has not been processed yet,
# always process it first (except if 'in order')
if (not in_order) and (last_completed_epoch not in validated_epochs):
next_epoch_to_validate = last_completed_epoch
time.sleep(5) # HACK give checkpoint time to save weights
# in case no new epoch has completed since last time
# process the next epoch in chronological order (if available)
elif next_epoch_to_validate_in_order <= last_completed_epoch:
next_epoch_to_validate = next_epoch_to_validate_in_order
# otherwise, just wait for a new epoch to complete
else:
time.sleep(sleep)
continue
if next_epoch_to_validate not in validated_epochs:
# yield next epoch to process
yield next_epoch_to_validate
# remember which epoch was processed
validated_epochs.add(next_epoch_to_validate)
# increment 'in_order' processing
if next_epoch_to_validate_in_order == next_epoch_to_validate:
next_epoch_to_validate_in_order += step
| 38.176471
| 81
| 0.595935
| 12,089
| 0.886941
| 3,152
| 0.231255
| 806
| 0.059134
| 0
| 0
| 4,238
| 0.310932
|
55c46dbffcc8bf64a692ba3c182ecb46d711b58d
| 9,359
|
py
|
Python
|
cogs/games/checkers.py
|
itsVale/Vale.py
|
6b3cac68d53e8d814ee969a959aae4de52beda80
|
[
"MIT"
] | 14
|
2018-08-06T06:45:19.000Z
|
2018-12-28T14:20:33.000Z
|
cogs/games/checkers.py
|
Mystic-Alchemy/Vale.py
|
b4cc964d34672444c65e2801a15f37d774c5e6e3
|
[
"MIT"
] | 10
|
2018-10-06T10:52:08.000Z
|
2018-12-28T14:21:47.000Z
|
cogs/games/checkers.py
|
Mystic-Alchemy/Vale.py
|
b4cc964d34672444c65e2801a15f37d774c5e6e3
|
[
"MIT"
] | 13
|
2018-09-23T20:13:10.000Z
|
2019-01-26T11:02:37.000Z
|
import itertools
import random
import re
import discord
from more_itertools import chunked, pairwise, sliced, spy
from .base import Status, TwoPlayerGameCog, TwoPlayerSession
from utils.misc import emoji_url
BLACK, WHITE = False, True
PIECES = BK_PIECE, WH_PIECE = 'bw'
KINGS = BK_KING, WH_KING = 'BW'
CHECKERS_BLACK_KING = '\N{HEAVY BLACK HEART}'
CHECKERS_WHITE_KING = '\N{BLUE HEART}'
CHECKERS_BLACK_LAST_MOVE = ''
CHECKERS_WHITE_LAST_MOVE = ''
_is_king = str.isupper
def _get_checkers(start, end, direction):
return [
(x, y) for y, x in itertools.product(range(start, end + direction, direction), range(8))
if (x + y) % 2 == 1
]
_STARTING_BOARD = [
' ', BK_PIECE, ' ', BK_PIECE, ' ', BK_PIECE, ' ', BK_PIECE,
BK_PIECE, ' ', BK_PIECE, ' ', BK_PIECE, ' ', BK_PIECE, ' ',
' ', BK_PIECE, ' ', BK_PIECE, ' ', BK_PIECE, ' ', BK_PIECE,
' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
WH_PIECE, ' ', WH_PIECE, ' ', WH_PIECE, ' ', WH_PIECE, ' ',
' ', WH_PIECE, ' ', WH_PIECE, ' ', WH_PIECE, ' ', WH_PIECE,
WH_PIECE, ' ', WH_PIECE, ' ', WH_PIECE, ' ', WH_PIECE, ' ',
]
X = 'abcdefgh'
Y = '87654321'
def _to_i(x, y):
return y * 8 + x
_STARTING_BOARD = [' '] * 64
_STARTING_BOARD[_to_i(3, 4)] = BK_PIECE
_STARTING_BOARD[_to_i(4, 3)] = WH_PIECE
def _i_to_xy(i):
y, x = divmod(i, 8)
return X[x] + Y[y]
def _xy_to_i(xy):
x, y = xy
return _to_i(X.index(x), Y.index(y))
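# Illustrative round trip (added as a sketch, not part of the original module):
# the helpers above convert between flat board indices and algebraic coordinates.
#   _to_i(0, 0)     # -> 0, the square labelled 'a8'
#   _i_to_xy(0)     # -> 'a8'
#   _xy_to_i('a8')  # -> 0
#   _xy_to_i('h1')  # -> 63, the bottom-right square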
def _in_range(x, y):
return 0 <= x < 8 and 0 <= y < 8
def _moves(x, y, dy):
return [_to_i(x + dx, y + dy) for dx in (-1, 1) if _in_range(x + dx, y + dy)]
def _captures(x, y, dy):
return [
(_to_i(x + dx, y + dy), _to_i(x + dx * 2, y + dy * 2))
for dx in (-1, 1)
if _in_range(x + dx, y + dy) and _in_range(x + dx * 2, y + dy * 2)
]
def _make_dict(f):
moves = {
BK_PIECE: {_to_i(x, y): f(x, y, 1) for x, y in _get_checkers(0, 8, 1)},
WH_PIECE: {_to_i(x, y): f(x, y, -1) for x, y in _get_checkers(8, 0, -1)}
}
# Kings can move anywhere
moves[BK_KING] = moves[WH_KING] = {
k: moves[WH_PIECE].get(k, []) + moves[BK_PIECE].get(k, [])
for k in moves[BK_PIECE].keys() | moves[WH_PIECE].keys()
}
return moves
# Generate lookup table for moves
_MOVES = _make_dict(_moves)
_CAPTURES = _make_dict(_captures)
class Board:
TILES = {
BLACK: '\N{BLACK LARGE SQUARE}',
WHITE: '\N{WHITE LARGE SQUARE}',
BK_PIECE: '\N{LARGE RED CIRCLE}',
WH_PIECE: '\N{LARGE BLUE CIRCLE}',
BK_KING: '\N{HEAVY BLACK HEART}',
WH_KING: '\N{BLUE HEART}',
'BK_LAST_MOVE': '',
'WH_LAST_MOVE': '',
}
X = '\u200b'.join(map(chr, range(0x1f1e6, 0x1f1ee)))
Y = [f'{i}\u20e3' for i in Y]
def __init__(self):
self._board = _STARTING_BOARD[:]
self._half_moves = 0
self._last_move = None
self.turn = WHITE
def __str__(self):
rows = list(self._tiles())
if self._last_move:
last_move_tile = self.TILES[['WH_LAST_MOVE', 'BK_LAST_MOVE'][self.turn]]
if last_move_tile:
for i in map(_xy_to_i, chunked(self._last_move[:-2], 2)):
rows[i] = last_move_tile
board = '\n'.join(f'{y}{"".join(chunk)}' for y, chunk in zip(self.Y, chunked(rows, 8)))
return f'\N{BLACK LARGE SQUARE}{self.X}\n{board}'
@property
def half_moves(self):
return self._half_moves
def _tiles(self):
tiles = self.TILES
for i, char in enumerate(self._board):
key = not sum(divmod(i, 8)) % 2 if char == ' ' else char
yield tiles[key]
def _find_all_pieces(self, colour):
return [i for i, v in enumerate(self._board) if v.lower() == colour]
def legal_moves(self):
"""Generates all legal moves in the current position.
If there are any jumps one could make, those get generated instead,
as jumps must be made according to the rules of Checkers.
"""
jumps_exist, jumps = spy(self.jumps())
if jumps_exist:
yield from jumps
return
board = self._board
for i in self._find_all_pieces(PIECES[self.turn]):
for end in _MOVES[board[i]][i]:
if board[end] == ' ':
yield _i_to_xy(i) + _i_to_xy(end)
def jumps(self):
"""Generates all jumps one can make in the current position."""
owner = PIECES[self.turn]
return itertools.chain.from_iterable(map(self.jumps_from, self._find_all_pieces(owner)))
def jumps_from(self, square):
"""Generates all jumps from a particular square in the current position."""
board = self._board
captures = _CAPTURES[board[square]]
def jump_helper(square, captured):
is_king = _is_king(board[square])
for jump_over, jump_end in captures[square]:
if board[jump_over].lower() != PIECES[not self.turn]:
continue
if jump_over in captured:
# no loops
continue
if board[jump_end] != ' ':
# The square must be empty (obviously)
continue
if not is_king and square >> 3 == 7 * self.turn:
yield square, jump_end
else:
chain_exists, squares = spy(jump_helper(jump_end, captured | {jump_over}))
if chain_exists:
for sequence in squares:
yield (square, *sequence)
else:
yield (square, jump_end)
return (''.join(map(_i_to_xy, s)) for s in jump_helper(square, set()))
def is_game_over(self):
"""Returns True if the game is over for the current player. False otherwise."""
return next(self.legal_moves(), None) is None
def move(self, move):
"""Takes a move and apply it to the game."""
if move not in self.legal_moves():
raise ValueError(f'illegal move: {move!r}')
board = self._board
squares = [_xy_to_i(xy) for xy in sliced(move, 2)]
end = squares[-1]
piece = board[squares[0]]
if end >> 3 == 7 * (not self.turn) and not _is_king(piece):
# New king
piece = piece.upper()
for before, after in pairwise(squares):
difference = abs(before - after)
if difference not in {18, 14}:
continue
# A two step rather than a one step means a capture.
square_between = min(before, after) + difference // 2
board[square_between] = ' '
board[squares[0]] = ' '
board[end] = piece
self._last_move = move
self._half_moves += 1
self.turn = not self.turn
# Below is the game logic. If you just want to copy the board, Ignore this.
_VALID_MOVE_REGEX = re.compile(r'^([a-h][1-8]\s?)+', re.IGNORECASE)
_MESSAGES = {
Status.PLAYING: 'Your turn, {user}',
Status.END: '{user} wins!',
Status.QUIT: '{user} ragequitted.',
Status.TIMEOUT: '{user} ran out of time.',
}
def _safe_sample(population, k):
return random.sample(population, min(k, len(population)))
class CheckersSession(TwoPlayerSession, move_pattern=_VALID_MOVE_REGEX, board_factory=Board):
def __init__(self, ctx, opponent):
super().__init__(ctx, opponent)
if ctx.bot_has_permissions(external_emojis=True):
self._board.TILES = {
**self._board.TILES,
BK_KING: str(CHECKERS_BLACK_KING),
WH_KING: str(CHECKERS_WHITE_KING),
'BK_LAST_MOVE': str(CHECKERS_BLACK_LAST_MOVE),
'WH_LAST_MOVE': str(CHECKERS_WHITE_LAST_MOVE),
}
def current(self):
return self._players[self._board.turn]
def _push_move(self, move):
self._board.move(move[0])
def _is_game_over(self):
return self._board.is_game_over()
def _instructions(self):
if self._board.half_moves >= 4:
return ''
sample = _safe_sample(list(self._board.legal_moves()), 5)
joined = ', '.join(f'`{c}`' for c in sample)
return (
'**Instructions:**\n'
'Type the position of the piece you want to move,\n'
'and where you want to move it.\n'
f'**Example:**\n{joined}\n\u200b\n'
)
async def _update_display(self):
board = self._board
if self._status is Status.PLAYING:
instructions = self._instructions()
icon = emoji_url(board.TILES[PIECES[board.turn]])
else:
instructions = ''
icon = discord.Embed.Empty
if self._status is Status.END:
user = self._players[not self._board.turn]
else:
user = self.current()
header = _MESSAGES[self._status].format(user=user)
self._display.description = f'{instructions}{board}'
self._display.set_author(name=header, icon_url=icon)
class Checkers(TwoPlayerGameCog, game_cls=CheckersSession):
"""Shortest cog I ever made. Well, games are special."""
def setup(bot):
bot.add_cog(Checkers(bot))
| 30.093248
| 96
| 0.567048
| 6,497
| 0.694198
| 1,960
| 0.209424
| 67
| 0.007159
| 622
| 0.06646
| 1,593
| 0.17021
|
55c5a244138d1f9a3e5a9c72e37cf112606b9cae
| 767
|
py
|
Python
|
setup.py
|
Fohlen/yente
|
bcba9ef3f766fea115de7eb381d7ad1b385d8df8
|
[
"MIT"
] | null | null | null |
setup.py
|
Fohlen/yente
|
bcba9ef3f766fea115de7eb381d7ad1b385d8df8
|
[
"MIT"
] | null | null | null |
setup.py
|
Fohlen/yente
|
bcba9ef3f766fea115de7eb381d7ad1b385d8df8
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
with open("README.md") as f:
long_description = f.read()
setup(
name="yente",
version="1.3.5",
url="https://opensanctions.org/docs/api/",
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT",
author="OpenSanctions",
author_email="info@opensanctions.org",
packages=find_packages(exclude=["examples", "test"]),
namespace_packages=[],
extras_require={
"dev": [
"pip>=10.0.0",
"bump2version",
"wheel>=0.29.0",
"twine",
"mypy",
"pytest",
"pytest-cov",
"flake8>=2.6.0",
"black",
],
},
zip_safe=False,
)
| 23.242424
| 57
| 0.548892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 239
| 0.311604
|
55c74a48da6996ad1f49dfbcbd9bd447049566b8
| 451
|
py
|
Python
|
python-pulseaudio-master/setup.py
|
rrbutani/SoundAndColor
|
44992fa188c109a3b11b2df137b9272a0b6203d8
|
[
"Unlicense"
] | null | null | null |
python-pulseaudio-master/setup.py
|
rrbutani/SoundAndColor
|
44992fa188c109a3b11b2df137b9272a0b6203d8
|
[
"Unlicense"
] | null | null | null |
python-pulseaudio-master/setup.py
|
rrbutani/SoundAndColor
|
44992fa188c109a3b11b2df137b9272a0b6203d8
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
from distutils.core import setup
setup(name='libpulseaudio',
version='1.1',
description='simple libpulseaudio bindings',
author='Valodim',
author_email='valodim@mugenguild.com',
license='LGPL',
url='http://github.com/valodim/python-pulseaudio',
packages=['pulseaudio'],
provides=['libpulseaudio'],
download_url='http://datatomb.de/~valodim/libpulseaudio-1.1.tar.gz'
)
| 28.1875
| 73
| 0.662971
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 237
| 0.525499
|
55c807db743e48332bd230ddf2d2f732bbf1c1d4
| 2,006
|
py
|
Python
|
vectorization.py
|
creadal/articles-classifier
|
d7b7df5687e57da91fae2bb095f1617d729a00a2
|
[
"MIT"
] | null | null | null |
vectorization.py
|
creadal/articles-classifier
|
d7b7df5687e57da91fae2bb095f1617d729a00a2
|
[
"MIT"
] | null | null | null |
vectorization.py
|
creadal/articles-classifier
|
d7b7df5687e57da91fae2bb095f1617d729a00a2
|
[
"MIT"
] | null | null | null |
import codecs
import numpy as np
import random
categories = ['science', 'style', 'culture', 'life', 'economics', 'business', 'travel', 'forces', 'media', 'sport']
dict_file = codecs.open('processed/dictionary.txt', 'r', 'utf_8_sig')
dictionary = []
for line in dict_file:
line = line[: len(line) - 1]
dictionary.append(line)
def similar_words(word1, word2, coef = .5):
if len(word1) == len(word2):
ch = 0
n = len(word1)
zn = 0
for i in range(n):
zn += np.sqrt(n-i)
for i in range(n):
if word1[i] == word2[i]:
ch+=np.sqrt(n-i)
if ch/zn >= coef:
return True
else:
return False
else:
return False
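# Usage sketch (illustrative values, not from the original script): characters near
# the start of a word carry more weight, so small trailing differences still match.
#   similar_words('hello', 'hellp')  # True  (weighted ratio ~0.88 >= 0.5)
#   similar_words('cat', 'dog')      # False (no positions match)
#   similar_words('cat', 'cats')     # False (different lengths)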
def remove_punctuation(word):
punctuation = ['!', ':', ':', ',', '.', '?', "'", '"', '(', ')', '«', '»', '+', '-', '=', '_', '/', '\\', '|', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
new_word = ''
for symbol in word:
if symbol not in punctuation:
new_word += symbol
return new_word.lower()
def line2vec(line, dictionary):
vector = [0] * len(dictionary)
for word in line.split():
word = remove_punctuation(word)
for d in dictionary:
if similar_words(word, d):
vector[dictionary.index(d)] += 1
return vector
train_file = codecs.open('news_train.txt', 'r', 'utf_8_sig')
input_vectors = []
outputs = []
for line in train_file:
label, name, content = line.split('\t')
vector = line2vec(name, dictionary)
output = [0]*10
output[categories.index(label)] = 1
input_vectors.append(vector)
outputs.append(output)
train_vectors_i = codecs.open('processed/train_vectors_input.txt', 'w+', 'utf_8_sig')
train_vectors_o = codecs.open('processed/train_vectors_outputs.txt', 'w+', 'utf_8_sig')
for i in input_vectors:
train_vectors_i.write(str(i) + '\n')
for i in outputs:
train_vectors_o.write(str(i) +'\n')
print('text processed')
| 25.717949
| 164
| 0.565803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 374
| 0.186255
|
55c88b114fda250da3b41e3041303ef9275c30e5
| 4,734
|
py
|
Python
|
data/spca/preprocess.py
|
energydatalab/mrs
|
f2088fd25594ff0c67faac89013c2f1c58942485
|
[
"MIT"
] | null | null | null |
data/spca/preprocess.py
|
energydatalab/mrs
|
f2088fd25594ff0c67faac89013c2f1c58942485
|
[
"MIT"
] | null | null | null |
data/spca/preprocess.py
|
energydatalab/mrs
|
f2088fd25594ff0c67faac89013c2f1c58942485
|
[
"MIT"
] | null | null | null |
# Built-in
import os
from glob import glob
# Libs
import numpy as np
from tqdm import tqdm
from natsort import natsorted
# Own modules
from data import data_utils
from mrs_utils import misc_utils, process_block
# Settings
DS_NAME = 'spca'
def get_images(data_dir, valid_percent=0.5, split=False):
rgb_files = natsorted(glob(os.path.join(data_dir, '*RGB.jpg')))
lbl_files = natsorted(glob(os.path.join(data_dir, '*GT.png')))
'''ind = np.arange(len(rgb_files))
np.random.shuffle(ind)
rgb_files = [rgb_files[a] for a in ind]
lbl_files = [lbl_files[a] for a in ind]'''
assert len(rgb_files) == len(lbl_files)
city_names = ['Fresno', 'Modesto', 'Stockton', 'aus']
city_files = {city_name: [(rgb_file, lbl_file) for (rgb_file, lbl_file) in zip(rgb_files, lbl_files)
if city_name in rgb_file] for city_name in city_names}
train_files, valid_files = [], []
for city_name, file_pairs in city_files.items():
valid_size = int(valid_percent * len(file_pairs))
train_files.extend(file_pairs[valid_size:])
valid_files.extend(file_pairs[:valid_size])
if split:
return train_files, valid_files
else:
return [a[0] for a in valid_files], [a[1] for a in valid_files]
def create_dataset(data_dir, save_dir, patch_size, pad, overlap, valid_percent=0.1, visualize=False):
# create folders and files
patch_dir = os.path.join(save_dir, 'patches')
misc_utils.make_dir_if_not_exist(patch_dir)
record_file_train = open(os.path.join(save_dir, 'file_list_train_{}.txt').format(
misc_utils.float2str(valid_percent)), 'w+')
record_file_valid = open(os.path.join(save_dir, 'file_list_valid_{}.txt').format(
misc_utils.float2str(valid_percent)), 'w+')
train_files, valid_files = get_images(data_dir, valid_percent, split=True)
for img_file, lbl_file in tqdm(train_files):
city_name = os.path.splitext(os.path.basename(img_file))[0].split('_')[0]
for rgb_patch, gt_patch, y, x in data_utils.patch_tile(img_file, lbl_file, patch_size, pad, overlap):
if visualize:
from mrs_utils import vis_utils
vis_utils.compare_figures([rgb_patch, gt_patch], (1, 2), fig_size=(12, 5))
img_patchname = '{}_y{}x{}.jpg'.format(city_name, int(y), int(x))
lbl_patchname = '{}_y{}x{}.png'.format(city_name, int(y), int(x))
# misc_utils.save_file(os.path.join(patch_dir, img_patchname), rgb_patch.astype(np.uint8))
# misc_utils.save_file(os.path.join(patch_dir, lbl_patchname), gt_patch.astype(np.uint8))
record_file_train.write('{} {}\n'.format(img_patchname, lbl_patchname))
for img_file, lbl_file in tqdm(valid_files):
city_name = os.path.splitext(os.path.basename(img_file))[0].split('_')[0]
for rgb_patch, gt_patch, y, x in data_utils.patch_tile(img_file, lbl_file, patch_size, pad, overlap):
if visualize:
from mrs_utils import vis_utils
vis_utils.compare_figures([rgb_patch, gt_patch], (1, 2), fig_size=(12, 5))
img_patchname = '{}_y{}x{}.jpg'.format(city_name, int(y), int(x))
lbl_patchname = '{}_y{}x{}.png'.format(city_name, int(y), int(x))
# misc_utils.save_file(os.path.join(patch_dir, img_patchname), rgb_patch.astype(np.uint8))
# misc_utils.save_file(os.path.join(patch_dir, lbl_patchname), gt_patch.astype(np.uint8))
record_file_valid.write('{} {}\n'.format(img_patchname, lbl_patchname))
def get_stats(img_dir):
from data import data_utils
from glob import glob
rgb_imgs = glob(os.path.join(img_dir, '*RGB.jpg'))
ds_mean, ds_std = data_utils.get_ds_stats(rgb_imgs)
return np.stack([ds_mean, ds_std], axis=0)
def get_stats_pb(img_dir):
val = process_block.ValueComputeProcess(DS_NAME, os.path.join(os.path.dirname(__file__), '../stats/builtin'),
os.path.join(os.path.dirname(__file__), '../stats/builtin/{}.npy'.format(DS_NAME)), func=get_stats).\
run(img_dir=img_dir).val
val_test = val
return val, val_test
if __name__ == '__main__':
img_files = natsorted(glob(os.path.join(r'/home/wh145/data/caemo', '*RGB.jpg')))
np.random.seed(931004)
ps = 512
ol = 0
pd = 0
create_dataset(data_dir=r'/home/wh145/data/caemo',
save_dir=r'/home/wh145/data/caemo/ps_512_ol_0', patch_size=(ps, ps), pad=pd, overlap=ol, visualize=False, valid_percent=0.1)
# val = get_stats_pb(r'/media/ei-edl01/data/uab_datasets/spca/data/Original_Tiles')[0]
# data_utils.patches_to_hdf5(r'/hdd/mrs/spca', r'/hdd/mrs/spca/ps512_pd0_ol0_hdf5')
| 44.660377
| 145
| 0.667934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,110
| 0.234474
|
55c8ccd7b221f69f74c7f2b403781f9c5546f908
| 3,182
|
py
|
Python
|
tests/test_json_util.py
|
okutane/yandex-taxi-testsuite
|
7e2e3dd5a65869ecbf37bf3f79cba7bb4e782b0c
|
[
"MIT"
] | 128
|
2020-03-10T09:13:41.000Z
|
2022-02-11T20:16:16.000Z
|
tests/test_json_util.py
|
okutane/yandex-taxi-testsuite
|
7e2e3dd5a65869ecbf37bf3f79cba7bb4e782b0c
|
[
"MIT"
] | 3
|
2021-11-01T12:31:27.000Z
|
2022-02-11T13:08:38.000Z
|
tests/test_json_util.py
|
okutane/yandex-taxi-testsuite
|
7e2e3dd5a65869ecbf37bf3f79cba7bb4e782b0c
|
[
"MIT"
] | 22
|
2020-03-05T07:13:12.000Z
|
2022-03-15T10:30:58.000Z
|
import dateutil
import pytest
from testsuite.plugins import mockserver
from testsuite.utils import json_util
NOW = dateutil.parser.parse('2019-09-19-13:04:00.000000')
MOCKSERVER_INFO = mockserver.MockserverInfo(
'localhost', 123, 'http://localhost:123/', None,
)
MOCKSERVER_SSL_INFO = mockserver.MockserverInfo(
'localhost',
456,
'https://localhost:456/',
mockserver.SslInfo('/some_dir/cert.cert', '/some_dir/cert.key'),
)
@pytest.mark.parametrize(
'json_input,expected_result',
[
( # simple list
[{'some_date': {'$dateDiff': 0}}, 'regular_element'], # json_input
[{'some_date': NOW}, 'regular_element'], # expected_result
),
( # simple dict
{ # json_input
'some_date': {'$dateDiff': 0},
'regular_key': 'regular_value',
},
{'some_date': NOW, 'regular_key': 'regular_value'}, # json_input
),
( # nested list and dict
{ # json_input
'regular_root_key': 'regular_root_value',
'root_date': {'$dateDiff': 0},
'parent_key': {
'nested_date': {'$dateDiff': 0},
'nested_list': [
'regular_element1',
{'$dateDiff': 0},
{'$dateDiff': 0},
'regular_element2',
],
},
},
{ # expected_result
'regular_root_key': 'regular_root_value',
'root_date': NOW,
'parent_key': {
'nested_date': NOW,
'nested_list': [
'regular_element1',
NOW,
NOW,
'regular_element2',
],
},
},
),
],
)
def test_substitute_now(json_input, expected_result):
result = json_util.substitute(json_input, now=NOW)
assert result == expected_result
@pytest.mark.parametrize(
'json_input,expected_result',
[
(
({'client_url': {'$mockserver': '/path'}}),
({'client_url': 'http://localhost:123/path'}),
),
(
({'client_url': {'$mockserver': '/path', '$schema': False}}),
({'client_url': 'localhost:123/path'}),
),
],
)
def test_substitute_mockserver(json_input, expected_result):
result = json_util.substitute(json_input, mockserver=MOCKSERVER_INFO)
assert result == expected_result
@pytest.mark.parametrize(
'json_input,expected_result',
[
(
({'client_url': {'$mockserver_https': '/path'}}),
({'client_url': 'https://localhost:456/path'}),
),
(
({'client_url': {'$mockserver_https': '/path', '$schema': False}}),
({'client_url': 'localhost:456/path'}),
),
],
)
def test_substitute_mockserver_https(json_input, expected_result):
result = json_util.substitute(
json_input, mockserver_https=MOCKSERVER_SSL_INFO,
)
assert result == expected_result
| 30.596154
| 79
| 0.511942
| 0
| 0
| 0
| 0
| 2,728
| 0.857322
| 0
| 0
| 1,099
| 0.34538
|
55c8ce13de36aa35d1ea8a967ade5c81bd88fbbc
| 1,066
|
py
|
Python
|
Level/__init__.py
|
PyRectangle/GreyRectangle
|
21c19002f52563a096566e9166040815005b830b
|
[
"MIT"
] | 3
|
2017-09-28T16:53:09.000Z
|
2018-03-18T20:01:41.000Z
|
Level/__init__.py
|
PyRectangle/GreyRectangle
|
21c19002f52563a096566e9166040815005b830b
|
[
"MIT"
] | null | null | null |
Level/__init__.py
|
PyRectangle/GreyRectangle
|
21c19002f52563a096566e9166040815005b830b
|
[
"MIT"
] | null | null | null |
from Level.Render import Render
from Level.Data import Data
from Constants import *
import os
class Level:
def __init__(self, folder, main):
self.main = main
self.name = folder
self.folder = LEVEL_PATH + "/" + folder
self.dataFiles = []
files = os.listdir(self.folder)
for file in files:
if file[0:4] == "data":
self.dataFiles.append(file)
self.render = Render(self, main)
self.data = Data(self)
def rename(self, name):
self.name = name
folder = LEVEL_PATH + "/" + name
os.rename(self.folder, folder)
self.folder = folder
self.main.levelSelection.levelGuiHandler.updateText()
def openSection(self, number):
self.close()
self.data = Data(self, number)
def save(self):
self.data.save()
for region in self.data.regions:
if region.loaded:
region.save()
region.save()
def close(self):
self.data.close()
del self.data
| 26
| 61
| 0.562852
| 969
| 0.909006
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.011257
|
55cc899799689985629d17decc9d13ef5c737a0d
| 1,252
|
py
|
Python
|
preparedstatement.py
|
shgysk8zer0/pyutils
|
f7fa2ea7717740f05ea739d20cd8a21701835800
|
[
"MIT"
] | null | null | null |
preparedstatement.py
|
shgysk8zer0/pyutils
|
f7fa2ea7717740f05ea739d20cd8a21701835800
|
[
"MIT"
] | null | null | null |
preparedstatement.py
|
shgysk8zer0/pyutils
|
f7fa2ea7717740f05ea739d20cd8a21701835800
|
[
"MIT"
] | null | null | null |
import sqlite3
class PreparedStatement:
__cursor = None
__sql = ''
__params = {}
def __init__(self, con: sqlite3.Connection, sql: str):
self.__cursor = con.cursor()
self.__sql = sql
self.__params = {}
def __str__(self) -> str:
return self.__sql
def __getitem__(self, key: str):
return self.__params.get(key)
def __delitem__(self, key: str):
del self.__params[key]
def __contains__(self, key: str) -> bool:
return key in self.__params
def __setitem__(self, name: str, value) -> None:
self.__params[name] = value
def __call__(self, **kwargs):
return self.__cursor.execute(self.__sql, {**kwargs, **self.__params})
def bind(self, name: str, value):
self.__params[name] = value
return self
def bindall(self, **kwargs):
self.__params = kwargs
return self
def execute(self, **kwargs):
return self.__cursor.execute(self.__sql, {**self.__params, **kwargs})
def executemany(self, *args):
result = [{**self.__params, **arg} for arg in args]
return self.__cursor.executemany(self.__sql, result)
@property
def params(self) -> dict:
return self.__params
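# Minimal usage sketch (the table and column names are assumptions, not part of
# this module):
#   con = sqlite3.connect(':memory:')
#   con.execute('CREATE TABLE users (name TEXT, age INTEGER)')
#   stmt = PreparedStatement(con, 'INSERT INTO users VALUES (:name, :age)')
#   stmt.bind('name', 'alice').execute(age=30)
#   stmt.executemany({'name': 'bob', 'age': 25}, {'name': 'eve', 'age': 27})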
| 26.083333
| 77
| 0.610224
| 1,236
| 0.98722
| 0
| 0
| 68
| 0.054313
| 0
| 0
| 2
| 0.001597
|
55cce6a5f51b48ac0a3f7fb58d81fade424bd086
| 2,787
|
py
|
Python
|
python/communitymanager/lib/basicauthpolicy.py
|
OpenCIOC/communityrepo
|
63199a7b620f5c08624e534faf771e5dd2243adb
|
[
"Apache-2.0"
] | 2
|
2016-01-25T14:40:44.000Z
|
2018-01-31T04:30:23.000Z
|
python/communitymanager/lib/basicauthpolicy.py
|
OpenCIOC/communityrepo
|
63199a7b620f5c08624e534faf771e5dd2243adb
|
[
"Apache-2.0"
] | 5
|
2018-02-07T20:16:49.000Z
|
2021-12-13T19:41:43.000Z
|
python/communitymanager/lib/basicauthpolicy.py
|
OpenCIOC/communityrepo
|
63199a7b620f5c08624e534faf771e5dd2243adb
|
[
"Apache-2.0"
] | 1
|
2018-02-07T20:37:52.000Z
|
2018-02-07T20:37:52.000Z
|
# From the Pyramid Cookbook:
# http://pyramid-cookbook.readthedocs.org/en/latest/auth/basic.html
import binascii
import base64
from paste.httpheaders import AUTHORIZATION
from paste.httpheaders import WWW_AUTHENTICATE
from pyramid.security import Everyone
from pyramid.security import Authenticated
def _get_basicauth_credentials(request):
authorization = AUTHORIZATION(request.environ)
try:
authmeth, auth = authorization.split(' ', 1)
except ValueError: # not enough values to unpack
return None
if authmeth.lower() == 'basic':
try:
auth = base64.b64decode(auth.strip().encode('ascii')).decode('utf-8')
except binascii.Error: # can't decode
return None
try:
login, password = auth.split(':', 1)
except ValueError: # not enough values to unpack
return None
return {'login': login, 'password': password}
return None
class BasicAuthenticationPolicy(object):
""" A :app:`Pyramid` :term:`authentication policy` which
obtains data from basic authentication headers.
Constructor Arguments
``check``
A callback passed the credentials and the request,
expected to return None if the userid doesn't exist or a sequence
of group identifiers (possibly empty) if the user does exist.
Required.
``realm``
Default: ``Realm``. The Basic Auth realm string.
"""
def __init__(self, check, realm='Realm'):
self.check = check
self.realm = realm
def authenticated_userid(self, request):
credentials = _get_basicauth_credentials(request)
if credentials is None:
return None
userid = credentials['login']
if self.check(credentials, request) is not None: # is not None!
return userid
def effective_principals(self, request):
effective_principals = [Everyone]
credentials = _get_basicauth_credentials(request)
if credentials is None:
return effective_principals
userid = credentials['login']
groups = self.check(credentials, request)
if groups is None: # is None!
return effective_principals
effective_principals.append(Authenticated)
effective_principals.append(userid)
effective_principals.extend(groups)
return effective_principals
def unauthenticated_userid(self, request):
creds = _get_basicauth_credentials(request)
if creds is not None:
return creds['login']
return None
def remember(self, request, principal, **kw):
return []
def forget(self, request):
head = WWW_AUTHENTICATE.tuples('Basic realm="%s"' % self.realm)
return head
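# Wiring sketch (the check callback and the configurator call below are assumptions;
# neither is defined in this module):
#   def check(credentials, request):
#       # return a (possibly empty) list of groups for valid users, None otherwise
#       if credentials['login'] == 'admin' and credentials['password'] == 'secret':
#           return ['group:admin']
#       return None
#
#   policy = BasicAuthenticationPolicy(check, realm='CommunityManager')
#   config.set_authentication_policy(policy)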
| 30.626374
| 81
| 0.657696
| 1,835
| 0.658414
| 0
| 0
| 0
| 0
| 0
| 0
| 736
| 0.264083
|
55cd25162b525efcbd0ec6570ea61ed0a8074922
| 4,709
|
py
|
Python
|
eventsourcing/examples/searchabletimestamps/postgres.py
|
ParikhKadam/eventsourcing
|
8d7f8d28c527d7df47a631b009b19b5fdb53740b
|
[
"BSD-3-Clause"
] | 107
|
2021-10-30T14:47:19.000Z
|
2022-03-31T10:52:42.000Z
|
eventsourcing/examples/searchabletimestamps/postgres.py
|
ParikhKadam/eventsourcing
|
8d7f8d28c527d7df47a631b009b19b5fdb53740b
|
[
"BSD-3-Clause"
] | 12
|
2021-11-02T05:52:42.000Z
|
2022-03-08T14:49:09.000Z
|
eventsourcing/examples/searchabletimestamps/postgres.py
|
ParikhKadam/eventsourcing
|
8d7f8d28c527d7df47a631b009b19b5fdb53740b
|
[
"BSD-3-Clause"
] | 8
|
2021-10-29T22:35:54.000Z
|
2022-03-03T04:16:17.000Z
|
from datetime import datetime
from typing import Any, List, Optional, Sequence, Tuple, cast
from uuid import UUID
from eventsourcing.domain import Aggregate
from eventsourcing.examples.searchabletimestamps.persistence import (
SearchableTimestampsRecorder,
)
from eventsourcing.persistence import ApplicationRecorder, StoredEvent
from eventsourcing.postgres import (
Factory,
PostgresApplicationRecorder,
PostgresConnection,
PostgresCursor,
PostgresDatastore,
)
class SearchableTimestampsApplicationRecorder(
SearchableTimestampsRecorder, PostgresApplicationRecorder
):
def __init__(
self,
datastore: PostgresDatastore,
events_table_name: str = "stored_events",
event_timestamps_table_name: str = "event_timestamps",
):
self.check_table_name_length(event_timestamps_table_name, datastore.schema)
self.event_timestamps_table_name = event_timestamps_table_name
super().__init__(datastore, events_table_name)
self.insert_event_timestamp_statement = (
f"INSERT INTO {self.event_timestamps_table_name} VALUES ($1, $2, $3)"
)
self.insert_event_timestamp_statement_name = (
f"insert_{event_timestamps_table_name}".replace(".", "_")
)
self.select_event_timestamp_statement = (
f"SELECT originator_version FROM {self.event_timestamps_table_name} WHERE "
f"originator_id = $1 AND "
f"timestamp <= $2 "
"ORDER BY originator_version DESC "
"LIMIT 1"
)
self.select_event_timestamp_statement_name = (
f"select_{event_timestamps_table_name}".replace(".", "_")
)
def construct_create_table_statements(self) -> List[str]:
statements = super().construct_create_table_statements()
statements.append(
"CREATE TABLE IF NOT EXISTS "
f"{self.event_timestamps_table_name} ("
"originator_id uuid NOT NULL, "
"timestamp timestamp with time zone, "
"originator_version bigint NOT NULL, "
"PRIMARY KEY "
"(originator_id, timestamp))"
)
return statements
def _prepare_insert_events(self, conn: PostgresConnection) -> None:
super()._prepare_insert_events(conn)
self._prepare(
conn,
self.insert_event_timestamp_statement_name,
self.insert_event_timestamp_statement,
)
def _insert_events(
self,
c: PostgresCursor,
stored_events: List[StoredEvent],
**kwargs: Any,
) -> Optional[Sequence[int]]:
notification_ids = super()._insert_events(c, stored_events, **kwargs)
# Insert event timestamps.
event_timestamps_data = cast(
List[Tuple[UUID, datetime, int]], kwargs.get("event_timestamps_data")
)
for event_timestamp_data in event_timestamps_data:
statement_alias = self.statement_name_aliases[
self.insert_event_timestamp_statement_name
]
c.execute(f"EXECUTE {statement_alias}(%s, %s, %s)", event_timestamp_data)
return notification_ids
def get_version_at_timestamp(
self, originator_id: UUID, timestamp: datetime
) -> Optional[int]:
with self.datastore.get_connection() as conn:
self._prepare(
conn,
self.select_event_timestamp_statement_name,
self.select_event_timestamp_statement,
)
with conn.transaction(commit=False) as curs:
statement_alias = self.statement_name_aliases[
self.select_event_timestamp_statement_name
]
curs.execute(
f"EXECUTE {statement_alias}(%s, %s)", [originator_id, timestamp]
)
for row in curs.fetchall():
return row["originator_version"]
else:
return Aggregate.INITIAL_VERSION - 1
class SearchableTimestampsInfrastructureFactory(Factory):
def application_recorder(self) -> ApplicationRecorder:
prefix = (self.datastore.schema + ".") if self.datastore.schema else ""
prefix += self.env.name.lower() or "stored"
events_table_name = prefix + "_events"
event_timestamps_table_name = prefix + "_timestamps"
recorder = SearchableTimestampsApplicationRecorder(
datastore=self.datastore,
events_table_name=events_table_name,
event_timestamps_table_name=event_timestamps_table_name,
)
recorder.create_table()
return recorder
del Factory
| 36.789063
| 87
| 0.647484
| 4,202
| 0.892334
| 0
| 0
| 0
| 0
| 0
| 0
| 754
| 0.160119
|
55cdd7e5e8bf1de41967431dfc57603e40486db0
| 313
|
py
|
Python
|
complete/01 - 10/Problem6/main.py
|
this-jacob/project-euler
|
8f9e700e2875e84d081eade44fd2107db0a0ae12
|
[
"MIT"
] | null | null | null |
complete/01 - 10/Problem6/main.py
|
this-jacob/project-euler
|
8f9e700e2875e84d081eade44fd2107db0a0ae12
|
[
"MIT"
] | null | null | null |
complete/01 - 10/Problem6/main.py
|
this-jacob/project-euler
|
8f9e700e2875e84d081eade44fd2107db0a0ae12
|
[
"MIT"
] | null | null | null |
def main():
squareSum = 0 #(1 + 2)^2 square of the sums
sumSquare = 0 #1^2 + 2^2 sum of the squares
for i in range(1, 101):
sumSquare += i ** 2
squareSum += i
squareSum = squareSum ** 2
print(str(squareSum - sumSquare))
if __name__ == '__main__':
main()
| 19.5625
| 54
| 0.533546
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 70
| 0.223642
|
55ce2377676e46ea6ca7f0b0a8a26da468d757a5
| 1,861
|
py
|
Python
|
Sudoko.py
|
abirbhattacharya82/Sudoko-Solver
|
36ea15d16561fe5031548ed3f4c58757280117f6
|
[
"MIT"
] | 1
|
2021-07-25T03:02:39.000Z
|
2021-07-25T03:02:39.000Z
|
Sudoko.py
|
abirbhattacharya82/Sudoku-Solver
|
36ea15d16561fe5031548ed3f4c58757280117f6
|
[
"MIT"
] | null | null | null |
Sudoko.py
|
abirbhattacharya82/Sudoku-Solver
|
36ea15d16561fe5031548ed3f4c58757280117f6
|
[
"MIT"
] | null | null | null |
def find_space(board):
for i in range(0,9):
for j in range(0,9):
if board[i][j]==0:
return (i,j)
return None
def check(board,num,r,c):
for i in range(0,9):
if board[r][i]==num and c!=i:
return False
for i in range(0,9):
if board[i][c]==num and r!=i:
return False
x=r//3
y=c//3
for i in range(x*3,x*3+3):
for j in range(y*3,y*3+3):
if board[i][j]==num and r!=i and c!=j:
return False
return True
def enter_datas(board):
for i in range(1,10):
print("Enter the Datas in Row ",i)
x=[int(i) for i in input().split()]
board.append(x)
def show(board):
for i in range(0,9):
for j in range(0,9):
if j==2 or j==5:
print(board[i][j]," | ",end="")
else:
print(board[i][j],end=" ")
if i==2 or i==5:
print("\n-----------------------\n")
else:
print("\n")
def solve(board):
x=find_space(board)
if not x:
return True
else:
r,c=x
for i in range(1,10):
if check(board,i,r,c):
board[r][c]=i
if solve(board):
return True
board[r][c]=0
return False
board=[]
enter_datas(board)
show(board)
solve(board)
print("\n\n")
show(board)
'''
Enter the Datas in a Row
7 8 0 4 0 0 1 2 0
Enter the Datas in a Row
6 0 0 0 7 5 0 0 9
Enter the Datas in a Row
0 0 0 6 0 1 0 7 8
Enter the Datas in a Row
0 0 7 0 4 0 2 6 0
Enter the Datas in a Row
0 0 1 0 5 0 9 3 0
Enter the Datas in a Row
9 0 4 0 6 0 0 0 5
Enter the Datas in a Row
0 7 0 3 0 0 0 1 2
Enter the Datas in a Row
1 2 0 0 0 7 4 0 0
Enter the Datas in a Row
0 4 9 2 0 6 0 0 7
'''
| 23.2625
| 51
| 0.476088
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 487
| 0.261687
|
55cf3d9d9f70b37e8b09330bf7dcbd0d8aeb3b5f
| 2,341
|
py
|
Python
|
gbkfit_web/utility/display_names.py
|
ADACS-Australia/ADACS-GBKFIT
|
20c7cafcabb6e75d8c287df06efb43113fdabd25
|
[
"MIT"
] | null | null | null |
gbkfit_web/utility/display_names.py
|
ADACS-Australia/ADACS-GBKFIT
|
20c7cafcabb6e75d8c287df06efb43113fdabd25
|
[
"MIT"
] | null | null | null |
gbkfit_web/utility/display_names.py
|
ADACS-Australia/ADACS-GBKFIT
|
20c7cafcabb6e75d8c287df06efb43113fdabd25
|
[
"MIT"
] | null | null | null |
"""
Distributed under the MIT License. See LICENSE.txt for more info.
"""
# VARIABLES of this file must be unique
from django_hpc_job_controller.client.scheduler.status import JobStatus
# Dictionary to map names and corresponding display names (for UI)
DISPLAY_NAME_MAP = dict()
DISPLAY_NAME_MAP_HPC_JOB = dict()
# Job Status
NONE = 'none'
NONE_DISPLAY = 'None'
DRAFT = 'draft'
DRAFT_DISPLAY = 'Draft'
PENDING = 'pending'
PENDING_DISPLAY = 'Pending'
SUBMITTING = 'submitting'
SUBMITTING_DISPLAY = 'Submitting'
SUBMITTED = 'submitted'
SUBMITTED_DISPLAY = 'Submitted'
QUEUED = 'queued'
QUEUED_DISPLAY = 'Queued'
IN_PROGRESS = 'in_progress'
IN_PROGRESS_DISPLAY = 'In Progress'
CANCELLING = 'cancelling'
CANCELLING_DISPLAY = 'Cancelling'
CANCELLED = 'cancelled'
CANCELLED_DISPLAY = 'Cancelled'
ERROR = 'error'
ERROR_DISPLAY = 'Error'
WALL_TIME_EXCEEDED = 'wall_time_exceeded'
WALL_TIME_EXCEEDED_DISPLAY = 'Wall Time Exceeded'
OUT_OF_MEMORY = 'out_of_memory'
OUT_OF_MEMORY_DISPLAY = 'Out of Memory'
COMPLETED = 'completed'
COMPLETED_DISPLAY = 'Completed'
SAVED = 'saved'
SAVED_DISPLAY = 'Saved'
DELETING = 'deleting'
DELETING_DISPLAY = 'Deleting'
DELETED = 'deleted'
DELETED_DISPLAY = 'Deleted'
PUBLIC = 'public'
PUBLIC_DISPLAY = 'Public'
DISPLAY_NAME_MAP.update({
DRAFT: DRAFT_DISPLAY,
PENDING: PENDING_DISPLAY,
SUBMITTING: SUBMITTING_DISPLAY,
SUBMITTED: SUBMITTED_DISPLAY,
QUEUED: QUEUED_DISPLAY,
IN_PROGRESS: IN_PROGRESS_DISPLAY,
CANCELLING: CANCELLING_DISPLAY,
CANCELLED: CANCELLED_DISPLAY,
ERROR: ERROR_DISPLAY,
WALL_TIME_EXCEEDED: WALL_TIME_EXCEEDED_DISPLAY,
OUT_OF_MEMORY: OUT_OF_MEMORY_DISPLAY,
COMPLETED: COMPLETED_DISPLAY,
SAVED: SAVED_DISPLAY,
DELETING: DELETING_DISPLAY,
DELETED: DELETED_DISPLAY,
PUBLIC: PUBLIC_DISPLAY,
})
DISPLAY_NAME_MAP_HPC_JOB.update({
JobStatus.DRAFT: DRAFT,
JobStatus.PENDING: PENDING,
JobStatus.SUBMITTING: SUBMITTING,
JobStatus.SUBMITTED: SUBMITTED,
JobStatus.QUEUED: QUEUED,
JobStatus.RUNNING: IN_PROGRESS,
JobStatus.CANCELLING: CANCELLING,
JobStatus.CANCELLED: CANCELLED,
JobStatus.ERROR: ERROR,
JobStatus.WALL_TIME_EXCEEDED: WALL_TIME_EXCEEDED,
JobStatus.OUT_OF_MEMORY: OUT_OF_MEMORY,
JobStatus.DELETING: DELETING,
JobStatus.DELETED: DELETED,
JobStatus.COMPLETED: COMPLETED,
})
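# Usage sketch (illustrative): translating an HPC job status into its UI label.
#   internal = DISPLAY_NAME_MAP_HPC_JOB[JobStatus.RUNNING]  # -> 'in_progress'
#   DISPLAY_NAME_MAP[internal]                              # -> 'In Progress'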
| 27.541176
| 71
| 0.76463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 542
| 0.231525
|
55d01698d8da5e9ff89aaf1c3a856cf2b9f42f2c
| 5,227
|
py
|
Python
|
heap/heap.py
|
xyycha/data-struct
|
0a0d46bf6666681be2e4d5a2664b333dd9fb3a95
|
[
"Apache-2.0"
] | 4
|
2020-03-10T07:45:44.000Z
|
2020-03-12T02:00:32.000Z
|
heap/heap.py
|
xyycha/data-struct
|
0a0d46bf6666681be2e4d5a2664b333dd9fb3a95
|
[
"Apache-2.0"
] | 1
|
2020-03-14T01:32:19.000Z
|
2020-03-14T03:06:34.000Z
|
heap/heap.py
|
xyycha/data-struct
|
0a0d46bf6666681be2e4d5a2664b333dd9fb3a95
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
import random
from graphviz import Digraph
class HeapNode(object):
def __init__(self, value, info):
self.value = value
self.info = info
class Heap(object):
def __init__(self, cap):
self.cap = cap
self.size = 0
self.heap = [None]
def show(self, file_name=None):
d = Digraph(filename=file_name, directory="./pdf_data")
d.clear()
node_name = []
for node in self.heap:
if node is None:
node_name.append(None)
continue
name = str(id(node))
d.node(name=name, label=str(node.value))
node_name.append(name)
max_father_index = self.size // 2
for father_index in range(1, max_father_index + 1):
left_son_index = father_index * 2
right_son_index = father_index * 2 + 1
if left_son_index <= self.size:
d.edge(head_name=node_name[left_son_index], tail_name=node_name[father_index])
if right_son_index <= self.size:
d.edge(head_name=node_name[right_son_index], tail_name=node_name[father_index])
d.view()
def insert(self, node: HeapNode):
self.heap.append(None)
self.size += 1
index = self.size
while index > 1:
father_index = index // 2
if self.heap[father_index].value > node.value:
self.heap[index] = self.heap[father_index]
index = father_index
else:
break
self.heap[index] = node
return 1
def pop(self):
        assert self.size > 0, "empty heap"
first_node = self.heap[1]
last_node = self.heap.pop()
self.size -= 1
if first_node == last_node:
return first_node
index = 1
while index <= self.size // 2:
left_son = self.heap[index * 2]
father_index = index
right_son_index = index * 2 + 1
self.heap[index] = left_son
if left_son.value < last_node.value:
index *= 2
            if (right_son_index <= self.size
                    and self.heap[right_son_index].value < last_node.value
                    and self.heap[right_son_index].value < self.heap[father_index].value):
self.heap[father_index] = self.heap[right_son_index]
index = right_son_index
if index == father_index:
break
self.heap[index] = last_node
return first_node
def find_node_index(self, key):
for index in range(1, self.size + 1):
node_key = self.heap[index].info
if node_key == key:
break
return index
def decrease_value(self, key, value):
index = self.find_node_index(key=key)
self.heap[index].value -= value
father_index = index // 2
while father_index >= 1 and self.heap[father_index].value > self.heap[index].value:
self.swap_two_node(index1=father_index, index2=index)
index = father_index
father_index //= 2
def get_value(self, key):
index = self.find_node_index(key=key)
return self.heap[index].value
def swap_two_node(self, index1, index2):
self.heap[index1], self.heap[index2] = self.heap[index2], self.heap[index1]
def keep_father_lt_son(self, father_index):
"""
下滤 操作
:param father_index: 父节点下标
:return: None
"""
if father_index > self.size // 2:
return
left_index = father_index * 2
right_index = father_index * 2 + 1
index = father_index
if self.heap[left_index].value < self.heap[father_index].value:
index = left_index
if right_index <= self.size and self.heap[right_index].value < self.heap[father_index].value and self.heap[right_index].value < self.heap[left_index].value:
index = right_index
if index == father_index:
return
self.swap_two_node(index1=index, index2=father_index)
self.keep_father_lt_son(father_index=index)
def build_heap(self, n: list):
        assert len(n) <= self.cap, "heap capacity exceeded"
self.heap.extend(n)
self.size = len(n)
father_index = self.size // 2
for index in range(father_index, 0, -1):
self.keep_father_lt_son(father_index=index)
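# Usage sketch (values are illustrative only): build_heap performs a bottom-up
# heapify, so the smallest value ends up at the root.
#   h = Heap(cap=4)
#   h.build_heap([HeapNode(5, {}), HeapNode(2, {}), HeapNode(9, {}), HeapNode(1, {})])
#   h.pop().value  # -> 1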
def test1():
h = Heap(cap=20)
for i in range(20):
value = random.randint(0, 100)
info = {"value": value, "key": str(value)}
node = HeapNode(value=value, info=info)
h.insert(node=node)
h.show(file_name="初始堆")
h.pop()
h.show(file_name="第一次pop")
h.pop()
h.show(file_name="第二次pop")
h.pop()
h.show(file_name="第三次pop")
def test2():
node_list = []
pre_res = []
for i in range(20):
value = random.randint(0, 100)
pre_res.append(value)
info = {"value": value, "key": str(value)}
node = HeapNode(value=value, info=info)
node_list.append(node)
print(pre_res)
h = Heap(cap=20)
h.build_heap(node_list)
h.show(file_name="建立堆")
print("end")
if __name__ == "__main__":
test2()
| 32.067485
| 168
| 0.575091
| 4,368
| 0.82649
| 0
| 0
| 0
| 0
| 0
| 0
| 263
| 0.049763
|
55d1d3ad368bdd500bd5c9d98aeb00a9d5dd603d
| 1,899
|
py
|
Python
|
python/federatedml/param/encrypted_mode_calculation_param.py
|
QuantumA/FATE
|
89a3dd593252128c1bf86fb1014b25a629bdb31a
|
[
"Apache-2.0"
] | 1
|
2022-02-07T06:23:15.000Z
|
2022-02-07T06:23:15.000Z
|
python/federatedml/param/encrypted_mode_calculation_param.py
|
JavaGreenHands/FATE
|
ea1e94b6be50c70c354d1861093187e523af32f2
|
[
"Apache-2.0"
] | 11
|
2020-10-09T09:53:50.000Z
|
2021-12-06T16:14:51.000Z
|
python/federatedml/param/encrypted_mode_calculation_param.py
|
JavaGreenHands/FATE
|
ea1e94b6be50c70c354d1861093187e523af32f2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.param.base_param import BaseParam
class EncryptedModeCalculatorParam(BaseParam):
"""
Define the encrypted_mode_calulator parameters.
Parameters
----------
mode: {'strict', 'fast', 'balance', 'confusion_opt'}
encrypted mode, default: strict
re_encrypted_rate: float or int
        numeric value in [0, 1], used when mode is 'balance' or 'confusion_opt_balance'; default: 1
"""
def __init__(self, mode="strict", re_encrypted_rate=1):
self.mode = mode
self.re_encrypted_rate = re_encrypted_rate
def check(self):
descr = "encrypted_mode_calculator param"
self.mode = self.check_and_change_lower(self.mode,
["strict", "fast", "balance", "confusion_opt", "confusion_opt_balance"],
descr)
if self.mode in ["balance", "confusion_opt_balance"]:
if type(self.re_encrypted_rate).__name__ not in ["int", "long", "float"]:
raise ValueError("re_encrypted_rate should be a numeric number")
if not 0.0 <= self.re_encrypted_rate <= 1:
raise ValueError("re_encrypted_rate should in [0, 1]")
return True
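# Usage sketch (values are illustrative only): check() normalises the mode to lower
# case, verifies it against the allowed set and range-checks re_encrypted_rate for
# the balance modes.
#   param = EncryptedModeCalculatorParam(mode='Balance', re_encrypted_rate=0.5)
#   param.check()  # -> True, with param.mode normalised to 'balance'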
| 35.830189
| 120
| 0.646656
| 1,182
| 0.622433
| 0
| 0
| 0
| 0
| 0
| 0
| 1,188
| 0.625592
|
55d2968cb14aa637fc9c4bccc7dba59fba67c074
| 5,832
|
py
|
Python
|
optimization/solution.py
|
silx-kit/silx-training
|
1e24d4fe383263e3466c029073190ed8bb70bb1f
|
[
"CC-BY-4.0"
] | 7
|
2017-05-02T10:03:12.000Z
|
2021-06-28T14:11:32.000Z
|
optimization/solution.py
|
silx-kit/silx-training
|
1e24d4fe383263e3466c029073190ed8bb70bb1f
|
[
"CC-BY-4.0"
] | 23
|
2016-11-21T17:55:11.000Z
|
2021-11-24T13:43:13.000Z
|
optimization/solution.py
|
silx-kit/silx-training
|
1e24d4fe383263e3466c029073190ed8bb70bb1f
|
[
"CC-BY-4.0"
] | 13
|
2016-11-17T10:47:22.000Z
|
2022-02-07T09:38:47.000Z
|
"""Solution of the exercises of Optimization of compute bound Python code"""
import math
import cmath
import numpy as np
import numexpr as ne
import numba as nb
# Needed here since it is used as global variables
# Maximum strain at surface
e0 = 0.01
# Width of the strain profile below the surface
w = 5.0
# Python: Circular crystal ###
def circ_python_1(N, h, k):
x = (np.arange(N) - N / 2).reshape(-1, 1)
y = (np.arange(N) - N / 2).reshape(1, -1)
omega = x * x + y * y <= (N / 2) ** 2
result = np.zeros((h.size, k.size))
for i_h, v_h in enumerate(h): # loop over the reciprocal space coordinates
for i_k, v_k in enumerate(k):
# One should discard bad values
tmp = 0.0
for n in range(N): # loop and sum over unit-cells
for m in range(N):
if omega[n, m]:
tmp += cmath.exp(2j * np.pi * (v_h * n + v_k * m))
result[i_h][i_k] = abs(tmp) ** 2
return result
# Alternative using Python `sum`
def circ_python_1_alt(N, h, k):
# Filter-out position outside crystal once for all
inside_pos = [
(n, m)
for n in range(N)
for m in range(N)
if ((n - N / 2) ** 2 + (m - N / 2) ** 2) <= (N / 2) ** 2
]
result = np.zeros((h.size, k.size))
for i_h, v_h in enumerate(h): # loop over the reciprocal space coordinates
for i_k, v_k in enumerate(k):
result[i_h][i_k] = (
abs(
sum( # Sum over positions inside the crystal
cmath.exp(2j * np.pi * (v_h * n + v_k * m))
for n, m in inside_pos
)
)
** 2
)
return result
# Python: Circular strained crystal ###
def circ_python(N, h, k):
N_2 = N / 2
positions = {}
for i in range(N):
x = i - N_2
for j in range(N):
y = j - N_2
r = (x * x + y * y) ** 0.5
if r <= N_2:
strain = e0 * (1 + math.tanh((r - N_2) / w))
positions[(i, j)] = (i + strain * x, j + strain * y)
result = np.zeros((h.size, k.size))
for i_h, v_h in enumerate(h): # loop over the reciprocal space coordinates
for i_k, v_k in enumerate(k):
# One should discard bad values
tmp = 0.0
for i_n in range(N): # loop and sum over unit-cells
for i_m in range(N):
pos = positions.get((i_n, i_m))
if pos:
n_s, m_s = pos
tmp += cmath.exp(2j * np.pi * (v_h * n_s + v_k * m_s))
result[i_h, i_k] = abs(tmp) ** 2
return result
# Alternative computing list of strained position
def circ_python_alt(N, h, k):
# Compute strained position inside the crystal once for all
strained_pos = []
crystal_radius = N / 2
for n in range(N):
for m in range(N):
# Center is at (N/2, N/2)
x = n - crystal_radius
y = m - crystal_radius
radius = (x ** 2 + y ** 2) ** 0.5
if radius <= crystal_radius:
delta = e0 * (1 + math.tanh((radius - crystal_radius) / w))
strained_pos.append((n + delta * x, m + delta * y))
result = np.zeros((h.size, k.size))
for i_h, v_h in enumerate(h): # loop over the reciprocal space coordinates
for i_k, v_k in enumerate(k):
result[i_h][i_k] = (
abs(
sum(
cmath.exp(2j * np.pi * (v_h * n_s + v_k * m_s))
for n_s, m_s in strained_pos
)
)
** 2
)
return result
# numpy ###
def circ_numpy(N, h, k):
N_2 = N / 2
h = h.reshape(-1, 1, 1, 1)
k = k.reshape(1, -1, 1, 1)
n = np.arange(N).reshape(1, 1, -1, 1)
m = np.arange(N).reshape(1, 1, 1, -1)
radius = np.sqrt((n - N_2) ** 2 + (m - N_2) ** 2)
strain = e0 * (1.0 + np.tanh((radius - N_2) / w))
p_n = n + strain * (n - N_2)
p_m = m + strain * (m - N_2)
omega = radius <= N_2
tmp = omega * np.exp(2j * np.pi * (h * p_n + k * p_m))
return np.abs(tmp.sum(axis=(2, 3))) ** 2
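# Shape sketch (illustrative, not part of the original exercise): with M points along
# h and k and N unit cells per side, broadcasting h (M,1,1,1), k (1,M,1,1),
# n (1,1,N,1) and m (1,1,1,N) builds an (M, M, N, N) array before the sum over the
# two unit-cell axes.
#   h = k = np.linspace(-0.5, 0.5, 51)
#   circ_numpy(32, h, k).shape  # -> (51, 51)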
# numexpr ###
def circ_numexpr(N, h, k):
N_2 = N / 2
h = h.reshape(-1, 1, 1, 1)
k = k.reshape(1, -1, 1, 1)
n = np.arange(N).reshape(1, 1, -1, 1)
m = np.arange(N).reshape(1, 1, 1, -1)
radius = ne.evaluate("sqrt((n - N_2)**2 + (m - N_2)**2)")
strain = ne.evaluate("e0 * (1 + tanh((radius-N_2) / w))")
j2pi = np.pi * 2j
tmp = ne.evaluate(
"where(radius > N_2, 0, exp(j2pi*(h*(n+strain*(n-N_2)) + k*(m+strain*(m-N_2)))))"
)
result = abs(tmp.sum(axis=(2, 3))) ** 2
return result
# numba ###
@nb.jit(parallel=True)
def circ_numba(N, h, k):
result = np.zeros((h.size, k.size), dtype=np.float64)
N_2 = N / 2
for h_i in nb.prange(h.size): # loop over the reciprocal space coordinates
for k_i in range(k.size):
tmp = 0j
for n in range(N): # loop and sum over unit-cells
for m in range(N):
radius = math.sqrt((n - N_2) ** 2 + (m - N_2) ** 2)
if radius > (N_2):
value = 0j
# continue # Numba isn't working using the same continue pattern as below
else:
strain = e0 * (1 + math.tanh((radius - N_2) / w))
p_n = n + strain * (n - N_2)
p_m = m + strain * (m - N_2)
value = np.exp(2j * cmath.pi * (h[h_i] * p_n + k[k_i] * p_m))
tmp += value
result[h_i, k_i] = abs(tmp) ** 2
return result
| 32.4
| 98
| 0.477195
| 0
| 0
| 0
| 0
| 987
| 0.169239
| 0
| 0
| 1,156
| 0.198217
|
55d367bc88c080acffb11c453ca1f70ffffc2a4c
| 9,300
|
py
|
Python
|
examples/SSTDemoWeightedClauses_Interpret.py
|
jivitesh-sharma/Drop-Clause-Interpretable-TM
|
4fb4d4be0f24a0c30f13fbcca974390889d7fe84
|
[
"MIT"
] | null | null | null |
examples/SSTDemoWeightedClauses_Interpret.py
|
jivitesh-sharma/Drop-Clause-Interpretable-TM
|
4fb4d4be0f24a0c30f13fbcca974390889d7fe84
|
[
"MIT"
] | null | null | null |
examples/SSTDemoWeightedClauses_Interpret.py
|
jivitesh-sharma/Drop-Clause-Interpretable-TM
|
4fb4d4be0f24a0c30f13fbcca974390889d7fe84
|
[
"MIT"
] | null | null | null |
import re
import string
import nltk
from nltk.tokenize import word_tokenize
from string import punctuation
from nltk.corpus import stopwords
nltk.download('punkt')
nltk.download('stopwords')
import pandas as pd
from nltk.stem import PorterStemmer
from nltk import FreqDist
from nltk.tokenize import RegexpTokenizer
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import metrics
from PyTsetlinMachineCUDA.tm import MultiClassTsetlinMachine
nltk.download('wordnet')
from time import time
stop_words = set(stopwords.words('english'))
tokenizerR = RegexpTokenizer(r'\w+')
from numpy import save
from nltk.stem import WordNetLemmatizer
stop_words = set(stopwords.words('english'))
alpha = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
from argparse import ArgumentParser
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
parser = ArgumentParser()
parser.add_argument('-interpret', type=bool, default=False)
parser.add_argument('-n_clauses_per_class', type=int, default=5000)
parser.add_argument('-s', type=float, default=5.0)
parser.add_argument('-T', type=int, default=80)
parser.add_argument('-drop_clause', type=float, default=0.0)
parser.add_argument('-state_bits', type=int, default=8)
parser.add_argument('-features', type=int, default=7500)
parser.add_argument('-gpus', type=int, default=1)
parser.add_argument('-stop_train', type=int, default=250)
config = parser.parse_args()
col_list = ["text", "label"]
df = pd.read_csv('sst2.csv')
label = df.iloc[:,0:1].values
textOrig = df.iloc[:,1:2].values
y = np.reshape(label, len(label))
print(textOrig.shape)
def prepreocess(data):
input_data=[]
vocab = []
for i in data:
for j in i:
j = j.lower()
j = j.replace("\n", "")
j = j.replace('n\'t', 'not')
j = j.replace('\'ve', 'have')
j = j.replace('\'ll', 'will')
j = j.replace('\'re', 'are')
j = j.replace('\'m', 'am')
j = j.replace('/', ' / ')
j = j.replace('-', ' ')
j = j.replace('!', ' ')
j = j.replace('?', ' ')
j = j.replace('+', ' ')
j = j.replace('*', ' ')
while " " in j:
j = j.replace(' ', ' ')
while ",," in j:
j = j.replace(',,', ',')
j = j.strip()
j = j.strip('.')
j = j.strip()
temp1 = tokenizerR.tokenize(j)
temp2 = [x for x in temp1 if not x.isdigit()]
temp3 = [w for w in temp2 if not w in alpha]
#temp4 = [w for w in temp3 if not w in stop_words]
input_data.append(temp3)
return input_data
input_text = prepreocess(textOrig)
inputtext = []
for i in input_text:
ps = PorterStemmer()
temp4 = []
for m in i:
temp_temp =ps.stem(m)
temp4.append(temp_temp)
inputtext.append(temp4)
newVocab =[]
for i in inputtext:
for j in i:
newVocab.append(j)
print(len(newVocab))
fdist1 = FreqDist(newVocab)
tokens1 = fdist1.most_common(config.features)
full_token_fil = []
for i in tokens1:
full_token_fil.append(i[0])
sum1 = 0
for j in tokens1:
sum1 += j[1]
print('sum1', sum1)
vocab_unique = full_token_fil
vocab = np.asarray(full_token_fil)
np.savetxt('sst_vocab.csv', vocab, delimiter=',', fmt='%s')
def binarization_text(data4):
feature_set = np.zeros([len(data4), config.features], dtype=np.uint8)
tnum=0
for t in data4:
for w in t:
if (w in vocab_unique):
idx = vocab_unique.index(w)
feature_set[tnum][idx] = 1
tnum += 1
return feature_set
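# Shape sketch (illustrative): every document becomes a binary bag-of-words vector
# over the config.features most frequent stemmed tokens.
#   binarization_text([['good', 'movie'], ['bad', 'plot']]).shape
#   # -> (2, config.features)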
X_text = binarization_text(inputtext)
print("Text length:", X_text.shape)
tt = 6920
X_train = X_text[0:tt,:]
print("X_train length:", X_train.shape)
X_test = X_text[tt:,:]
print("X_test length:", X_test.shape)
ytrain = y[0:tt]
ytest = y[tt:]
print(ytest.shape)
X_dev = X_text[tt:,:]
Y_dev = y[tt:]
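# Note: tt = 6920 appears to correspond to the usual SST-2 binary training split size; the remaining sentences are used for evaluation.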
tm1 = MultiClassTsetlinMachine(config.n_clauses_per_class*2, config.T*16, config.s, clause_drop_p=config.drop_clause, number_of_gpus=config.gpus, number_of_state_bits=config.state_bits)
f = open("sst_weighted_%.1f_%d_%d_%.2f_%d_aug.txt" % (s, clauses, T, drop_clause, number_of_state_bits), "w+")
r_25 = 0
r_50 = 0
max_acc = 0.0
for i in range(config.stop_train):
start_training = time()
tm1.fit(X_train, ytrain, epochs=1, incremental=True)
stop_training = time()
start_testing = time()
result2 = 100*(tm1.predict(X_train) == ytrain).mean()
result1 = 100*(tm1.predict(X_test) == ytest).mean()
#result1 = 0
stop_testing = time()
    if result1 > max_acc:
        max_acc = result1
    if i >= config.stop_train - 50:
        r_50 += result1
    if i >= config.stop_train - 25:
        r_25 += result1
print("#%d AccuracyTrain: %.2f%% AccuracyTest: %.2f%% Training: %.2fs Testing: %.2fs" % (i+1, result2, result1, stop_training-start_training, stop_testing-start_testing), file=f)
print("Average Accuracy last 25 epochs: %.2f \n" %(r_25/25), file=f)
print("Average Accuracy last 50 epochs: %.2f \n" %(r_50/50), file=f)
print("Max Accuracy: %.2f \n" %(max), file=f)
if config.interpret:
print('predicted Class: ', tm1.predict(X_train[4245:4246,:]))
triggClause = tm1.transform(X_train[4245:4246,:])
clauseIndex = []
for i in range(len(triggClause[0])):
if triggClause[0][i] ==1:
clauseIndex.append(i)
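    # clauseIndex now holds the indices of the clauses triggered by this example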
import nltk
from nltk.probability import FreqDist
originalFeatures = []
negatedFeatures = []
number_of_features = 1000
for j in range(0, 1500, 2):
#print("Clause #%d (%d): " % (j, tm1.get_weight(1, j)), end=' ')
l = []
for k in range(number_of_features*2):
if tm1.ta_action(0, j, k) == 1:
if k < number_of_features:
l.append(" x%d" % (k))
originalFeatures.append(k)
else:
l.append("¬x%d" % (k-number_of_features))
negatedFeatures.append(k-number_of_features)
#print(" ∧ ".join(l))
fdist1 = FreqDist(negatedFeatures)
negatedWords = fdist1.most_common(200)
fdist2 = FreqDist(originalFeatures)
originalWords = fdist2.most_common(20)
print('full original word')
fulloriginalword=[]
for i in originalWords:
fulloriginalword.append(i[0])
fullnegatedword =[]
print('full negated word')
for i in negatedWords:
fullnegatedword.append(i[0])
originalFeatures2 = []
negatedFeatures2= []
for j in clauseIndex:
if j < 1500 and j%2==0:
#print("Clause #%d (%d): " % (j, tm1.get_weight(1, j)), end=' ')
l = []
for k in range(number_of_features*2):
if tm1.ta_action(0, j, k) == 1:
if k < number_of_features:
l.append(" x%d" % (k))
originalFeatures2.append(k)
else:
l.append("¬x%d" % (k-number_of_features))
negatedFeatures2.append(k-number_of_features)
fdist3 = FreqDist(negatedFeatures2)
negatedWords2 = fdist3.most_common(100)
fdist4 = FreqDist(originalFeatures2)
originalWords2 = fdist4.most_common(10)
neededoriginalword =[]
print('needed original word')
for i in originalWords2:
neededoriginalword.append(i[0])
needednegatedword =[]
print('needed negated word')
for i in negatedWords2:
needednegatedword.append(i[0])
#Save fulloriginalword, fullnegatedword, neededoriginalword, or needednegatedword (Preferred needednegatedword for interpretability)
interpretList = np.asarray(needednegatedword)
np.savetxt('interpretFile.csv', interpretList, fmt='%s')
df = pd.read_csv('interpretFile.csv', dtype=str, header=None)
df1 = df.iloc[:,:]
full1 = df.iloc[:,:].values
#full1= np.reshape(full1,(10,20))
index = np.arange(100)
letter2num = {}
for i in range(len(index)):
letter2num[full1[i][0]] =i
print(letter2num)
df2 = pd.DataFrame(np.array( [letter2num[i] for i in df1.values.flat] ).reshape(df1.shape))
print(df2)
colors = ["white"] # use hex colors here, if desired.
cmap = ListedColormap(colors)
full2 = df.iloc[:,:].values
full2= np.reshape(full2,(10,10))
full3 = df2.iloc[:,:].values
full3= np.reshape(full3,(10,10))
fig, ax = plt.subplots()
ax.imshow(full3,cmap='YlOrBr_r')
for i in range(len(full2)):
for j in range(10):
ax.text(j,i, full2[i,j], ha="center", va="center")
plt.axis('off')
ax.set_aspect(0.3)
plt.grid(True)
plt.show()
| 30.097087
| 186
| 0.576882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,311
| 0.140907
|
55d3a610da3467d16c45533e5d12b2a9f0ad38ba
| 1,457
|
py
|
Python
|
adbc/zql/builders/core.py
|
aleontiev/apg
|
c6a10a9b0a576913c63ed4f093e2a0fa7469af87
|
[
"MIT"
] | 2
|
2020-07-17T16:33:42.000Z
|
2020-07-21T04:48:38.000Z
|
adbc/zql/builders/core.py
|
aleontiev/apg
|
c6a10a9b0a576913c63ed4f093e2a0fa7469af87
|
[
"MIT"
] | null | null | null |
adbc/zql/builders/core.py
|
aleontiev/apg
|
c6a10a9b0a576913c63ed4f093e2a0fa7469af87
|
[
"MIT"
] | null | null | null |
from adbc.zql.validator import Validator
class Builder(Validator):
INDENT = 4
IDENTIFIER_SPLIT_CHARACTER = '.'
WHITESPACE_CHARACTER = ' '
WILDCARD_CHARACTER = '*'
QUOTE_CHARACTERS = {'"', "'", '`'}
RAW_QUOTE_CHARACTER = '`'
COMMANDS = {
'select',
'insert',
'update',
'delete',
'truncate',
'create',
'alter',
'drop',
'show',
'explain',
'set'
}
OPERATOR_REWRITES = {}
OPERATORS = {
'not': 1,
'!!': 1,
'is': 2,
'is null': {
'arguments': 1,
'binds': 'right'
},
'is not null': {
'arguments': 1,
'binds': 'right'
},
'!': {
'arguments': 1,
'binds': 'right'
},
'@': 1,
'|/': 1,
'=': 2,
'+': 2,
'*': 2,
'-': 2,
'/': 2,
'%': 2,
'^': 2,
'#': 2,
'~': 1,
'>>': 2,
'&': 2,
'<<': 2,
'|': 2,
'||': 2,
'<': 2,
'<=': 2,
'!=': 2,
'<>': 2,
'like': 2,
'ilike': 2,
'~~': 2,
'!~~': 2,
'>': 2,
'>=': 2,
'and': 2,
'or': 2,
}
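    # Each operator maps to either its arity (an int) or a dict giving an 'arguments' count and a 'binds' side.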
# TODO: handle non-functional clause expressions
# like CASE, BETWEEN, etc
CLAUSES = {
'case',
'between'
}
| 18.922078
| 52
| 0.315031
| 1,413
| 0.969801
| 0
| 0
| 0
| 0
| 0
| 0
| 413
| 0.283459
|
55d3b92efdbe3c9a4d84e47ec3fda8ecb4588bca
| 426
|
py
|
Python
|
setup.py
|
InTheMorning/python-bme280
|
47af2784c937bed429d8986b5205b495e03d74f2
|
[
"MIT"
] | null | null | null |
setup.py
|
InTheMorning/python-bme280
|
47af2784c937bed429d8986b5205b495e03d74f2
|
[
"MIT"
] | null | null | null |
setup.py
|
InTheMorning/python-bme280
|
47af2784c937bed429d8986b5205b495e03d74f2
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(name='bme280',
version='1.0.0',
packages=['bme280'],
install_requires=['smbus2'],
python_requires='>=2.7',
url='https://dev.mycrobase.de/gitea/cn/python-bme280',
author='Christian Nicolai',
description='A python library for accessing the BME280 combined humidity and pressure sensor from Bosch.',
long_description=open('README.md').read())
| 30.428571
| 112
| 0.671362
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 210
| 0.492958
|
55d3d277d3db0f3730f055eade9ab037ac954a49
| 1,190
|
py
|
Python
|
List/learnlist.py
|
shahasifbashir/LearnPython
|
4ce6b81d66ea7bbf0a40427871daa4e563b6a184
|
[
"MIT"
] | null | null | null |
List/learnlist.py
|
shahasifbashir/LearnPython
|
4ce6b81d66ea7bbf0a40427871daa4e563b6a184
|
[
"MIT"
] | null | null | null |
List/learnlist.py
|
shahasifbashir/LearnPython
|
4ce6b81d66ea7bbf0a40427871daa4e563b6a184
|
[
"MIT"
] | null | null | null |
# A simple list
myList = [10,20,4,5,6,2,9,10,2,3,34,14]
#print the whole list
print("The List is {}".format(myList))
# printing elements of the list one by one
print("printing elements of the list one by one")
for elements in myList:
print(elements)
print("")
#printing elements that are greater than 10 only
print("printing elements that are greater than 10 only")
for elements in myList:
if(elements>10):
print(elements)
#printing elements that are less than 10 by building a new list and appending the matching elements to it
newList = []
for elements in myList:
if(elements <10):
newList.append(elements)
print("")
print("Print the new List \n{}".format(newList))
#print the above list part using a single line
print(" The list is {}".format([item for item in myList if item < 10]))
# structure: [item (the output expression) for item in myList (the input list) if item < 10 (the condition)]
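# for example, [x * 2 for x in [1, 2, 3] if x > 1] evaluates to [4, 6]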
#Ask the user for a number and print the elements of the list less than that number
print("Input a number : ")
num = int(input())
print(" The elemnts of the list less that {} are {}".format(num,[item for item in myList if item < num]))
| 25.869565
| 139
| 0.696639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 699
| 0.587395
|
55d68de8c22f2deefdb481f4a73d47295a2e3b27
| 870
|
py
|
Python
|
pmapi/app.py
|
jbushman/primemirror-api
|
4844d57b5581a2d537996c77eec65956ef5f1dc9
|
[
"Apache-2.0"
] | null | null | null |
pmapi/app.py
|
jbushman/primemirror-api
|
4844d57b5581a2d537996c77eec65956ef5f1dc9
|
[
"Apache-2.0"
] | null | null | null |
pmapi/app.py
|
jbushman/primemirror-api
|
4844d57b5581a2d537996c77eec65956ef5f1dc9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
from pmapi.config import Config, get_logger
import os
import logging
import requests
import connexion
from flask import Flask, request
logger = get_logger()
# if not Config.TOKEN:
# data = {
# "hostname": Config.HOSTNAME,
# "ip": Config.IP,
# "state": Config.STATE,
# "url": Config.URL,
# "service_type": Config.SERVICE_TYPE,
# "roles": "'service', 'primemirror'",
# }
# logging.info("Registering Service: ".format(data))
# r = requests.post("{}/register/service".format(Config.DEPLOYMENT_API_URI), json=data, verify=False)
# resp = r.json()
# if "TOKEN" in resp:
# update_env("TOKEN", resp["TOKEN"])
flask_app = connexion.FlaskApp(__name__)
flask_app.add_api("openapi.yaml", validate_responses=True, strict_validation=True)
app = flask_app.app
app.config.from_object(Config)
| 24.857143
| 104
| 0.670115
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 529
| 0.608046
|
55d7d78c6937d21c0eddc062cc73761c958ba202
| 1,175
|
py
|
Python
|
python/setup.py
|
chrisdembia/StateMint
|
53fdaabc7ba83fb477523ae9b79ccc964e791080
|
[
"BSD-3-Clause"
] | null | null | null |
python/setup.py
|
chrisdembia/StateMint
|
53fdaabc7ba83fb477523ae9b79ccc964e791080
|
[
"BSD-3-Clause"
] | null | null | null |
python/setup.py
|
chrisdembia/StateMint
|
53fdaabc7ba83fb477523ae9b79ccc964e791080
|
[
"BSD-3-Clause"
] | null | null | null |
import setuptools
with open('README.md') as f:
long_description=f.read()
setuptools.setup(
name="StateMint",
version="1.0.0",
author="Cameron Devine",
author_email="camdev@uw.edu",
description="A library for finding State Space models of dynamical systems.",
long_description=long_description,
long_description_content_type='text/markdown',
url="https://github.com/CameronDevine/StateMint",
packages=setuptools.find_packages(),
python_requires=">=2.7",
install_requires=("sympy>=0.7.3",),
classifiers=(
"Development Status :: 4 - Beta",
"Framework :: Jupyter",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.0",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Operating System :: OS Independent",
),
)
| 31.756757
| 78
| 0.691064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 791
| 0.673191
|
55d8e1c6fdbebec334001ecd1716470ce185570d
| 1,001
|
py
|
Python
|
cha_bebe/presente/migrations/0001_initial.py
|
intelektos/Cha_bebe
|
23df4af3901413c9c50e73bd305ade165c81001b
|
[
"MIT"
] | null | null | null |
cha_bebe/presente/migrations/0001_initial.py
|
intelektos/Cha_bebe
|
23df4af3901413c9c50e73bd305ade165c81001b
|
[
"MIT"
] | 9
|
2020-06-08T03:31:08.000Z
|
2022-01-13T02:44:42.000Z
|
cha_bebe/presente/migrations/0001_initial.py
|
intelektos/Cha_bebe
|
23df4af3901413c9c50e73bd305ade165c81001b
|
[
"MIT"
] | 1
|
2020-06-01T17:43:20.000Z
|
2020-06-01T17:43:20.000Z
|
# Generated by Django 3.0.6 on 2020-05-14 18:13
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Presente',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titulo', models.CharField(max_length=100)),
('slug', models.SlugField(blank=True, max_length=100, unique=True)),
('descricao', models.TextField(blank=True, null=True)),
('valor', models.FloatField(blank=True, null=True)),
('imagem', models.ImageField(blank=True, null=True, upload_to='presentes/imagens')),
('thumbnail', models.ImageField(blank=True, null=True, upload_to='presentes/thumbnail')),
],
options={
'ordering': ('titulo',),
},
),
]
| 33.366667
| 114
| 0.566434
| 908
| 0.907093
| 0
| 0
| 0
| 0
| 0
| 0
| 174
| 0.173826
|
55da18f8f5bba77168080eaa5260eeadfe4bb7f4
| 2,376
|
py
|
Python
|
src/rekognition_online_action_detection/models/feature_head.py
|
amazon-research/long-short-term-transformer
|
a425be4b52ab68fddd85c91d26571e4cdfe8379a
|
[
"Apache-2.0"
] | 52
|
2021-11-19T01:35:10.000Z
|
2022-03-24T11:48:10.000Z
|
src/rekognition_online_action_detection/models/feature_head.py
|
amazon-research/long-short-term-transformer
|
a425be4b52ab68fddd85c91d26571e4cdfe8379a
|
[
"Apache-2.0"
] | 9
|
2021-11-24T18:50:13.000Z
|
2022-03-10T05:13:53.000Z
|
src/rekognition_online_action_detection/models/feature_head.py
|
amazon-research/long-short-term-transformer
|
a425be4b52ab68fddd85c91d26571e4cdfe8379a
|
[
"Apache-2.0"
] | 8
|
2022-01-15T08:01:33.000Z
|
2022-03-20T22:08:29.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
__all__ = ['build_feature_head']
import torch
import torch.nn as nn
from rekognition_online_action_detection.utils.registry import Registry
FEATURE_HEADS = Registry()
FEATURE_SIZES = {
'rgb_anet_resnet50': 2048,
'flow_anet_resnet50': 2048,
'rgb_kinetics_bninception': 1024,
'flow_kinetics_bninception': 1024,
'rgb_kinetics_resnet50': 2048,
'flow_kinetics_resnet50': 2048,
}
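# Feature dimensionality of each supported backbone; when both modalities are used, their features are concatenated before the optional linear projection.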
@FEATURE_HEADS.register('THUMOS')
@FEATURE_HEADS.register('TVSeries')
class BaseFeatureHead(nn.Module):
def __init__(self, cfg):
super(BaseFeatureHead, self).__init__()
if cfg.INPUT.MODALITY in ['visual', 'motion', 'twostream']:
self.with_visual = 'motion' not in cfg.INPUT.MODALITY
self.with_motion = 'visual' not in cfg.INPUT.MODALITY
else:
raise RuntimeError('Unknown modality of {}'.format(cfg.INPUT.MODALITY))
if self.with_visual and self.with_motion:
visual_size = FEATURE_SIZES[cfg.INPUT.VISUAL_FEATURE]
motion_size = FEATURE_SIZES[cfg.INPUT.MOTION_FEATURE]
fusion_size = visual_size + motion_size
elif self.with_visual:
fusion_size = FEATURE_SIZES[cfg.INPUT.VISUAL_FEATURE]
elif self.with_motion:
fusion_size = FEATURE_SIZES[cfg.INPUT.MOTION_FEATURE]
self.d_model = fusion_size
if cfg.MODEL.FEATURE_HEAD.LINEAR_ENABLED:
if cfg.MODEL.FEATURE_HEAD.LINEAR_OUT_FEATURES != -1:
self.d_model = cfg.MODEL.FEATURE_HEAD.LINEAR_OUT_FEATURES
self.input_linear = nn.Sequential(
nn.Linear(fusion_size, self.d_model),
nn.ReLU(inplace=True),
)
else:
self.input_linear = nn.Identity()
def forward(self, visual_input, motion_input):
if self.with_visual and self.with_motion:
fusion_input = torch.cat((visual_input, motion_input), dim=-1)
elif self.with_visual:
fusion_input = visual_input
elif self.with_motion:
fusion_input = motion_input
fusion_input = self.input_linear(fusion_input)
return fusion_input
def build_feature_head(cfg):
feature_head = FEATURE_HEADS[cfg.DATA.DATA_NAME]
return feature_head(cfg)
| 33.942857
| 83
| 0.673822
| 1,681
| 0.707492
| 0
| 0
| 1,751
| 0.736953
| 0
| 0
| 349
| 0.146886
|
55dae12ae7fedf07888052fca21d9aabf3ce95df
| 1,367
|
py
|
Python
|
main.py
|
klarman-cell-observatory/cirrocumulus-app-engine
|
52997ae790773364591ab8d7c747e4505700373b
|
[
"BSD-3-Clause"
] | null | null | null |
main.py
|
klarman-cell-observatory/cirrocumulus-app-engine
|
52997ae790773364591ab8d7c747e4505700373b
|
[
"BSD-3-Clause"
] | 1
|
2021-04-13T14:52:39.000Z
|
2021-04-13T15:53:34.000Z
|
main.py
|
klarman-cell-observatory/cirrocumulus-app-engine
|
52997ae790773364591ab8d7c747e4505700373b
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
sys.path.append('lib')
from flask import Flask, send_from_directory
import cirrocumulus
from cirrocumulus.cloud_firestore_native import CloudFireStoreNative
from cirrocumulus.api import blueprint
from cirrocumulus.envir import CIRRO_AUTH_CLIENT_ID, CIRRO_AUTH, CIRRO_DATABASE, CIRRO_DATASET_PROVIDERS
from cirrocumulus.google_auth import GoogleAuth
from cirrocumulus.no_auth import NoAuth
from cirrocumulus.util import add_dataset_providers
client_path = os.path.join(cirrocumulus.__path__[0], 'client')
# If `entrypoint` is not defined in app.yaml, App Engine will look for an app
# called `app` in `main.py`.
app = Flask(__name__, static_folder=client_path, static_url_path='')
app.register_blueprint(blueprint, url_prefix='/api')
@app.route('/')
def root():
return send_from_directory(client_path, "index.html")
if os.environ.get(CIRRO_AUTH_CLIENT_ID) is not None:
app.config[CIRRO_AUTH] = GoogleAuth(os.environ.get(CIRRO_AUTH_CLIENT_ID))
else:
app.config[CIRRO_AUTH] = NoAuth()
app.config[CIRRO_DATABASE] = CloudFireStoreNative()
os.environ[CIRRO_DATASET_PROVIDERS] = ','.join(['cirrocumulus.zarr_dataset.ZarrDataset',
'cirrocumulus.parquet_dataset.ParquetDataset'])
add_dataset_providers()
if __name__ == '__main__':
app.run(host='127.0.0.1', port=5000, debug=True)
| 33.341463
| 104
| 0.766642
| 0
| 0
| 0
| 0
| 85
| 0.06218
| 0
| 0
| 249
| 0.182151
|
55db43f69d53783216fd36c9fb7e70e68c557460
| 823
|
py
|
Python
|
utils/load_externals.py
|
uvasrg/FeatureSqueezing
|
8448fbff07bf03ff81a52dbd7e014d5733035f56
|
[
"MIT"
] | 56
|
2017-05-19T23:30:13.000Z
|
2021-11-16T09:15:48.000Z
|
utils/load_externals.py
|
pengpengqiao/FeatureSqueezing
|
5ca04dc704dda578df53f5234f4dabbfc3e3ec62
|
[
"MIT"
] | 1
|
2018-03-12T03:47:45.000Z
|
2018-03-12T03:47:45.000Z
|
utils/load_externals.py
|
pengpengqiao/FeatureSqueezing
|
5ca04dc704dda578df53f5234f4dabbfc3e3ec62
|
[
"MIT"
] | 19
|
2017-06-11T08:33:19.000Z
|
2022-01-03T09:46:44.000Z
|
import sys, os
external_libs = {'Cleverhans v1.0.0': "externals/cleverhans",
'Tensorflow-Model-Resnet': "externals/tensorflow-models",
}
project_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
for lib_name, lib_path in external_libs.iteritems():
lib_path = os.path.join(project_path, lib_path)
if os.listdir(lib_path) == []:
cmd = "git submodule update --init --recursive"
print("Fetching external libraries...")
os.system(cmd)
if lib_name == 'Tensorflow-Model-Resnet':
lib_token_fpath = os.path.join(lib_path, 'resnet', '__init__.py')
if not os.path.isfile(lib_token_fpath):
open(lib_token_fpath, 'a').close()
sys.path.append(lib_path)
print("Located %s" % lib_name)
# print (sys.path)
| 32.92
| 75
| 0.64277
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 247
| 0.300122
|
55dc16af3929e96db5e96a0d381158d79e762fbd
| 2,333
|
py
|
Python
|
research/seq_flow_lite/utils/misc_utils.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
research/seq_flow_lite/utils/misc_utils.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
research/seq_flow_lite/utils/misc_utils.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""A module for miscelaneous utils."""
import tensorflow as tf
def random_substr(str_tensor, max_words):
"""Select random substring if the input has more than max_words."""
word_batch_r = tf.strings.split(str_tensor)
row_splits = word_batch_r.row_splits
words = word_batch_r.values
start_idx = row_splits[:-1]
end_idx = row_splits[1:]
words_per_example = end_idx - start_idx
ones = tf.ones_like(end_idx)
max_val = tf.maximum(ones, words_per_example - max_words)
max_words_batch = tf.reduce_max(words_per_example)
rnd = tf.random.uniform(
tf.shape(start_idx), minval=0, maxval=max_words_batch, dtype=tf.int64)
off_start_idx = tf.math.floormod(rnd, max_val)
new_words_per_example = tf.where(
tf.equal(max_val, 1), words_per_example, ones * max_words)
new_start_idx = start_idx + off_start_idx
new_end_idx = new_start_idx + new_words_per_example
indices = tf.expand_dims(tf.range(tf.size(words), dtype=tf.int64), axis=0)
within_limit = tf.logical_and(
tf.greater_equal(indices, tf.expand_dims(new_start_idx, axis=1)),
tf.less(indices, tf.expand_dims(new_end_idx, axis=1)))
keep_indices = tf.reduce_any(within_limit, axis=0)
keep_indices = tf.cast(keep_indices, dtype=tf.int32)
_, selected_words = tf.dynamic_partition(words, keep_indices, 2)
row_splits = tf.math.cumsum(new_words_per_example)
row_splits = tf.concat([[0], row_splits], axis=0)
new_tensor = tf.RaggedTensor.from_row_splits(
values=selected_words, row_splits=row_splits)
return tf.strings.reduce_join(new_tensor, axis=1, separator=" ")
| 46.66
| 81
| 0.714102
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 815
| 0.349336
|
55dc932db8d55326783afe7c9ef113e659643f67
| 2,503
|
py
|
Python
|
parc/pra__/incomplete_13910.py
|
KwanHoo/Data-Structure__Algorithm
|
b985f8b41a366b9c028da711ea43a643151268e2
|
[
"MIT"
] | null | null | null |
parc/pra__/incomplete_13910.py
|
KwanHoo/Data-Structure__Algorithm
|
b985f8b41a366b9c028da711ea43a643151268e2
|
[
"MIT"
] | null | null | null |
parc/pra__/incomplete_13910.py
|
KwanHoo/Data-Structure__Algorithm
|
b985f8b41a366b9c028da711ea43a643151268e2
|
[
"MIT"
] | null | null | null |
## Baekjoon 13910
## Opening a Restaurant
## Dynamic programming
## (Jjajangmyeon Day)
'''
##! ex) N = 4: cooking 5 or more bowls at once is not allowed, and the size-4 wok may not cook 3 bowls or fewer => the size-4 wok cooks exactly 4 bowls
##* ex) N = 5, wok sizes 1 and 3 / first round: 1+3 = 4 bowls, second round: 1 => 5 bowls --> order handled in 2 rounds of cooking
##* given the number of jjajangmyeon bowls ordered and the wok sizes available => process the order
# In1 ) N M : N (number of bowls ordered) | M (number of woks owned)
# In2 ) S : M wok sizes S (several woks of the same size are allowed)
# out ) minimum number of cooking rounds Hyebin needs to handle every order | -1 if the order cannot be handled
'''
'''
ex1 In) order of 5, 2 woks
ex1 In) sizes 1 and 3
out ) 2
ex2 In) order of 6, 2 woks
ex2 In) sizes 1 and 3
out ) 2
5 2
2 4
=> 4|1 1<2 : -1
13 3
'''
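# e.g. example 2: N = 6 with wok sizes 1 and 3 -> cook 3 + 3 = 6 bowls, so 2 rounds of cooking are enough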
import sys
## Prototype
def cooking(N,M,wig):
count = 0
temp = N
breakpoint = False
while temp != 0:
for i in range(M-1, -1, -1):
if wig[i] < temp or wig[i] == temp:
temp -= wig[i]
count += 1
else:
continue
if i == 0:
k = wig[0]
if temp % k == 0:
count = count + (temp//k)
else:
breakpoint = True
count = -1
if breakpoint == True:
break
return count
## Passes test case 1, fails test case 2
def cooking2(N, M, wig):
temp = N
count = 0
while temp != 0:
        ## base case
if wig[0] > temp:
count = -1
break
        for i in range(M-1, -1, -1): ## from index M-1 down to 0
if wig[i] < temp or wig[i] == temp:
temp -= wig[i]
count += 1
return count
## Incomplete
def cooking4(N, M, wig):
temp = N
count = 0
while temp != 0:
        ## base case
if wig[0] > temp:
count = -1
break
for j in range(M-1, -1, -1):
            ## handle repeated use of the same wok size
if temp % wig[j] > 1: ## 7 = 3*2 + 1 ## 8 = 3*2 + 1*2
if temp % wig[j] == 0: ## 6 = 3*2
count += temp // wig[j]
temp = 0
break
else:
                ## repetition not considered
                for i in range(M-1, -1, -1): # from index M-1 down to 0
if wig[i] < temp or wig[i] == temp:
temp -= wig[i]
count += 1
return count
if __name__ == "__main__":
print('hello')
N, M = map(int, sys.stdin.readline().split())
wig = list(map(int, sys.stdin.readline().split()))
    wig.sort()  # sort the wok sizes
    # print(wig)
    # print(cooking(N, M, wig)) ## does not give correct output
    # print(cooking2(N,M,wig)) ## fails to handle repetition in test case 2
    print(cooking4(N,M,wig)) ## incomplete
| 20.68595
| 83
| 0.411506
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,273
| 0.426752
|
55dce36c7d1bd205aea80744f2bd0ceb8afc6832
| 1,169
|
py
|
Python
|
manage/db_logger.py
|
ReanGD/web-home-manage
|
bbc5377a1f7fde002442fee7720e4ab9e9ad22b3
|
[
"Apache-2.0"
] | null | null | null |
manage/db_logger.py
|
ReanGD/web-home-manage
|
bbc5377a1f7fde002442fee7720e4ab9e9ad22b3
|
[
"Apache-2.0"
] | null | null | null |
manage/db_logger.py
|
ReanGD/web-home-manage
|
bbc5377a1f7fde002442fee7720e4ab9e9ad22b3
|
[
"Apache-2.0"
] | null | null | null |
import sys
import traceback
from manage.models import LoadLog
class DbLogger(object):
def __init__(self, rec_id=None):
if rec_id is None:
self.rec = LoadLog.objects.create()
else:
self.rec = LoadLog.objects.get(pk=int(rec_id))
def remove_torrent(self):
if self.rec.torent_ptr is not None:
for it in LoadLog.objects.filter(torent_ptr=self.rec.torent_ptr):
it.torent_ptr = None
it.save()
def id(self):
return self.rec.id
def json_result(self):
return {'result': self.rec.result, 'text': self.rec.text}
def text(self):
return self.rec.text
def write(self, msg):
self.rec.text += ("\n" + msg)
self.rec.save()
def set_result(self, result):
self.rec.result = result
self.rec.save()
def set_torrent(self, t):
        self.rec.torent_ptr = t
self.rec.save()
def exception(self):
e_type, e_value, e_traceback = sys.exc_info()
s = "\n".join(traceback.format_exception(e_type, e_value, e_traceback))
self.write(s)
self.set_result(LoadLog.RES_FAILED)
| 25.977778
| 79
| 0.597092
| 1,104
| 0.944397
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.01882
|
55dcf3dd3bd27fb171fb592911ad357dd0bb432c
| 5,623
|
py
|
Python
|
api/src/result_handler.py
|
Aragos/tichu-tournament
|
4cdf727a30af8820ad56fe3097ec9a8e84892068
|
[
"MIT"
] | 7
|
2016-12-12T02:29:42.000Z
|
2020-05-12T21:21:21.000Z
|
api/src/result_handler.py
|
Aragos/tichu-tournament
|
4cdf727a30af8820ad56fe3097ec9a8e84892068
|
[
"MIT"
] | 31
|
2017-01-05T06:07:28.000Z
|
2018-05-27T13:13:06.000Z
|
api/src/result_handler.py
|
Aragos/tichu-tournament
|
4cdf727a30af8820ad56fe3097ec9a8e84892068
|
[
"MIT"
] | 3
|
2017-12-21T23:30:12.000Z
|
2019-01-03T20:51:52.000Z
|
import webapp2
import json
from generic_handler import GenericHandler
from python.calculator import Calculate
from python.calculator import GetMaxRounds
from google.appengine.api import users
from handler_utils import BuildMovementAndMaybeSetStatus
from handler_utils import CheckUserOwnsTournamentAndMaybeReturnStatus
from handler_utils import GetTourneyWithIdAndMaybeReturnStatus
from handler_utils import SetErrorStatus
from python.jsonio import ReadJSONInput
from python.jsonio import OutputJSON
from python.xlsxio import WriteResultsToXlsx
from python.xlsxio import OutputWorkbookAsBytesIO
from models import PlayerPair
from models import Tournament
def GetPlayerListForTourney(tourney):
''' Returns a list of tuples of names for every pair.'''
name_list = range(1, tourney.no_pairs + 1)
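  # Entries default to the pair number and are overwritten with (name1, name2) tuples where player info exists.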
for player_pair in PlayerPair.query(ancestor=tourney.key).fetch():
if player_pair.players:
player_list = player_pair.player_list()
if not player_list:
continue
elif len(player_list) == 1:
name_list[player_pair.pair_no - 1] = (player_list[0].get("name"),
None)
else:
name_list[player_pair.pair_no - 1] = (player_list[0].get("name"),
player_list[1].get("name"))
else:
name_list[player_pair.pair_no - 1] = (None, None)
return name_list
class CompleteScoringHandler(GenericHandler):
''' Handles calls to /api/tournament/:id/handStatus '''
def get(self, id):
tourney = GetTourneyWithIdAndMaybeReturnStatus(self.response, id)
if not tourney:
return
if not CheckUserOwnsTournamentAndMaybeReturnStatus(self.response,
users.get_current_user(), tourney):
return
movement = BuildMovementAndMaybeSetStatus(
self.response, tourney.no_pairs, tourney.no_boards,
tourney.legacy_version_id)
if not movement:
return
name_list= GetPlayerListForTourney(tourney)
scored_hands = self._TuplesToDict(tourney.ScoredHands())
unscored_hands = []
round_list = []
for round_no in xrange (1, movement.GetNumRounds() + 1):
round_dict = {}
round_dict["round"] = round_no
round_dict["scored_hands"] = []
round_dict["unscored_hands"] = []
for team_no in xrange(1, tourney.no_pairs + 1):
round = movement.GetMovement(team_no)[round_no - 1]
hands = round.hands
if not hands or not round.is_north:
continue
for hand in hands:
hand_dict = {"hand" : hand, "ns_pair": team_no,
"ns_names": list(name_list[team_no - 1]),
"ew_pair" : round.opponent,
"ew_names": list(name_list[round.opponent - 1]),
"table" : round.table }
if hand in scored_hands.get(team_no, []):
scored_unscored = "scored_hands"
else:
scored_unscored = "unscored_hands"
round_dict[scored_unscored].append(hand_dict)
round_dict["scored_hands"].sort(key=lambda x : x["table"])
round_dict["unscored_hands"].sort(key=lambda x : x["table"])
round_dict["scored_hands"].sort(key=lambda x : x["hand"])
round_dict["unscored_hands"].sort(key=lambda x : x["hand"])
round_list.append(round_dict)
self.response.headers['Content-Type'] = 'application/json'
self.response.set_status(200)
self.response.out.write(json.dumps({"rounds" : round_list }, indent=2))
def _TuplesToDict(self, hands):
''' Take tuples representing each hand and dump them into a per-pair dict.
Args:
hands: list of tuples (hand, ns_pair, ew_pair).
Returns:
Dictionary from team to list of hand numbers already played.
'''
ret = {}
for hand in hands:
ret.setdefault(hand[1], []).append(hand[0])
ret.setdefault(hand[2], []).append(hand[0])
return ret
class ResultHandler(GenericHandler):
def get(self, id):
tourney = GetTourneyWithIdAndMaybeReturnStatus(self.response, id)
if not tourney:
return
if not CheckUserOwnsTournamentAndMaybeReturnStatus(self.response,
users.get_current_user(), tourney):
return
hand_list = tourney.GetScoredHandList()
boards = ReadJSONInput(hand_list)
summaries = Calculate(boards, GetMaxRounds(boards))
self.response.headers['Content-Type'] = 'application/json'
self.response.set_status(200)
self.response.out.write(OutputJSON(hand_list, summaries))
class XlxsResultHandler(GenericHandler):
def get(self, id):
tourney = GetTourneyWithIdAndMaybeReturnStatus(self.response, id)
if not tourney:
return
if not CheckUserOwnsTournamentAndMaybeReturnStatus(self.response,
users.get_current_user(), tourney):
return
boards = ReadJSONInput(tourney.GetScoredHandList())
max_rounds = GetMaxRounds(boards)
summaries = Calculate(boards, max_rounds)
mp_summaries = summaries
ap_summaries = summaries
boards.sort(key=lambda bs : bs._board_no, reverse = False)
wb = WriteResultsToXlsx(max_rounds, mp_summaries, ap_summaries, boards,
name_list=GetPlayerListForTourney(tourney))
self.response.out.write(OutputWorkbookAsBytesIO(wb).getvalue())
self.response.headers['Content-Type'] = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
self.response.headers['Content-disposition'] = str('attachment; filename=' +
tourney.name + 'TournamentResults.xlsx')
self.response.headers['Content-Transfer-Encoding'] = 'Binary'
self.response.set_status(200)
| 38.251701
| 111
| 0.686111
| 4,223
| 0.751023
| 0
| 0
| 0
| 0
| 0
| 0
| 817
| 0.145296
|
55de8a6657e59552d97157f0e3318b5e7abae0d2
| 323
|
py
|
Python
|
electsysApi/shared/exception.py
|
yuxiqian/electsys-api
|
52b42729e797f8bdf6a0827e9d62a50919d56d65
|
[
"MIT"
] | 5
|
2019-01-21T00:44:33.000Z
|
2022-01-03T16:45:25.000Z
|
electsysApi/shared/exception.py
|
yuxiqian/electsys-api
|
52b42729e797f8bdf6a0827e9d62a50919d56d65
|
[
"MIT"
] | 1
|
2021-10-24T00:46:59.000Z
|
2021-10-24T00:46:59.000Z
|
electsysApi/shared/exception.py
|
yuxiqian/electsys-api
|
52b42729e797f8bdf6a0827e9d62a50919d56d65
|
[
"MIT"
] | 2
|
2019-01-12T03:18:33.000Z
|
2021-06-16T11:19:49.000Z
|
#!/usr/bin/env python
# encoding: utf-8
'''
@author: yuxiqian
@license: MIT
@contact: akaza_akari@sjtu.edu.cn
@software: electsys-api
@file: electsysApi/shared/exception.py
@time: 2019/1/9
'''
class RequestError(BaseException):
pass
class ParseError(BaseException):
pass
class ParseWarning(Warning):
pass
| 14.043478
| 38
| 0.721362
| 121
| 0.374613
| 0
| 0
| 0
| 0
| 0
| 0
| 190
| 0.588235
|
55ded0b36a3a4b147484ae30e7276b05b17dc456
| 2,375
|
py
|
Python
|
src/CryptoPlus/Cipher/ARC2.py
|
voytecPL/pycryptoplus
|
86905bbb8661e00cfb2afdc4461d4a79b6429d8a
|
[
"MIT"
] | 1
|
2022-02-27T17:46:18.000Z
|
2022-02-27T17:46:18.000Z
|
src/CryptoPlus/Cipher/ARC2.py
|
voytecPL/pycryptoplus
|
86905bbb8661e00cfb2afdc4461d4a79b6429d8a
|
[
"MIT"
] | null | null | null |
src/CryptoPlus/Cipher/ARC2.py
|
voytecPL/pycryptoplus
|
86905bbb8661e00cfb2afdc4461d4a79b6429d8a
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from .blockcipher import *
import Crypto.Cipher.ARC2
import Crypto
from pkg_resources import parse_version
def new(key,mode=MODE_ECB,IV=None,counter=None,segment_size=None,effective_keylen=None):
"""Create a new cipher object
ARC2 using pycrypto for algo and pycryptoplus for ciphermode
key = raw string containing the keys
mode = python_AES.MODE_ECB/CBC/CFB/OFB/CTR/CMAC, default is ECB
IV = IV as a raw string, default is "all zero" IV
-> only needed for CBC mode
counter = counter object (CryptoPlus.Util.util.Counter)
-> only needed for CTR mode
segment_size = amount of bits to use from the keystream in each chain part
-> supported values: multiple of 8 between 8 and the blocksize
of the cipher (only per byte access possible), default is 8
-> only needed for CFB mode
effective_keylen = how much bits to effectively use from the supplied key
-> will only be used when the pycrypto version on your system is >2.0.1
EXAMPLES:
**********
IMPORTING:
-----------
>>> import codecs
>>> from CryptoPlus.Cipher import ARC2
http://www.ietf.org/rfc/rfc2268.txt
Doctest will fail when using pycrypto 2.0.1 and older
------------------------------------
>>> key = codecs.decode("0000000000000000", 'hex')
>>> plaintext = codecs.decode("0000000000000000", 'hex')
>>> ek = 63
>>> cipher = ARC2.new(key,ARC2.MODE_ECB,effective_keylen=ek)
>>> codecs.encode(cipher.encrypt(plaintext), 'hex')
b'ebb773f993278eff'
"""
return ARC2(key,mode,IV,counter,effective_keylen,segment_size)
class ARC2(BlockCipher):
def __init__(self,key,mode,IV,counter,effective_keylen,segment_size):
# pycrypto versions newer than 2.0.1 will have support for "effective_keylen"
if parse_version(Crypto.__version__) <= parse_version("2.0.1"):
cipher_module = Crypto.Cipher.ARC2.new
args = {}
else:
cipher_module = Crypto.Cipher.ARC2.new
args = {'effective_keylen':effective_keylen}
self.blocksize = 8
BlockCipher.__init__(self,key,mode,IV,counter,cipher_module,segment_size,args)
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
| 38.306452
| 88
| 0.656421
| 565
| 0.237895
| 0
| 0
| 0
| 0
| 0
| 0
| 1,517
| 0.638737
|
55e1293b8209552c67ecb749af45c55f2d9be6aa
| 1,121
|
py
|
Python
|
extensions/roles.py
|
iLuiizUHD/Expertise-Bot-v2
|
2b5264804d14d74ce1c0511dede434b7225683e0
|
[
"MIT"
] | 2
|
2020-11-01T02:44:58.000Z
|
2021-02-21T18:05:39.000Z
|
extensions/roles.py
|
iLuiizUHD/Expertise-Bot-v2
|
2b5264804d14d74ce1c0511dede434b7225683e0
|
[
"MIT"
] | 1
|
2020-09-13T20:53:26.000Z
|
2020-09-13T20:53:26.000Z
|
extensions/roles.py
|
iLuiizUHD/ExpertiseBot2
|
2b5264804d14d74ce1c0511dede434b7225683e0
|
[
"MIT"
] | null | null | null |
# Utilities
import json
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# Imports
from discord.ext import commands
from discord import Guild, Role
# Loading config file...
with open("./config.json", "r", encoding="utf-8") as config:
configFile = json.load(config)
class Roles(commands.Cog, name="Roles management"):
def __init__(self, client):
self.client = client
# MySQL
self.connectionStr = configFile["config"]["db"]
# MySQL SQLAlchemy Engine Creation
self.MySQLEngine = create_engine(
self.connectionStr,
pool_size=10,
pool_recycle=3600,
max_overflow=5,
echo=True
)
# SQL Alchemy session
self.sqlSession = sessionmaker(bind=self.MySQLEngine)
self.session = self.sqlSession()
@commands.command(name="Get channel roles", pass_context=True)
async def role(self, ctx, command_='get', role: Role = None):
roles = await Guild.fetch_roles(ctx.guild)
print(roles)
def setup(client):
client.add_cog(Roles(client))
| 26.069767
| 66
| 0.64942
| 765
| 0.682426
| 0
| 0
| 200
| 0.178412
| 133
| 0.118644
| 185
| 0.165031
|
55e14400b4aed5430ec4803712092997b45a1d19
| 4,076
|
py
|
Python
|
amun/measure_accuracy.py
|
Elkoumy/amun
|
db07129450979cb8dd95b086b8e4187facb85bb8
|
[
"Apache-2.0"
] | 10
|
2020-12-03T08:30:51.000Z
|
2021-12-12T11:03:47.000Z
|
amun/measure_accuracy.py
|
Elkoumy/amun
|
db07129450979cb8dd95b086b8e4187facb85bb8
|
[
"Apache-2.0"
] | 1
|
2021-10-01T09:52:26.000Z
|
2021-10-07T08:52:46.000Z
|
amun/measure_accuracy.py
|
Elkoumy/amun
|
db07129450979cb8dd95b086b8e4187facb85bb8
|
[
"Apache-2.0"
] | null | null | null |
"""
In this module, we implement the accuracy measures to evaluate the effect of differential privacy injection.
In this module, we support the following measures:
* F1-score.
* Earth Mover's distance.
"""
from scipy.stats import wasserstein_distance
from pm4py.algo.discovery.inductive import factory as inductive_miner
from pm4py.evaluation.replay_fitness import factory as replay_factory
from math import fabs
import pandas as pd
def earth_mover_dist(dfg1, dfg2):
    # need to account for zero frequencies, as the Counter object doesn't include them
    # after discussion, we decided to let the user know about this issue so they can handle it on their own
v1=list(dfg1.values())
v2=list(dfg2.values())
distance = wasserstein_distance(v1,v2)
return distance
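# minimal usage sketch (made-up DFG frequency dicts, not taken from any real log):
# earth_mover_dist({('a', 'b'): 3, ('b', 'c'): 5}, {('a', 'b'): 4, ('b', 'c'): 4})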
def percentage_dist(dfg1,dfg2):
#returns the maximum percentage difference between the two DFGs
distance =0
distance_dist={}
for key in dfg1.keys():
if dfg1[key]!=0: #division by zero
diff = fabs(dfg1[key]-dfg2[key])/dfg1[key]
else:
diff = fabs( ((100-dfg1[key]) - (100-dfg2[key])) / (100-dfg1[key]) )
distance_dist[key]=diff
if diff>distance:
distance=diff
return distance, distance_dist
def error_calculation(dfg1,dfg2):
#return MAPE, SMAPE, and distribution of APE between two DFGs.
total =0
smape_acc=0
APE_dist={}
MAPE_dist={}
SMAPE_dist={}
for key in dfg1.keys():
if dfg1[key]!=0: #division by zero
diff = fabs(dfg1[key]-dfg2[key])/fabs(dfg1[key])
smape= abs(dfg1[key] - dfg2[key]) / abs(dfg1[key] + dfg2[key])
else:
diff = fabs( ((100-dfg1[key]) - (100-dfg2[key])) / fabs(100-dfg1[key]) )
smape= abs((100-dfg1[key] )- (100-dfg2[key])) / abs((100-dfg1[key]) + (100-dfg2[key]))
APE_dist[key]=diff
smape_acc +=smape
SMAPE_dist[key]=smape
# smape_acc+=abs(dfg1[key]-dfg2[key])/(dfg1[key]+dfg2[key])
total+=diff
MAPE= total/len(dfg1.keys())
SMAPE=smape_acc/len(dfg1.keys())
return MAPE, SMAPE, APE_dist, SMAPE_dist
def f1_score(xes_file,dfg1,dfg2):
f1_score_1, f1_score_2=0,0
#first we use inductive miner to generate the petric nets of both the DFGs
net1, initial_marking1, final_marking1 = inductive_miner.apply(dfg1)
net2, initial_marking2, final_marking2 = inductive_miner.apply(dfg2)
fitness_1 = replay_factory.apply(xes_file, net1, initial_marking1, final_marking1)
fitness_2 = replay_factory.apply(xes_file, net2, initial_marking2, final_marking2)
return fitness_1, fitness_2
def estimate_SMAPE_variant_and_time(data, variant_counts):
smape_variant=0
# mape=((data['relative_time_original']-data["relative_time_anonymized"])/data['relative_time_original']).abs().mean()*100 #percentage
#$
# print("MAPE %s" %(((data['relative_time_original']-data["relative_time_anonymized"])/data['relative_time_original']).abs().mean()*100))
smape_time=((data['relative_time_original']-data["relative_time_anonymized"])/(data['relative_time_original'].abs()+data["relative_time_anonymized"].abs())).abs().mean()*100
variant_freq=pd.Series([ x['count'] for x in variant_counts])
variant_freq_anonymized= data.groupby(['trace_variant','case:concept:name'])['time:timestamp'].count().reset_index().groupby('trace_variant')['case:concept:name'].count()
smape_variant=((variant_freq-variant_freq_anonymized).abs()/(variant_freq+variant_freq_anonymized)).mean()*100
oversampling_per_variant=variant_freq_anonymized/variant_freq
avg_dilation_per_variant=oversampling_per_variant.mean()
oversampling_ratio=data['case:concept:name'].unique().size/variant_freq.sum()
oversampling_df=pd.DataFrame()
oversampling_df['variant_freq_anonymized'] = variant_freq_anonymized
oversampling_df['variant_freq'] = variant_freq
oversampling_df['dilation_per_variant'] = oversampling_per_variant
return data, smape_time, smape_variant, oversampling_ratio,oversampling_df
| 41.591837
| 177
| 0.707802
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,239
| 0.303974
|
55e1eb5bf2eb00d7ba492fd1c7a964baab5327be
| 10,845
|
py
|
Python
|
mkt/translations/models.py
|
ngokevin/zamboni
|
a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/translations/models.py
|
ngokevin/zamboni
|
a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/translations/models.py
|
ngokevin/zamboni
|
a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69
|
[
"BSD-3-Clause"
] | null | null | null |
import collections
from itertools import groupby
from django.db import connections, models, router
from django.db.models.deletion import Collector
from django.utils import encoding
import bleach
import commonware.log
from mkt.site.models import ManagerBase, ModelBase
from mkt.site.utils import linkify_with_outgoing
from . import utils
log = commonware.log.getLogger('z.translations')
class TranslationManager(ManagerBase):
def remove_for(self, obj, locale):
"""Remove a locale for the given object."""
ids = [getattr(obj, f.attname) for f in obj._meta.translated_fields]
qs = Translation.objects.filter(id__in=filter(None, ids),
locale=locale)
qs.update(localized_string=None, localized_string_clean=None)
class Translation(ModelBase):
"""
Translation model.
Use :class:`translations.fields.TranslatedField` instead of a plain foreign
key to this model.
"""
autoid = models.AutoField(primary_key=True)
id = models.IntegerField()
locale = models.CharField(max_length=10)
localized_string = models.TextField(null=True)
localized_string_clean = models.TextField(null=True)
objects = TranslationManager()
class Meta:
db_table = 'translations'
unique_together = ('id', 'locale')
def __unicode__(self):
return self.localized_string and unicode(self.localized_string) or ''
def __nonzero__(self):
# __nonzero__ is called to evaluate an object in a boolean context. We
# want Translations to be falsy if their string is empty.
return (bool(self.localized_string) and
bool(self.localized_string.strip()))
def __eq__(self, other):
# Django implements an __eq__ that only checks pks. We need to check
# the strings if we're dealing with existing vs. unsaved Translations.
return self.__cmp__(other) == 0
def __cmp__(self, other):
if hasattr(other, 'localized_string'):
return cmp(self.localized_string, other.localized_string)
else:
return cmp(self.localized_string, other)
def clean(self):
if self.localized_string:
self.localized_string = self.localized_string.strip()
def save(self, **kwargs):
self.clean()
return super(Translation, self).save(**kwargs)
def delete(self, using=None):
# FIXME: if the Translation is the one used as default/fallback,
# then deleting it will mean the corresponding field on the related
# model will stay empty even if there are translations in other
# languages!
cls = self.__class__
using = using or router.db_for_write(cls, instance=self)
# Look for all translations for the same string (id=self.id) except the
# current one (autoid=self.autoid).
qs = cls.objects.filter(id=self.id).exclude(autoid=self.autoid)
if qs.using(using).exists():
# If other Translations for the same id exist, we just need to
# delete this one and *only* this one, without letting Django
# collect dependencies (it'd remove the others, which we want to
# keep).
assert self._get_pk_val() is not None
collector = Collector(using=using)
collector.collect([self], collect_related=False)
# In addition, because we have FK pointing to a non-unique column,
# we need to force MySQL to ignore constraints because it's dumb
# and would otherwise complain even if there are remaining rows
# that matches the FK.
with connections[using].constraint_checks_disabled():
collector.delete()
else:
# If no other Translations with that id exist, then we should let
# django behave normally. It should find the related model and set
# the FKs to NULL.
return super(Translation, self).delete(using=using)
delete.alters_data = True
@classmethod
def _cache_key(cls, pk, db):
# Hard-coding the class name here so that subclasses don't try to cache
# themselves under something like "o:translations.purifiedtranslation".
#
# Like in ModelBase, we avoid putting the real db in the key because it
# does us more harm than good.
key_parts = ('o', 'translations.translation', pk, 'default')
return ':'.join(map(encoding.smart_unicode, key_parts))
@classmethod
def new(cls, string, locale, id=None):
"""
Jumps through all the right hoops to create a new translation.
If ``id`` is not given a new id will be created using
``translations_seq``. Otherwise, the id will be used to add strings to
an existing translation.
To increment IDs we use a setting on MySQL. This is to support multiple
database masters -- it's just crazy enough to work! See bug 756242.
"""
if id is None:
# Get a sequence key for the new translation.
cursor = connections['default'].cursor()
cursor.execute("""UPDATE translations_seq
SET id=LAST_INSERT_ID(id + @@global.auto_increment_increment)""")
# The sequence table should never be empty. But alas, if it is,
# let's fix it.
if not cursor.rowcount > 0:
cursor.execute("""INSERT INTO translations_seq (id)
VALUES(LAST_INSERT_ID(id + @@global.auto_increment_increment))""")
cursor.execute('SELECT LAST_INSERT_ID()')
id = cursor.fetchone()[0]
# Update if one exists, otherwise create a new one.
q = {'id': id, 'locale': locale}
try:
trans = cls.objects.get(**q)
trans.localized_string = string
except cls.DoesNotExist:
trans = cls(localized_string=string, **q)
return trans
class PurifiedTranslation(Translation):
"""Run the string through bleach to get a safe version."""
allowed_tags = [
'a',
'abbr',
'acronym',
'b',
'blockquote',
'code',
'em',
'i',
'li',
'ol',
'strong',
'ul',
]
allowed_attributes = {
'a': ['href', 'title', 'rel'],
'abbr': ['title'],
'acronym': ['title'],
}
class Meta:
proxy = True
def __unicode__(self):
if not self.localized_string_clean:
self.clean()
return unicode(self.localized_string_clean)
def __html__(self):
return unicode(self)
def __truncate__(self, length, killwords, end):
return utils.truncate(unicode(self), length, killwords, end)
def clean(self):
super(PurifiedTranslation, self).clean()
cleaned = self.clean_localized_string()
self.localized_string_clean = utils.clean_nl(cleaned).strip()
def clean_localized_string(self):
# All links (text and markup) are normalized.
linkified = linkify_with_outgoing(self.localized_string)
# Keep only the allowed tags and attributes, escape the rest.
return bleach.clean(linkified, tags=self.allowed_tags,
attributes=self.allowed_attributes)
class LinkifiedTranslation(PurifiedTranslation):
"""Run the string through bleach to get a linkified version."""
allowed_tags = ['a']
class Meta:
proxy = True
class NoLinksMixin(object):
"""Mixin used to remove links (URLs and text) from localized_string."""
def clean_localized_string(self):
# First pass: bleach everything, but leave links untouched.
cleaned = super(NoLinksMixin, self).clean_localized_string()
# Second pass: call linkify to empty the inner text of all links.
emptied_links = bleach.linkify(
cleaned, callbacks=[lambda attrs, new: {'_text': ''}])
# Third pass: now strip links (only links will be stripped, other
# forbidden tags are already bleached/escaped.
allowed_tags = self.allowed_tags[:] # Make a copy.
allowed_tags.remove('a')
return bleach.clean(emptied_links, tags=allowed_tags, strip=True)
class NoLinksTranslation(NoLinksMixin, PurifiedTranslation):
"""Run the string through bleach, escape markup and strip all the links."""
class Meta:
proxy = True
class NoLinksNoMarkupTranslation(NoLinksMixin, LinkifiedTranslation):
"""Run the string through bleach, escape markup and strip all the links."""
class Meta:
proxy = True
class TranslationSequence(models.Model):
"""
The translations_seq table, so syncdb will create it during testing.
"""
id = models.IntegerField(primary_key=True)
class Meta:
db_table = 'translations_seq'
def delete_translation(obj, fieldname):
field = obj._meta.get_field(fieldname)
trans_id = getattr(obj, field.attname)
obj.update(**{field.name: None})
if trans_id:
Translation.objects.filter(id=trans_id).delete()
def _sorted_groupby(seq, key):
return groupby(sorted(seq, key=key), key=key)
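# groupby only groups consecutive elements, so the sequence is sorted by the same key first.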
def attach_trans_dict(model, objs):
"""Put all translations into a translations dict."""
# Get the ids of all the translations we need to fetch.
fields = model._meta.translated_fields
ids = [getattr(obj, f.attname) for f in fields
for obj in objs if getattr(obj, f.attname, None) is not None]
# Get translations in a dict, ids will be the keys. It's important to
# consume the result of groupby, which is an iterator.
qs = Translation.objects.filter(id__in=ids, localized_string__isnull=False)
all_translations = dict((k, list(v)) for k, v in
_sorted_groupby(qs, lambda trans: trans.id))
def get_locale_and_string(translation, new_class):
"""Convert the translation to new_class (making PurifiedTranslations
and LinkifiedTranslations work) and return locale / string tuple."""
converted_translation = new_class()
converted_translation.__dict__ = translation.__dict__
return (converted_translation.locale.lower(),
unicode(converted_translation))
# Build and attach translations for each field on each object.
for obj in objs:
obj.translations = collections.defaultdict(list)
for field in fields:
t_id = getattr(obj, field.attname, None)
field_translations = all_translations.get(t_id, None)
if not t_id or field_translations is None:
continue
obj.translations[t_id] = [get_locale_and_string(t, field.rel.to)
for t in field_translations]
| 36.0299
| 100
| 0.643799
| 8,505
| 0.784232
| 0
| 0
| 1,950
| 0.179806
| 0
| 0
| 3,932
| 0.362563
|
55e27b739ace5413321cb8d38b36117252a799e4
| 2,564
|
py
|
Python
|
flow/sequential.py
|
altosaar/hierarchical-variational-models-physics
|
611d91e0281664d7d5ba1679bec7adfb3aac41e2
|
[
"MIT"
] | 14
|
2020-05-10T20:44:49.000Z
|
2022-01-12T23:06:24.000Z
|
flow/sequential.py
|
altosaar/hierarchical-variational-models-physics
|
611d91e0281664d7d5ba1679bec7adfb3aac41e2
|
[
"MIT"
] | null | null | null |
flow/sequential.py
|
altosaar/hierarchical-variational-models-physics
|
611d91e0281664d7d5ba1679bec7adfb3aac41e2
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
class FlowSequential(nn.Sequential):
"""Forward pass with log determinant of the Jacobian."""
def forward(self, input, context=None):
total_log_prob = torch.zeros(input.size(0), device=input.device)
for block in self._modules.values():
input, log_prob = block(input, context)
total_log_prob += log_prob
return input, total_log_prob
def inverse(self, input, context=None):
total_log_prob = torch.zeros(input.size(0), device=input.device)
for block in reversed(self._modules.values()):
input, log_prob = block.inverse(input, context)
total_log_prob += log_prob
return input, total_log_prob
def get_memory():
torch.cuda.synchronize()
max_memory = torch.cuda.max_memory_allocated()
memory = torch.cuda.memory_allocated()
return memory / 10**9, max_memory / 10**9
class RealNVPSequential(nn.Sequential):
"""Assumes first and last module are CheckerSplit and CheckerUnsplit."""
def forward(self, input, context=None):
total_log_prob = torch.zeros(input.size(0), device=input.device)
modules = list(self._modules.values())
split = modules.pop(0)
concat = modules.pop()
transf, const = split(input)
for module in modules:
transf, const, log_prob = module(transf, const, context)
total_log_prob += log_prob
return concat(transf, const), total_log_prob
def inverse(self, input, context=None):
total_log_prob = torch.zeros(input.size(0), device=input.device)
modules = list(self._modules.values())
split = modules.pop(0)
concat = modules.pop()
transf, const = split(input)
for module in reversed(modules):
transf, const, log_prob = module.inverse(transf, const, context)
total_log_prob += log_prob
return concat(transf, const), total_log_prob
class SplitSequential(nn.Sequential):
"""Assumes first and last module are CheckerSplit and CheckerConcat."""
def forward(self, transf, const, context=None):
total_log_prob = torch.zeros(transf.size(0), device=transf.device)
for module in self._modules.values():
transf, const, log_prob = module(transf, const, context)
total_log_prob += log_prob
return transf, const, total_log_prob
def inverse(self, transf, const, context=None):
total_log_prob = torch.zeros(transf.size(0), device=transf.device)
for module in reversed(self._modules.values()):
transf, const, log_prob = module.inverse(transf, const, context)
total_log_prob += log_prob
return transf, const, total_log_prob
| 35.123288
| 74
| 0.710608
| 2,333
| 0.909906
| 0
| 0
| 0
| 0
| 0
| 0
| 199
| 0.077613
|
55e3a6acd9ba82563797c1dceb04e6f788b6036d
| 3,827
|
py
|
Python
|
inmoov/scripts/animation_executor.py
|
mish3albaiz/Robotics_ECE579
|
efb654040015671a0656eaee4c78ec085d862996
|
[
"BSD-3-Clause"
] | 1
|
2020-02-13T21:13:08.000Z
|
2020-02-13T21:13:08.000Z
|
inmoov/scripts/animation_executor.py
|
mish3albaiz/Robotics_ECE579
|
efb654040015671a0656eaee4c78ec085d862996
|
[
"BSD-3-Clause"
] | null | null | null |
inmoov/scripts/animation_executor.py
|
mish3albaiz/Robotics_ECE579
|
efb654040015671a0656eaee4c78ec085d862996
|
[
"BSD-3-Clause"
] | null | null | null |
import time
from os.path import join, dirname
import sys
whereami = dirname(__file__)
scripts_dir= join(whereami, "../scripts/")
sys.path.append(scripts_dir)
from json_parsing import read_json
import Inmoov
filename_pose = join(whereami, '../json/pose.json')
filename_animation = join(whereami, '../json/animations.json')
# global objects that hold the json file contents
# so i can control when/how often to read the json file
# in the inmoov object, when it receives messages, it only needs to update at bootup. json will not change after bootup.
# in the gui, it should update each time it tries to run, because the gui is editing the files.
global_poses = None
global_animations = None
def update_animations():
global global_animations
global_animations = read_json(filename_animation)
def update_poses():
global global_poses
global_poses = read_json(filename_pose)
# TODO: if we are keeping the killlist idea, make it cleaner & easy to remove when transferring to a robot that doesn't need it
# TODO: be more intelligent about when we need to read the animation/pose json files
def do_animation(the_inmoov, animation_name):
update_animations()
print("Executing animation ", str(animation_name))
if animation_name not in global_animations:
print("FAIL TO FIND: ANIMATION '%s'" % str(animation_name))
return
#for key, pose_info in sorted(animation_data[animation_name].items()):
# this method better supports animations >= 10 frames long
# because using sorted() on 1-12 returns [1, 10, 11, 12, 2, 3, 4, 5, etc]
this_animation_dict = global_animations[animation_name]
t = 1
while str(t) in this_animation_dict:
# pose_info is a list with item0 = posename and item1 = holdtime
pose_info = this_animation_dict[str(t)]
print("\n********* Executing pose {} *********\n".format(str(pose_info[0])))
do_pose(the_inmoov, pose_info[0], pose_info[1])
t += 1
print("\nANIMATION COMPLETE!\n")
#killtime = 1
killlist = ["left_shoulder_lift_front","left_arm_rotate","right_arm_rotate","right_shoulder_lift_front"]
def do_pose(the_inmoov, pose_name, hold_time=0):
killtime = 1
update_poses()
if pose_name not in global_poses:
print("FAIL TO FIND: POSE '%s'" % str(pose_name))
return
hold_time = float(hold_time)
pose_data = global_poses[pose_name]
for servo_name, servo_angle in pose_data.items():
#Obtain a handle to the actual servo object
fservo = the_inmoov.find_servo_by_name(str(servo_name))
if fservo.curr_angle == servo_angle:
# if telling it to move to a position it's already at, skip it instead, it doesnt need to move
print('Skipping', servo_name)
else:
fservo.rotate(float(servo_angle))
print('Setting {} servo to an angle of {}'.format(servo_name, servo_angle))
# if servo_name == 'right_lift_front':
# killtime = abs((7.5/90)*(fservo.curr_angle - servo_angle))
if hold_time != 0:
print('\n--------------- Hold for {} second(s) ---------------'.format(hold_time))
# # todo: handle corner case where hold_time < killtime
# time.sleep(killtime)
        # # kill all servos that can safely hold position without power
# for killname in killlist:
# fservo = this_inmoov.find_servo_by_name(str(killname))
# fservo.off()
# time.sleep(hold_time - killtime)
time.sleep(hold_time)
if __name__ == '__main__':
this_inmoov = Inmoov.Inmoov()
do_animation(this_inmoov, 'rps_paper')
time.sleep(5)
exit()
do_animation(this_inmoov, 'headright_anim')
time.sleep(5)
do_animation(this_inmoov, 'headleft_anim')
time.sleep(5)
do_animation(this_inmoov, 'headright_anim')
time.sleep(5)
| 37.891089
| 127
| 0.686961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,834
| 0.479227
|
55e3e019d60ec9acd28cad6159176037b75aa670
| 930
|
py
|
Python
|
Python/1629.py
|
GeneralLi95/leetcode
|
f42392f2283e19ec76273d81b2912944f9039568
|
[
"MIT"
] | null | null | null |
Python/1629.py
|
GeneralLi95/leetcode
|
f42392f2283e19ec76273d81b2912944f9039568
|
[
"MIT"
] | null | null | null |
Python/1629.py
|
GeneralLi95/leetcode
|
f42392f2283e19ec76273d81b2912944f9039568
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from typing import List, Optional
from collections import defaultdict, deque
from itertools import product,combinations,permutations
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
# -------------------------
class Solution:
def slowestKey(self, releaseTimes: List[int], keysPressed: str) -> str:
n = len(releaseTimes)
max_time = 0
result = ''
for i in range(n):
if i == 0:
time = releaseTimes[0]
result = keysPressed[0]
else:
time = releaseTimes[i] - releaseTimes[i-1]
if time > max_time:
max_time = time
result = keysPressed[i]
elif time == max_time:
result = max(result, keysPressed[i])
return result
# -------------------------
a = Solution()
b = [9,29,49,50]
c = "cbcd"
b2 = [19,22,28,29,66,81,93,97]
c2 = "fnfaaxha"
b3 = [12,23,36,46,62]
c3 = "spuda"
print(a.slowestKey(b3, c3))
| 20.217391
| 72
| 0.615054
| 538
| 0.578495
| 0
| 0
| 0
| 0
| 0
| 0
| 101
| 0.108602
|
55e424ce8e62dc85462716ba6efd8eff1ffa1fd9
| 530
|
py
|
Python
|
hexrd/sglite/setup.py
|
glemaitre/hexrd
|
b68b1ba72e0f480d29bdaae2adbd6c6e2380cc7c
|
[
"BSD-3-Clause"
] | null | null | null |
hexrd/sglite/setup.py
|
glemaitre/hexrd
|
b68b1ba72e0f480d29bdaae2adbd6c6e2380cc7c
|
[
"BSD-3-Clause"
] | null | null | null |
hexrd/sglite/setup.py
|
glemaitre/hexrd
|
b68b1ba72e0f480d29bdaae2adbd6c6e2380cc7c
|
[
"BSD-3-Clause"
] | null | null | null |
from distutils.core import setup, Extension
srclist = ['sgglobal.c','sgcb.c','sgcharmx.c','sgfile.c',
'sggen.c','sghall.c','sghkl.c','sgltr.c','sgmath.c','sgmetric.c',
'sgnorm.c','sgprop.c','sgss.c','sgstr.c','sgsymbols.c',
'sgtidy.c','sgtype.c','sgutil.c','runtests.c','sglitemodule.c']
module = Extension('sglite', sources=srclist,
define_macros = [('PythonTypes', 1)])
setup (name='sglite',
description = 'space group info',
ext_modules = [module]
)
| 33.125
| 76
| 0.584906
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 256
| 0.483019
|
55e48ca73e642e82cfdfccf386ed40c0b2fba12d
| 725
|
py
|
Python
|
app/blogging/routes.py
|
Sjors/patron
|
a496097ad0821b677c8e710e8aceb587928be31c
|
[
"MIT"
] | 114
|
2018-12-30T20:43:37.000Z
|
2022-03-21T18:57:47.000Z
|
app/blogging/routes.py
|
Sjors/patron
|
a496097ad0821b677c8e710e8aceb587928be31c
|
[
"MIT"
] | 17
|
2019-04-25T20:20:57.000Z
|
2022-03-29T21:48:35.000Z
|
app/blogging/routes.py
|
Sjors/patron
|
a496097ad0821b677c8e710e8aceb587928be31c
|
[
"MIT"
] | 17
|
2019-01-02T06:37:11.000Z
|
2022-03-29T22:22:40.000Z
|
from app.blogging import bp
from datetime import datetime
from flask import flash, redirect, url_for
from flask_login import current_user
@bp.before_request
def protect():
'''
Registers new function to Flask-Blogging Blueprint that protects
updates to make them only viewable by paid subscribers.
'''
if current_user.is_authenticated:
if datetime.today() <= current_user.expiration:
return None
else:
flash('You must have a paid-up subscription \
to view updates.', 'warning')
return redirect(url_for('main.support'))
else:
flash('Please login to view updates.', 'warning')
return redirect(url_for('auth.login'))
| 31.521739
| 68
| 0.666207
| 0
| 0
| 0
| 0
| 584
| 0.805517
| 0
| 0
| 290
| 0.4
|
55e5362057afc71bf0071723cb854344bbc9e957
| 409
|
py
|
Python
|
mini_cluster_07.py
|
jgpattis/Desres-sars-cov-2-apo-mpro
|
90c07414040c0ea0bf54028e2f194d6509c8f526
|
[
"MIT"
] | null | null | null |
mini_cluster_07.py
|
jgpattis/Desres-sars-cov-2-apo-mpro
|
90c07414040c0ea0bf54028e2f194d6509c8f526
|
[
"MIT"
] | null | null | null |
mini_cluster_07.py
|
jgpattis/Desres-sars-cov-2-apo-mpro
|
90c07414040c0ea0bf54028e2f194d6509c8f526
|
[
"MIT"
] | null | null | null |
#cluster data into a small amount of clusters to later pull out structures
import pyemma.coordinates as coor
import numpy as np
sys = 'back'
tica_data = coor.load('tica_data_05/back_tica_data.h5')
n_clusters = 50
cl = coor.cluster_kmeans(tica_data, k=n_clusters, max_iter=50)
cl.save(f'{sys}_{n_clusters}_mini_cluster_object.h5', overwrite=True)
cl.write_to_hdf5(f'{sys}_{n_clusters}_cluster_dtrajs.h5')
| 27.266667
| 74
| 0.787286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 195
| 0.476773
|
55e68ec4c6def4aa1f467b3936144273058e5304
| 698
|
py
|
Python
|
pydaily/images/tests/test_color.py
|
codingPingjun/pydaily
|
966b96db05b3170f926aeb830ca6f81093a5371a
|
[
"Apache-2.0"
] | null | null | null |
pydaily/images/tests/test_color.py
|
codingPingjun/pydaily
|
966b96db05b3170f926aeb830ca6f81093a5371a
|
[
"Apache-2.0"
] | null | null | null |
pydaily/images/tests/test_color.py
|
codingPingjun/pydaily
|
966b96db05b3170f926aeb830ca6f81093a5371a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os, sys, pdb
from pydaily.images import graymask2rgb
from pydaily import DATA_DIR
import numpy as np
from scipy import misc
import matplotlib.pyplot as plt
def test_graymask2rgb():
mask_img_path = os.path.join(DATA_DIR, "input/thyroid/mask/1273169.png")
assert os.path.exists(mask_img_path), "{} not a valid file".format(mask_img_path)
    try:
        mask_img = misc.imread(mask_img_path)
    except Exception:
        print("Load {} error.".format(mask_img_path))
        return
plt.imshow(mask_img, cmap='gray')
plt.show()
mask_rgb = graymask2rgb(mask_img, channel='r')
plt.imshow(mask_rgb)
plt.show()
if __name__ == '__main__':
test_graymask2rgb()
| 24.068966
| 85
| 0.694842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 111
| 0.159026
|
55e92561b0ff7599f7ae6a6d6d8a27dbdab535a8
| 63
|
py
|
Python
|
reqinstall/commands/freeze/__init__.py
|
QualiSystems/reqinstall
|
57268b185428b31368cb7246a20a6c7548fb44dc
|
[
"MIT"
] | null | null | null |
reqinstall/commands/freeze/__init__.py
|
QualiSystems/reqinstall
|
57268b185428b31368cb7246a20a6c7548fb44dc
|
[
"MIT"
] | null | null | null |
reqinstall/commands/freeze/__init__.py
|
QualiSystems/reqinstall
|
57268b185428b31368cb7246a20a6c7548fb44dc
|
[
"MIT"
] | null | null | null |
from reqinstall.commands.freeze.freeze import PipFreezeCommand
| 31.5
| 62
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
55ea56448f1d5c8396e0645cb61cbcf3e70761cc
| 1,784
|
py
|
Python
|
scripts/configure.py
|
materialdigital/pmd-server
|
fdc12fe3865e7783046ab5c50f00b71aceb07ebd
|
[
"BSD-3-Clause"
] | 1
|
2021-07-05T21:54:44.000Z
|
2021-07-05T21:54:44.000Z
|
scripts/configure.py
|
materialdigital/pmd-server
|
fdc12fe3865e7783046ab5c50f00b71aceb07ebd
|
[
"BSD-3-Clause"
] | 8
|
2021-06-14T15:03:06.000Z
|
2022-01-26T15:48:03.000Z
|
scripts/configure.py
|
materialdigital/pmd-server
|
fdc12fe3865e7783046ab5c50f00b71aceb07ebd
|
[
"BSD-3-Clause"
] | 3
|
2021-10-01T12:07:50.000Z
|
2021-11-22T10:59:44.000Z
|
#! /usr/bin/env python3
import json, sys, argparse
from os.path import isfile
# ******************************************************************************
parser = argparse.ArgumentParser(description='Reads config.json and writes out docker-environment files.')
parser.add_argument('file', nargs='?', help='optional input file, if omitted, read from stdin', default='-')
parser.add_argument('-v', '--verbose', action='store_true', help="be verbose")
args = parser.parse_args()
# ******************************************************************************
def load_config(file_name):
if isfile(file_name):
with open(file_name) as fh:
return json.load(fh)
elif file_name == '-':
return json.loads(sys.stdin.read())
    else:
return dict()
def get_value(value):
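    # Values prefixed with "shared:" are looked up in the shared variable table
    # (populated later from the "shared" section of the merged config); anything
    # else is returned unchanged.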
if value.startswith("shared:"):
return shared_vars.get(value[7:], value)
else:
return value
if __name__ == '__main__':
config = load_config('static.json')
# prevents script from trying to read interactively from tty, only "proper" pipe allowed
if (args.file == '-' and sys.stdin.isatty()):
print ("Won't read input from tty (please use -h for help)", file=sys.stderr)
exit(1)
else:
filename = args.file
for env_file, entry in load_config(filename).items():
if env_file in config:
config[env_file].update(entry)
else:
config[env_file] = entry
shared_vars = config.pop('shared', dict())
for env_file, entry in config.items():
with open(env_file, 'w') as fh:
lines = [ "{}={}\n".format(key, get_value(val)) for key, val in entry.items()]
print("### Writing {}...". format(env_file))
fh.writelines(lines)
| 33.037037
| 108
| 0.570628
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 561
| 0.314462
|
55eab24c8b73ac11d50c210b2451b3c1e941b6bd
| 676
|
py
|
Python
|
src/lib/jianshu_parser/jianshuparser.py
|
eebook/jianshu2e-book
|
d638fb8c2f47cf8e91e9f74e2e1e5f61f3c98a48
|
[
"MIT"
] | 7
|
2019-01-02T14:52:48.000Z
|
2021-11-05T06:11:46.000Z
|
src/lib/jianshu_parser/jianshuparser.py
|
knarfeh/jianshu2e-book
|
d638fb8c2f47cf8e91e9f74e2e1e5f61f3c98a48
|
[
"MIT"
] | 2
|
2021-03-22T17:11:32.000Z
|
2021-12-13T19:36:17.000Z
|
src/lib/jianshu_parser/jianshuparser.py
|
ee-book/jianshu2e-book
|
d638fb8c2f47cf8e91e9f74e2e1e5f61f3c98a48
|
[
"MIT"
] | 2
|
2019-04-18T05:44:24.000Z
|
2021-06-10T09:35:44.000Z
|
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
from src.lib.jianshu_parser.base import BaseParser
from src.lib.jianshu_parser.content.JianshuAuthor import JianshuAuthorInfo
from src.lib.jianshu_parser.content.JianshuArticle import JianshuArticle
class JianshuParser(BaseParser):
u"""
    Obtain the content required for the jianshu_info table
"""
def __init__(self, content):
self.dom = BeautifulSoup(content, 'lxml')
self.article_parser = JianshuArticle(self.dom)
return
def get_jianshu_info_list(self):
        author_parser = JianshuAuthorInfo()  # information for the SinaBlog_Info table
author_parser.set_dom(self.dom)
return [author_parser.get_info()]
| 28.166667
| 74
| 0.724852
| 447
| 0.634943
| 0
| 0
| 0
| 0
| 0
| 0
| 115
| 0.163352
|
55ebf274b2c9e17190671385e32d419938db93a1
| 306
|
py
|
Python
|
vox/utils/__init__.py
|
DSciLab/voxpy
|
4d06ffc9a52f4a2ae1eaacda7da998e75d0cc4aa
|
[
"MIT"
] | null | null | null |
vox/utils/__init__.py
|
DSciLab/voxpy
|
4d06ffc9a52f4a2ae1eaacda7da998e75d0cc4aa
|
[
"MIT"
] | null | null | null |
vox/utils/__init__.py
|
DSciLab/voxpy
|
4d06ffc9a52f4a2ae1eaacda7da998e75d0cc4aa
|
[
"MIT"
] | null | null | null |
import numpy as np
from .one_hot import one_hot
from .rescale import LinearNormRescale255, \
CentralNormRescale255, \
GeneralNormRescale255
def threhold_seg(inp, th=0.5):
inp_ = np.copy(inp)
    inp_[inp_ > th] = 1.0
    inp_[inp_ <= th] = 0.0
return inp_
| 23.538462
| 45
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
55ec22a317bb062a3d79bbd46b18d734b28581cf
| 58
|
py
|
Python
|
minimally_sufficient_pandas/__init__.py
|
dexplo/minimally_sufficient_pandas
|
d07710f03daa757f5778aa66ee68952d03467809
|
[
"BSD-3-Clause"
] | null | null | null |
minimally_sufficient_pandas/__init__.py
|
dexplo/minimally_sufficient_pandas
|
d07710f03daa757f5778aa66ee68952d03467809
|
[
"BSD-3-Clause"
] | null | null | null |
minimally_sufficient_pandas/__init__.py
|
dexplo/minimally_sufficient_pandas
|
d07710f03daa757f5778aa66ee68952d03467809
|
[
"BSD-3-Clause"
] | null | null | null |
from ._pandas_accessor import _MSP
__version__ = '0.0.1'
| 14.5
| 34
| 0.758621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0.12069
|
55ecaf06199d8ec889aab34a7ac5ad6a8dc82793
| 350
|
py
|
Python
|
src/rl/genotypes.py
|
xkp793003821/nas-segm-pytorch
|
c4b59ab56bd539bf08493c6d85072849213a3d62
|
[
"BSD-2-Clause"
] | null | null | null |
src/rl/genotypes.py
|
xkp793003821/nas-segm-pytorch
|
c4b59ab56bd539bf08493c6d85072849213a3d62
|
[
"BSD-2-Clause"
] | null | null | null |
src/rl/genotypes.py
|
xkp793003821/nas-segm-pytorch
|
c4b59ab56bd539bf08493c6d85072849213a3d62
|
[
"BSD-2-Clause"
] | null | null | null |
"""List of operations"""
from collections import namedtuple
Genotype = namedtuple('Genotype', 'encoder decoder')
OP_NAMES = [
'conv1x1',
'conv3x3',
'sep_conv_3x3',
'sep_conv_5x5',
'global_average_pool',
'conv3x3_dil3',
'conv3x3_dil12',
'sep_conv_3x3_dil3',
'sep_conv_5x5_dil6',
'skip_connect',
'none'
]
| 17.5
| 52
| 0.648571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 205
| 0.585714
|
55ece9e5b9ea1cfd57bf781ef73ac983e830b9f2
| 391
|
py
|
Python
|
solutions/python3/1089.py
|
sm2774us/amazon_interview_prep_2021
|
f580080e4a6b712b0b295bb429bf676eb15668de
|
[
"MIT"
] | 42
|
2020-08-02T07:03:49.000Z
|
2022-03-26T07:50:15.000Z
|
solutions/python3/1089.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | null | null | null |
solutions/python3/1089.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | 40
|
2020-02-08T02:50:24.000Z
|
2022-03-26T15:38:10.000Z
|
from typing import List


class Solution:
def duplicateZeros(self, arr: List[int]) -> None:
"""
Do not return anything, modify arr in-place instead.
"""
i = 0
for num in list(arr):
if i >= len(arr): break
arr[i] = num
if not num:
i += 1
if i < len(arr):
arr[i] = num
i += 1
| 27.928571
| 60
| 0.398977
| 391
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 76
| 0.194373
|
55ed312dab5a46153b2af52c1c2cf41104214f04
| 2,284
|
py
|
Python
|
tools/download_typed_ast.py
|
hugovk/typed_ast
|
8eed936014f81a55a3e17310629c40c0203327c3
|
[
"Apache-2.0"
] | null | null | null |
tools/download_typed_ast.py
|
hugovk/typed_ast
|
8eed936014f81a55a3e17310629c40c0203327c3
|
[
"Apache-2.0"
] | null | null | null |
tools/download_typed_ast.py
|
hugovk/typed_ast
|
8eed936014f81a55a3e17310629c40c0203327c3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Hacky script to download linux and windows typed_ast wheels from appveyor and gcloud
import os
import os.path
import json
import sys
from urllib.request import urlopen
# Appveyor download for windows wheels
api_url = 'https://ci.appveyor.com/api/'
def get_json(path):
url = api_url + path
f = urlopen(url)
data = f.read()
return json.loads(data)
def download(url):
print('Downloading', url)
name = os.path.join('dist', os.path.split(url)[1])
with urlopen(url) as f:
data = f.read()
with open(name, 'wb') as f:
f.write(data)
def download_appveyor(version):
project_base = 'projects/ddfisher/typed-ast-a4xqu'
history = get_json(project_base + '/history?recordsNumber=20')
for build in history['builds']:
if build.get('tag') == version:
            build_version = str(build['buildId'])
break
else:
sys.exit("Couldn't find tag")
print(build_version)
build = get_json(project_base + '/builds/' + build_version)
for job in build['build']['jobs']:
artifact_url = 'buildjobs/{}/artifacts'.format(job['jobId'])
artifacts = get_json(artifact_url)
for artifact in artifacts:
download(api_url + artifact_url + '/' + artifact['fileName'])
# gcloud downloads for linux wheels
MIN_VER = 5
MAX_VER = 9
GCLOUD_URL = "https://storage.googleapis.com/typed-ast/typed_ast-{version}-cp3{pyver}-cp3{pyver}{abi_tag}-{platform}.whl"
def download_entries(base_url, version, platform):
for pyver in range(MIN_VER, MAX_VER + 1):
abi_tag = "" if pyver >= 8 else "m"
url = base_url.format(
version=version,
pyver=pyver,
abi_tag=abi_tag,
platform=platform)
download(url)
def main(argv):
if len(argv) != 2:
sys.exit("Usage: download_typed_ast.py version")
version = argv[1]
os.makedirs('dist', exist_ok=True)
download_entries(GCLOUD_URL, version, 'manylinux1_x86_64')
download_entries(GCLOUD_URL, version, 'manylinux1_i686')
download_entries(GCLOUD_URL, version, 'manylinux2014_aarch64')
download_appveyor(version)
if __name__ == '__main__':
main(sys.argv)
| 28.911392
| 121
| 0.652802
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 641
| 0.280648
|
55ee2be125f56e9339bd29f2a5e248d4c0042d7f
| 220
|
py
|
Python
|
Contest/Keyence2021/a/main.py
|
mpses/AtCoder
|
9c101fcc0a1394754fcf2385af54b05c30a5ae2a
|
[
"CC0-1.0"
] | null | null | null |
Contest/Keyence2021/a/main.py
|
mpses/AtCoder
|
9c101fcc0a1394754fcf2385af54b05c30a5ae2a
|
[
"CC0-1.0"
] | null | null | null |
Contest/Keyence2021/a/main.py
|
mpses/AtCoder
|
9c101fcc0a1394754fcf2385af54b05c30a5ae2a
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
(n,), a, b = [[*map(int, o.split())] for o in open(0)]
from itertools import*
*A, = accumulate(a, max)
print(ans := a[0] * b[0])
for i in range(1, n):
ans = max(ans, A[i] * b[i])
print(ans)
| 27.5
| 54
| 0.554545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.1
|
55efeb23d40cb01ba113e0e658a5c2e41b236597
| 10,879
|
py
|
Python
|
service.py
|
ViscaElAyush/CSE598
|
8e95436015d466d168005846473e9e3978423913
|
[
"MIT"
] | 35
|
2020-10-31T20:21:01.000Z
|
2022-01-29T18:28:44.000Z
|
service.py
|
ViscaElAyush/CSE598
|
8e95436015d466d168005846473e9e3978423913
|
[
"MIT"
] | null | null | null |
service.py
|
ViscaElAyush/CSE598
|
8e95436015d466d168005846473e9e3978423913
|
[
"MIT"
] | 10
|
2021-01-10T18:40:03.000Z
|
2022-02-09T04:19:27.000Z
|
#!/usr/bin/env python
# @author Simon Stepputtis <sstepput@asu.edu>, Interactive Robotics Lab, Arizona State University
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import rclpy
from policy_translation.srv import NetworkPT, TuneNetwork
from model_src.model import PolicyTranslationModel
from utils.network import Network
from utils.tf_util import trainOnCPU, limitGPUMemory
from utils.intprim.gaussian_model import GaussianModel
import tensorflow as tf
import numpy as np
import re
from cv_bridge import CvBridge, CvBridgeError
import cv2
import matplotlib.pyplot as plt
import glob
import json
import pickle
import copy
# Force TensorFlow to use the CPU
FORCE_CPU = True
# Use dropout at run-time for stochastif-forward passes
USE_DROPOUT = True
# Where can we find the trained model?
MODEL_PATH = "../GDrive/model/policy_translation"
# Where is a pre-trained faster-rcnn?
FRCNN_PATH = "../GDrive/rcnn"
# Where are the GloVe word embeddings?
GLOVE_PATH = "../GDrive/glove.6B.50d.txt"
# Where is the normalization of the dataset?
NORM_PATH = "../GDrive/normalization_v2.pkl"
if FORCE_CPU:
trainOnCPU()
else:
limitGPUMemory()
print("Running Policy Translation Model")
model = PolicyTranslationModel(
od_path=FRCNN_PATH,
glove_path=GLOVE_PATH,
special=None
)
bs = 2
model((
np.ones((bs, 15), dtype=np.int64),
np.ones((bs, 6, 5), dtype=np.float32),
np.ones((bs, 500, 7), dtype=np.float32)
))
model.load_weights(MODEL_PATH)
model.summary()
class NetworkService():
def __init__(self):
self.dictionary = self._loadDictionary(GLOVE_PATH)
self.regex = re.compile('[^a-z ]')
self.bridge = CvBridge()
self.history = []
rclpy.init(args=None)
self.node = rclpy.create_node("neural_network")
self.service_nn = self.node.create_service(NetworkPT, "/network", self.cbk_network_dmp_ros2)
self.normalization = pickle.load(open(NORM_PATH, mode="rb"), encoding="latin1")
print("Ready")
def runNode(self):
while rclpy.ok():
rclpy.spin_once(self.node)
        self.node.destroy_service(self.service_nn)
rclpy.shutdown()
def _loadDictionary(self, file):
__dictionary = {}
__dictionary[""] = 0 # Empty string
fh = open(file, "r", encoding="utf-8")
for line in fh:
if len(__dictionary) >= 300000:
break
tokens = line.strip().split(" ")
__dictionary[tokens[0]] = len(__dictionary)
fh.close()
return __dictionary
def tokenize(self, language):
voice = self.regex.sub("", language.strip().lower())
tokens = []
for w in voice.split(" "):
idx = 0
try:
idx = self.dictionary[w]
except:
print("Unknown word: " + w)
tokens.append(idx)
return tokens
def normalize(self, value, v_min, v_max):
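        # Min-max normalize each column of `value` to [0, 1] using the
        # per-dimension bounds in v_min / v_max.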
if (value.shape[1] != v_min.shape[0] or v_min.shape[0] != v_max.shape[0] or
len(value.shape) != 2 or len(v_min.shape) != 1 or len(v_max.shape) != 1):
            raise ValueError("value, v_min and v_max dimensions do not match")
value = np.copy(value)
v_min = np.tile(np.expand_dims(v_min, 0), [value.shape[0], 1])
v_max = np.tile(np.expand_dims(v_max, 0), [value.shape[0], 1])
value = (value - v_min) / (v_max - v_min)
return value
def interpolateTrajectory(self, trj, target):
current_length = trj.shape[0]
dimensions = trj.shape[1]
result = np.zeros((target, trj.shape[1]), dtype=np.float32)
for i in range(dimensions):
result[:,i] = np.interp(np.linspace(0.0, 1.0, num=target), np.linspace(0.0, 1.0, num=current_length), trj[:,i])
return result
def cbk_network_dmp_ros2(self, req, res):
res.trajectory, res.confidence, res.timesteps, res.weights, res.phase = self.cbk_network_dmp(req)
return res
def imgmsg_to_cv2(self, img_msg, desired_encoding="passthrough"):
if img_msg.encoding != "8UC3":
            self.node.get_logger().info("Unrecognized image type: " + img_msg.encoding)
exit(0)
dtype = "uint8"
n_channels = 3
dtype = np.dtype(dtype)
dtype = dtype.newbyteorder('>' if img_msg.is_bigendian else '<')
img_buf = np.asarray(img_msg.data, dtype=dtype) if isinstance(img_msg.data, list) else img_msg.data
if n_channels == 1:
im = np.ndarray(shape=(img_msg.height, img_msg.width),
dtype=dtype, buffer=img_buf)
else:
im = np.ndarray(shape=(img_msg.height, img_msg.width, n_channels),
dtype=dtype, buffer=img_buf)
if img_msg.is_bigendian == (sys.byteorder == 'little'):
im = im.byteswap().newbyteorder()
if desired_encoding == 'passthrough':
return im
from cv_bridge.boost.cv_bridge_boost import cvtColor2
try:
res = cvtColor2(im, img_msg.encoding, desired_encoding)
except RuntimeError as e:
raise CvBridgeError(e)
return res
def cbk_network_dmp(self, req):
if req.reset:
self.req_step = 0
self.sfp_history = []
try:
image = self.imgmsg_to_cv2(req.image)
except CvBridgeError as e:
print(e)
language = self.tokenize(req.language)
self.language = language + [0] * (15-len(language))
image_features = model.frcnn(tf.convert_to_tensor([image], dtype=tf.uint8))
scores = image_features["detection_scores"][0, :6].numpy().astype(dtype=np.float32)
scores = [0.0 if v < 0.5 else 1.0 for v in scores.tolist()]
classes = image_features["detection_classes"][0, :6].numpy().astype(dtype=np.int32)
classes = [v * scores[k] for k, v in enumerate(classes.tolist())]
boxes = image_features["detection_boxes"][0, :6, :].numpy().astype(dtype=np.float32)
self.features = np.concatenate((np.expand_dims(classes,1), boxes), axis=1)
self.history = []
self.history.append(list(req.robot))
robot = np.asarray(self.history, dtype=np.float32)
self.input_data = (
tf.convert_to_tensor(np.tile([self.language],[250, 1]), dtype=tf.int64),
tf.convert_to_tensor(np.tile([self.features],[250, 1, 1]), dtype=tf.float32),
tf.convert_to_tensor(np.tile([robot],[250, 1, 1]), dtype=tf.float32)
)
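        # The single observation is tiled 250 times and run through the network
        # with dropout enabled, giving stochastic forward passes whose mean and
        # standard deviation serve as the trajectory estimate and its confidence.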
generated, (atn, dmp_dt, phase, weights) = model(self.input_data, training=tf.constant(False), use_dropout=tf.constant(True))
self.trj_gen = tf.math.reduce_mean(generated, axis=0).numpy()
self.trj_std = tf.math.reduce_std(generated, axis=0).numpy()
self.timesteps = int(tf.math.reduce_mean(dmp_dt).numpy() * 500)
self.b_weights = tf.math.reduce_mean(weights, axis=0).numpy()
phase_value = tf.math.reduce_mean(phase, axis=0).numpy()
phase_value = phase_value[-1,0]
self.sfp_history.append(self.b_weights[-1,:,:])
if phase_value > 0.95 and len(self.sfp_history) > 100:
trj_len = len(self.sfp_history)
basismodel = GaussianModel(degree=11, scale=0.012, observed_dof_names=("Base","Shoulder","Ellbow","Wrist1","Wrist2","Wrist3","Gripper"))
domain = np.linspace(0, 1, trj_len, dtype=np.float64)
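            # Decode every stored basis-weight snapshot back into a full
            # joint-space trajectory over the normalized phase domain [0, 1].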
trajectories = []
for i in range(trj_len):
trajectories.append(np.asarray(basismodel.apply_coefficients(domain, self.sfp_history[i].flatten())))
trajectories = np.asarray(trajectories)
np.save("trajectories", trajectories)
np.save("history", self.history)
gen_trajectory = []
var_trj = np.zeros((trj_len, trj_len, 7), dtype=np.float32)
for w in range(trj_len):
gen_trajectory.append(trajectories[w,w,:])
gen_trajectory = np.asarray(gen_trajectory)
np.save("gen_trajectory", gen_trajectory)
self.sfp_history = []
self.req_step += 1
return (self.trj_gen.flatten().tolist(), self.trj_std.flatten().tolist(), self.timesteps, self.b_weights.flatten().tolist(), float(phase_value))
def idToText(self, id):
names = ["", "Yellow Small Round", "Red Small Round", "Green Small Round", "Blue Small Round", "Pink Small Round",
"Yellow Large Round", "Red Large Round", "Green Large Round", "Blue Large Round", "Pink Large Round",
"Yellow Small Square", "Red Small Square", "Green Small Square", "Blue Small Square", "Pink Small Square",
"Yellow Large Square", "Red Large Square", "Green Large Square", "Blue Large Square", "Pink Large Square",
"Cup Red", "Cup Green", "Cup Blue"]
return names[id]
def plotTrajectory(self, trj, error, image):
fig, ax = plt.subplots(3,3)
fig.set_size_inches(9, 9)
for sp in range(7):
idx = sp // 3
idy = sp % 3
ax[idx,idy].clear()
ax[idx,idy].plot(range(trj.shape[0]), trj[:,sp], alpha=0.5, color='mediumslateblue')
ax[idx,idy].errorbar(range(trj.shape[0]), trj[:,sp], xerr=None, yerr=error[:,sp], alpha=0.1, fmt='none', color='mediumslateblue')
ax[idx,idy].set_ylim([-0.1, 1.1])
ax[2,1].imshow(image)
def plotImageRegions(self, image_np, image_dict, atn):
# Visualization of the results of a detection.
tgt_object = np.argmax(atn)
num_detected = len([v for v in image_dict["detection_scores"][0] if v > 0.5])
num_detected = min(num_detected, len(atn))
for i in range(num_detected):
ymin, xmin, ymax, xmax = image_dict['detection_boxes'][0][i,:]
pt1 = (int(xmin*image_np.shape[1]), int(ymin*image_np.shape[0]))
pt2 = (int(xmax*image_np.shape[1]), int(ymax*image_np.shape[0]))
image_np = cv2.rectangle(image_np, pt1, pt2, (156, 2, 2), 1)
if i == tgt_object:
image_np = cv2.rectangle(image_np, pt1, pt2, (30, 156, 2), 2)
image_np = cv2.putText(image_np, "{:.1f}%".format(atn[i] * 100), (pt1[0]-10, pt1[1]-5), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (30, 156, 2), 2, cv2.LINE_AA)
fig = plt.figure()
plt.imshow(image_np)
if __name__ == "__main__":
ot = NetworkService()
ot.runNode()
| 40.593284
| 165
| 0.603364
| 9,210
| 0.846585
| 0
| 0
| 0
| 0
| 0
| 0
| 1,393
| 0.128045
|
55f05ed10bf6e796822641491b85dc1b12b2b7ba
| 375
|
py
|
Python
|
model/pet_breed.py
|
IDRISSOUM/hospital_management
|
56a768f29269a77bc890d40479a8aacb90867189
|
[
"Unlicense"
] | null | null | null |
model/pet_breed.py
|
IDRISSOUM/hospital_management
|
56a768f29269a77bc890d40479a8aacb90867189
|
[
"Unlicense"
] | null | null | null |
model/pet_breed.py
|
IDRISSOUM/hospital_management
|
56a768f29269a77bc890d40479a8aacb90867189
|
[
"Unlicense"
] | null | null | null |
# # -*- coding: utf-8 -*-
# # Part of BrowseInfo. See LICENSE file for full copyright and licensing details.
#
# from odoo import api, fields, models, _
#
# class pet_breed(models.Model):
# _name = 'pet.breed'
#
# name = fields.Char('Name', required = True)
# code = fields.Char('Code')
#
#
# # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 28.846154
| 82
| 0.653333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 363
| 0.968
|
55f120e7cddd6dd7d7bb9b4780eee99d7d17ddcc
| 797
|
py
|
Python
|
src/fireo/utils/utils.py
|
jshep23/FireO
|
f4ccac8461bcf821ae9665a942847aa9f28ee92b
|
[
"Apache-2.0"
] | null | null | null |
src/fireo/utils/utils.py
|
jshep23/FireO
|
f4ccac8461bcf821ae9665a942847aa9f28ee92b
|
[
"Apache-2.0"
] | null | null | null |
src/fireo/utils/utils.py
|
jshep23/FireO
|
f4ccac8461bcf821ae9665a942847aa9f28ee92b
|
[
"Apache-2.0"
] | null | null | null |
import re
from google.cloud import firestore
def collection_name(model):
return re.sub('(?!^)([A-Z]+)', r'_\1', model).lower()
def ref_path(key):
return key.split('/')
def collection_path(key):
return '/'.join(key.split('/')[:-1])
def get_parent(key):
return collection_path(key)
def get_parent_doc(key):
return '/'.join(key.split('/')[:-2])
def get_id(key):
try:
return key.split('/')[-1]
except AttributeError:
return None
def GeoPoint(latitude: float, longitude: float):
return firestore.GeoPoint(latitude, longitude)
def get_nested(dict, *args):
if args and dict:
element = args[0]
if element:
value = dict.get(element)
return value if len(args) == 1 else get_nested(value, *args[1:])
| 18.97619
| 76
| 0.61606
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.048934
|
55f43053f0d67231d40b9280a1fec18d43d92658
| 169
|
py
|
Python
|
src/rlib/debug.py
|
SOM-st/PySOM
|
65ef72f44252439b724a7429408dac7f8d1b1d98
|
[
"MIT"
] | 22
|
2015-10-29T05:11:06.000Z
|
2022-03-01T11:18:45.000Z
|
src/rlib/debug.py
|
smarr/PySOM
|
65ef72f44252439b724a7429408dac7f8d1b1d98
|
[
"MIT"
] | 16
|
2021-03-07T22:09:33.000Z
|
2021-08-24T12:36:15.000Z
|
src/rlib/debug.py
|
SOM-st/PySOM
|
65ef72f44252439b724a7429408dac7f8d1b1d98
|
[
"MIT"
] | 5
|
2015-01-02T03:51:29.000Z
|
2020-10-02T07:05:46.000Z
|
try:
from rpython.rlib.debug import make_sure_not_resized # pylint: disable=W
except ImportError:
"NOT_RPYTHON"
def make_sure_not_resized(_):
pass
| 21.125
| 77
| 0.715976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 0.189349
|
55f5635ca095ac94e1e398b32c7f23cd1b5b52ae
| 12,173
|
py
|
Python
|
emr_eks_cdk/studio_live_stack.py
|
aws-samples/aws-cdk-for-emr-on-eks
|
20c51b8c845172ea77ee4e1dbde7ffd41cad427a
|
[
"MIT-0"
] | 9
|
2021-03-23T06:01:32.000Z
|
2021-12-28T09:01:45.000Z
|
emr_eks_cdk/studio_live_stack.py
|
aws-samples/aws-cdk-for-emr-on-eks
|
20c51b8c845172ea77ee4e1dbde7ffd41cad427a
|
[
"MIT-0"
] | 2
|
2021-07-27T09:53:04.000Z
|
2021-08-05T04:55:15.000Z
|
emr_eks_cdk/studio_live_stack.py
|
aws-samples/aws-cdk-for-emr-on-eks
|
20c51b8c845172ea77ee4e1dbde7ffd41cad427a
|
[
"MIT-0"
] | 9
|
2021-03-23T06:01:31.000Z
|
2021-12-29T14:03:14.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
from aws_cdk import aws_ec2 as ec2, aws_eks as eks, core, aws_emrcontainers as emrc, aws_iam as iam, aws_s3 as s3, custom_resources as custom, aws_acmpca as acmpca, aws_emr as emr
"""
This stack deploys the following:
- EMR Studio
"""
class StudioLiveStack(core.Stack):
def __init__(self, scope: core.Construct, construct_id: str, vpc: ec2.IVpc, **kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
# Create S3 bucket for Studio
bucket = s3.Bucket(self, "StudioBucket",
encryption=s3.BucketEncryption.S3_MANAGED,
block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
versioned = True
)
# Create security groups
eng_sg = ec2.SecurityGroup(self, "EngineSecurityGroup",
vpc=vpc,
description="EMR Studio Engine",
allow_all_outbound=True
)
core.Tags.of(eng_sg).add("for-use-with-amazon-emr-managed-policies", "true")
ws_sg = ec2.SecurityGroup(self, "WorkspaceSecurityGroup",
vpc=vpc,
description="EMR Studio Workspace",
allow_all_outbound=False
)
core.Tags.of(ws_sg).add("for-use-with-amazon-emr-managed-policies", "true")
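        # The workspace security group only allows outbound HTTPS (443) plus port
        # 18888 to the engine security group, which the engine accepts as ingress.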
ws_sg.add_egress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(443), "allow egress on port 443")
ws_sg.add_egress_rule(eng_sg, ec2.Port.tcp(18888), "allow egress on port 18888 to eng")
eng_sg.add_ingress_rule(ws_sg, ec2.Port.tcp(18888), "allow ingress on port 18888 from ws")
# Create Studio roles
role = iam.Role(self, "StudioRole",
assumed_by=iam.ServicePrincipal("elasticmapreduce.amazonaws.com"),
managed_policies=[
iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3FullAccess")
]
)
role.add_to_policy(iam.PolicyStatement(
resources=["*"],
actions=["ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:DescribeSecurityGroups",
"ec2:RevokeSecurityGroupEgress",
"ec2:RevokeSecurityGroupIngress",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface",
"ec2:DeleteNetworkInterfacePermission",
"ec2:DescribeNetworkInterfaces",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:DescribeTags",
"ec2:DescribeInstances",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"elasticmapreduce:ListInstances",
"elasticmapreduce:DescribeCluster",
"elasticmapreduce:ListSteps"],
effect=iam.Effect.ALLOW
))
core.Tags.of(role).add("for-use-with-amazon-emr-managed-policies", "true")
user_role = iam.Role(self, "StudioUserRole",
assumed_by=iam.ServicePrincipal("elasticmapreduce.amazonaws.com")
)
        core.Tags.of(user_role).add("for-use-with-amazon-emr-managed-policies", "true")
user_role.add_to_policy(iam.PolicyStatement(
actions=["elasticmapreduce:CreateEditor",
"elasticmapreduce:DescribeEditor",
"elasticmapreduce:ListEditors",
"elasticmapreduce:StartEditor",
"elasticmapreduce:StopEditor",
"elasticmapreduce:DeleteEditor",
"elasticmapreduce:OpenEditorInConsole",
"elasticmapreduce:AttachEditor",
"elasticmapreduce:DetachEditor",
"elasticmapreduce:CreateRepository",
"elasticmapreduce:DescribeRepository",
"elasticmapreduce:DeleteRepository",
"elasticmapreduce:ListRepositories",
"elasticmapreduce:LinkRepository",
"elasticmapreduce:UnlinkRepository",
"elasticmapreduce:DescribeCluster",
"elasticmapreduce:ListInstanceGroups",
"elasticmapreduce:ListBootstrapActions",
"elasticmapreduce:ListClusters",
"elasticmapreduce:ListSteps",
"elasticmapreduce:CreatePersistentAppUI",
"elasticmapreduce:DescribePersistentAppUI",
"elasticmapreduce:GetPersistentAppUIPresignedURL",
"secretsmanager:CreateSecret",
"secretsmanager:ListSecrets",
"secretsmanager:TagResource",
"emr-containers:DescribeVirtualCluster",
"emr-containers:ListVirtualClusters",
"emr-containers:DescribeManagedEndpoint",
"emr-containers:ListManagedEndpoints",
"emr-containers:CreateAccessTokenForManagedEndpoint",
"emr-containers:DescribeJobRun",
"emr-containers:ListJobRuns"],
resources=["*"],
effect=iam.Effect.ALLOW
))
user_role.add_to_policy(iam.PolicyStatement(
resources=["*"],
actions=["servicecatalog:DescribeProduct",
"servicecatalog:DescribeProductView",
"servicecatalog:DescribeProvisioningParameters",
"servicecatalog:ProvisionProduct",
"servicecatalog:SearchProducts",
"servicecatalog:UpdateProvisionedProduct",
"servicecatalog:ListProvisioningArtifacts",
"servicecatalog:DescribeRecord",
"cloudformation:DescribeStackResources"],
effect=iam.Effect.ALLOW
))
user_role.add_to_policy(iam.PolicyStatement(
resources=["*"],
actions=["elasticmapreduce:RunJobFlow"],
effect=iam.Effect.ALLOW
))
user_role.add_to_policy(iam.PolicyStatement(
resources=[role.role_arn,
f"arn:aws:iam::{self.account}:role/EMR_DefaultRole",
f"arn:aws:iam::{self.account}:role/EMR_EC2_DefaultRole"],
actions=["iam:PassRole"],
effect=iam.Effect.ALLOW
))
user_role.add_to_policy(iam.PolicyStatement(
resources=["arn:aws:s3:::*"],
actions=["s3:ListAllMyBuckets",
"s3:ListBucket",
"s3:GetBucketLocation"],
effect=iam.Effect.ALLOW
))
user_role.add_to_policy(iam.PolicyStatement(
resources=[f"arn:aws:s3:::{bucket.bucket_name}/*",
f"arn:aws:s3:::aws-logs-{self.account}-{self.region}/elasticmapreduce/*"],
actions=["s3:GetObject"],
effect=iam.Effect.ALLOW
))
policy_document = {
"Version": "2012-10-17T00:00:00.000Z",
"Statement": [
{
"Action": [
"elasticmapreduce:CreateEditor",
"elasticmapreduce:DescribeEditor",
"elasticmapreduce:ListEditors",
"elasticmapreduce:StartEditor",
"elasticmapreduce:StopEditor",
"elasticmapreduce:DeleteEditor",
"elasticmapreduce:OpenEditorInConsole",
"elasticmapreduce:AttachEditor",
"elasticmapreduce:DetachEditor",
"elasticmapreduce:CreateRepository",
"elasticmapreduce:DescribeRepository",
"elasticmapreduce:DeleteRepository",
"elasticmapreduce:ListRepositories",
"elasticmapreduce:LinkRepository",
"elasticmapreduce:UnlinkRepository",
"elasticmapreduce:DescribeCluster",
"elasticmapreduce:ListInstanceGroups",
"elasticmapreduce:ListBootstrapActions",
"elasticmapreduce:ListClusters",
"elasticmapreduce:ListSteps",
"elasticmapreduce:CreatePersistentAppUI",
"elasticmapreduce:DescribePersistentAppUI",
"elasticmapreduce:GetPersistentAppUIPresignedURL",
"secretsmanager:CreateSecret",
"secretsmanager:ListSecrets",
"emr-containers:DescribeVirtualCluster",
"emr-containers:ListVirtualClusters",
"emr-containers:DescribeManagedEndpoint",
"emr-containers:ListManagedEndpoints",
"emr-containers:CreateAccessTokenForManagedEndpoint",
"emr-containers:DescribeJobRun",
"emr-containers:ListJobRuns"
],
"Resource": "*",
"Effect": "Allow",
"Sid": "AllowBasicActions"
},
{
"Action": [
"servicecatalog:DescribeProduct",
"servicecatalog:DescribeProductView",
"servicecatalog:DescribeProvisioningParameters",
"servicecatalog:ProvisionProduct",
"servicecatalog:SearchProducts",
"servicecatalog:UpdateProvisionedProduct",
"servicecatalog:ListProvisioningArtifacts",
"servicecatalog:DescribeRecord",
"cloudformation:DescribeStackResources"
],
"Resource": "*",
"Effect": "Allow",
"Sid": "AllowIntermediateActions"
},
{
"Action": [
"elasticmapreduce:RunJobFlow"
],
"Resource": "*",
"Effect": "Allow",
"Sid": "AllowAdvancedActions"
},
{
"Action": "iam:PassRole",
"Resource": [
role.role_arn,
f"arn:aws:iam::{self.account}:role/EMR_DefaultRole",
f"arn:aws:iam::{self.account}:role/EMR_EC2_DefaultRole"
],
"Effect": "Allow",
"Sid": "PassRolePermission"
},
{
"Action": [
"s3:ListAllMyBuckets",
"s3:ListBucket",
"s3:GetBucketLocation"
],
"Resource": "arn:aws:s3:::*",
"Effect": "Allow",
"Sid": "S3ListPermission"
},
{
"Action": [
"s3:GetObject"
],
"Resource": [
f"arn:aws:s3:::{bucket.bucket_name}/*",
f"arn:aws:s3:::aws-logs-{self.account}-{self.region}/elasticmapreduce/*"
],
"Effect": "Allow",
"Sid": "S3GetObjectPermission"
}
]
}
custom_policy_document = iam.PolicyDocument.from_json(policy_document)
new_managed_policy = iam.ManagedPolicy(self, "LBControlPolicy",
document=custom_policy_document
)
# Set up Studio
studio = emr.CfnStudio(self, "MyEmrStudio",
auth_mode = "SSO", default_s3_location = f"s3://{bucket.bucket_name}/studio/",
engine_security_group_id = eng_sg.security_group_id,
name = "MyEmrEksStudio",
service_role = role.role_arn,
subnet_ids = [n.subnet_id for n in vpc.private_subnets],
user_role = user_role.role_arn,
vpc_id = vpc.vpc_id,
workspace_security_group_id = ws_sg.security_group_id,
description=None,
tags=None)
core.CfnOutput(
self, "StudioUrl",
value=studio.attr_url
)
# Create session mapping
studiosm = emr.CfnStudioSessionMapping(self, "MyStudioSM",
identity_name = self.node.try_get_context("username"),
identity_type = "USER",
session_policy_arn = new_managed_policy.managed_policy_arn,
studio_id = studio.attr_studio_id)
| 43.78777
| 179
| 0.559271
| 11,834
| 0.972151
| 0
| 0
| 0
| 0
| 0
| 0
| 5,507
| 0.452395
|
55f657ac810bd7adff3d28ddcf6b426dbce9f289
| 291
|
py
|
Python
|
dev/user-agent-stacktrace/lib/utils.py
|
Katharine/apisnoop
|
46c0e101c6e1e13a783f5022a6f77787c0824032
|
[
"Apache-2.0"
] | null | null | null |
dev/user-agent-stacktrace/lib/utils.py
|
Katharine/apisnoop
|
46c0e101c6e1e13a783f5022a6f77787c0824032
|
[
"Apache-2.0"
] | 13
|
2018-08-21T04:00:44.000Z
|
2019-07-03T22:36:07.000Z
|
dev/user-agent-stacktrace/lib/utils.py
|
Katharine/apisnoop
|
46c0e101c6e1e13a783f5022a6f77787c0824032
|
[
"Apache-2.0"
] | 1
|
2019-05-09T18:47:22.000Z
|
2019-05-09T18:47:22.000Z
|
from collections import defaultdict
def defaultdicttree():
return defaultdict(defaultdicttree)
def defaultdict_to_dict(d):
if isinstance(d, defaultdict):
new_d = {}
for k, v in d.items():
new_d[k] = defaultdict_to_dict(v)
d = new_d
return d
| 22.384615
| 45
| 0.639175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
55f6b77678597fe15229ac3cf620e327925c88f6
| 1,217
|
py
|
Python
|
WebMirror/management/rss_parser_funcs/feed_parse_extractKaedesan721TumblrCom.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 193
|
2016-08-02T22:04:35.000Z
|
2022-03-09T20:45:41.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractKaedesan721TumblrCom.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 533
|
2016-08-23T20:48:23.000Z
|
2022-03-28T15:55:13.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractKaedesan721TumblrCom.py
|
rrosajp/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 19
|
2015-08-13T18:01:08.000Z
|
2021-07-12T17:13:09.000Z
|
def extractKaedesan721TumblrCom(item):
'''
Parser for 'kaedesan721.tumblr.com'
'''
bad_tags = [
'FanArt',
"htr asks",
'Spanish translations',
'htr anime','my thoughts',
'Cats',
'answered',
'ask meme',
'relay convos',
'translation related post',
'nightmare fuel',
'htr manga',
'memes',
'htrweek',
'Video Games',
'Animation',
'replies',
'jazz',
'Music',
]
if any([bad in item['tags'] for bad in bad_tags]):
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
if "my translations" in item['tags']:
tagmap = [
('Hakata Tonkotsu Ramens', 'Hakata Tonkotsu Ramens', 'translated'),
('hakata tonktosu ramens', 'Hakata Tonkotsu Ramens', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| 26.456522
| 105
| 0.576828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 491
| 0.403451
|
55f710f1ba87dd022df6c57369e502a39ab22bee
| 8,244
|
py
|
Python
|
l0bnb/tree.py
|
rahulmaz/L0BnB
|
72c262581dd2d7e1489668c2fb2052214b6bbcdd
|
[
"MIT"
] | 1
|
2020-04-16T03:40:36.000Z
|
2020-04-16T03:40:36.000Z
|
l0bnb/tree.py
|
rahulmaz/L0BnB
|
72c262581dd2d7e1489668c2fb2052214b6bbcdd
|
[
"MIT"
] | 1
|
2020-04-16T04:12:12.000Z
|
2020-04-16T04:12:12.000Z
|
l0bnb/tree.py
|
rahulmaz/L0BnB
|
72c262581dd2d7e1489668c2fb2052214b6bbcdd
|
[
"MIT"
] | 1
|
2020-04-16T03:42:19.000Z
|
2020-04-16T03:42:19.000Z
|
import time
import queue
import sys
import numpy as np
from scipy import optimize as sci_opt
from .node import Node
from .utilities import branch, is_integral
class BNBTree:
def __init__(self, x, y, inttol=1e-4, reltol=1e-4):
"""
Initiate a BnB Tree to solve the least squares regression problem with
l0l2 regularization
Parameters
----------
x: np.array
n x p numpy array
y: np.array
1 dimensional numpy array of size n
inttol: float
The integral tolerance of a variable.
reltol: float
primal-dual relative tolerance
"""
self.x = x
self.y = y
self.inttol = inttol
self.reltol = reltol
self.xi_xi = np.sum(x * x, axis=0)
# The number of features
self.p = x.shape[1]
self.n = x.shape[0]
self.node_bfs_queue = queue.Queue()
self.node_dfs_queue = queue.LifoQueue()
self.levels = {}
# self.leaves = []
self.number_of_nodes = 0
self.root = None
def solve(self, l0, l2, m, gaptol=1e-2, warm_start=None, mu=0.95,
branching='maxfrac', l1solver='l1cd', number_of_dfs_levels=0,
verbose=False):
"""
Solve the least squares problem with l0l2 regularization
Parameters
----------
l0: float
The zeroth norm coefficient
l2: float
The second norm coefficient
m: float
features bound (big M)
gaptol: float
the relative gap between the upper and lower bound after which the
algorithm will be terminated
warm_start: np.array
(p x 1) array representing a warm start
branching: str
'maxfrac' or 'strong'
l1solver: str
'l1cd', 'gurobi' or 'mosek'
mu: float
Used with strong branching
number_of_dfs_levels: int
number of levels to solve as dfs
verbose: int
print progress
Returns
-------
tuple
uppersol, upperbound, lower_bound, best_gap, sol_time
"""
st = time.time()
if warm_start is None:
upperbound = sys.maxsize
uppersol = None
else:
if verbose:
print("using a warm start")
support = np.nonzero(warm_start)[0]
x_support = self.x[:, support]
x_ridge = np.sqrt(2 * l2) * np.identity(len(support))
x_upper = np.concatenate((x_support, x_ridge), axis=0)
y_upper = np.concatenate((self.y, np.zeros(len(support))), axis=0)
res = sci_opt.lsq_linear(x_upper, y_upper, (-m, m))
upperbound = res.cost + l0 * len(support)
uppersol = warm_start
uppersol[support] = res.x
if verbose:
print(f"initializing using a warm start took {time.time() - st}")
# upper and lower bounds
zlb = np.zeros(self.p)
zub = np.ones(self.p)
# root node
self.root = Node(None, zlb, zub, x=self.x, y=self.y, l0=l0, l2=l2, m=m,
xi_xi=self.xi_xi)
self.node_bfs_queue.put(self.root)
# lower and upper bounds initialization
lower_bound = {}
dual_bound = {}
self.levels = {0: 1}
min_open_level = 0
if verbose:
print(f'solving using {number_of_dfs_levels} dfs levels')
while self.node_bfs_queue.qsize() > 0 or self.node_dfs_queue.qsize() > 0:
# get node
if self.node_dfs_queue.qsize() > 0:
current_node = self.node_dfs_queue.get()
else:
current_node = self.node_bfs_queue.get()
# print(current_node.level, upperbound, self.levels)
# prune?
if current_node.parent_cost and upperbound <= \
current_node.parent_cost:
self.levels[current_node.level] -= 1
# self.leaves.append(current_node)
continue
# calculate lower bound and update
self.number_of_nodes += 1
current_lower_bound, current_dual_cost = current_node.\
lower_solve(l1solver, self.reltol, self.inttol)
lower_bound[current_node.level] = \
min(current_lower_bound,
lower_bound.get(current_node.level, sys.maxsize))
dual_bound[current_node.level] = \
min(current_dual_cost,
dual_bound.get(current_node.level, sys.maxsize))
self.levels[current_node.level] -= 1
# update gap?
if self.levels[min_open_level] == 0:
del self.levels[min_open_level]
min_value = max([j for i, j in dual_bound.items()
if i <= min_open_level])
best_gap = (upperbound - min_value)/abs(upperbound)
if verbose:
print(f'l: {min_open_level}, (d: {min_value}, '
f'p: {lower_bound[min_open_level]}), u: {upperbound},'
f' g: {best_gap}, t: {time.time() - st} s')
# arrived at a solution?
if best_gap <= gaptol:
# self.leaves += [current_node] + \
# list(self.node_bfs_queue.queue) + \
# list(self.node_dfs_queue.queue)
return uppersol, upperbound, lower_bound, best_gap, \
time.time() - st
min_open_level += 1
# integral solution?
if is_integral(current_node.lower_bound_z, self.inttol):
current_upper_bound = current_lower_bound
if current_upper_bound < upperbound:
upperbound = current_upper_bound
uppersol = current_node.lower_bound_solution
# self.leaves.append(current_node)
if verbose:
                    print('integral:', current_node)
# branch?
elif current_dual_cost < upperbound:
current_upper_bound = current_node.upper_solve()
if current_upper_bound < upperbound:
upperbound = current_upper_bound
uppersol = current_node.upper_bound_solution
left_node, right_node = branch(current_node, self.x, l0, l2, m,
self.xi_xi, self.inttol,
branching, mu)
self.levels[current_node.level + 1] = \
self.levels.get(current_node.level + 1, 0) + 2
if current_node.level < min_open_level + number_of_dfs_levels:
self.node_dfs_queue.put(right_node)
self.node_dfs_queue.put(left_node)
else:
self.node_bfs_queue.put(right_node)
self.node_bfs_queue.put(left_node)
# prune?
else:
pass
# self.leaves.append(current_node)
min_value = max([j for i, j in dual_bound.items()
if i <= min_open_level])
best_gap = (upperbound - min_value)/abs(upperbound)
return uppersol, upperbound, lower_bound, best_gap, time.time() - st
# def get_lower_optimal_node(self):
# self.leaves = sorted(self.leaves)
# if self.leaves[-1].lower_bound_value:
# return self.leaves[-1]
# else:
# return self.leaves[-1].parent
#
# @staticmethod
# def support_list(current_node):
# list_ = []
# while current_node:
# list_.append(current_node.support)
# current_node = current_node.parent
# return list_
#
# def optimal_support_list(self):
# list_ = []
# current_node = self.get_lower_optimal_node()
# while current_node:
# list_.append(current_node.support)
# current_node = current_node.parent
# return list_
| 37.135135
| 81
| 0.533236
| 8,080
| 0.980107
| 0
| 0
| 0
| 0
| 0
| 0
| 2,788
| 0.338185
|
55f78570dc2c54902bbba417e6ce4621cf9434e6
| 1,819
|
py
|
Python
|
miniGithub/migrations/0003_auto_20200119_0955.py
|
stefan096/UKS
|
aeabe6a9995143c006ad4143e8e876a102e9d69b
|
[
"MIT"
] | null | null | null |
miniGithub/migrations/0003_auto_20200119_0955.py
|
stefan096/UKS
|
aeabe6a9995143c006ad4143e8e876a102e9d69b
|
[
"MIT"
] | 36
|
2020-01-12T17:00:23.000Z
|
2020-03-21T13:25:28.000Z
|
miniGithub/migrations/0003_auto_20200119_0955.py
|
stefan096/UKS
|
aeabe6a9995143c006ad4143e8e876a102e9d69b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.2 on 2020-01-19 09:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('miniGithub', '0002_project_owner'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('custom_event_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='miniGithub.Custom_Event')),
('description', models.CharField(max_length=500)),
],
bases=('miniGithub.custom_event',),
),
migrations.AlterField(
model_name='custom_event',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='Problem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('base_problem', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='problem', to='miniGithub.Problem')),
('project', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='miniGithub.Project')),
],
),
migrations.AddField(
model_name='custom_event',
name='problem',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='miniGithub.Problem'),
),
]
| 41.340909
| 206
| 0.632216
| 1,660
| 0.912589
| 0
| 0
| 0
| 0
| 0
| 0
| 331
| 0.181968
|
55f7fc91f85571caa12221e2e54d28b60ea32a14
| 4,468
|
py
|
Python
|
megatron/model/gpt_model.py
|
vat99/Megatron-LM
|
fd61ae95aa8f3f41aa970cb86e943a7e5bfe0d1a
|
[
"MIT"
] | 1
|
2022-03-29T09:16:39.000Z
|
2022-03-29T09:16:39.000Z
|
megatron/model/gpt_model.py
|
vat99/Megatron-LM
|
fd61ae95aa8f3f41aa970cb86e943a7e5bfe0d1a
|
[
"MIT"
] | 5
|
2022-01-20T08:06:03.000Z
|
2022-03-10T10:01:32.000Z
|
megatron/model/gpt_model.py
|
vat99/Megatron-LM
|
fd61ae95aa8f3f41aa970cb86e943a7e5bfe0d1a
|
[
"MIT"
] | 1
|
2022-03-25T12:00:47.000Z
|
2022-03-25T12:00:47.000Z
|
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GPT-2 model."""
import torch
from megatron import get_args
from megatron import mpu
from .module import MegatronModule
from .enums import AttnMaskType
from .language_model import parallel_lm_logits
from .language_model import get_language_model
from .utils import init_method_normal
from .utils import scaled_init_method_normal
def post_language_model_processing(lm_output, labels, logit_weights,
parallel_output,
fp16_lm_cross_entropy):
# Output.
output = parallel_lm_logits(
lm_output,
logit_weights,
parallel_output)
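    # If no labels are provided, return the (vocab-parallel) logits directly;
    # otherwise compute vocab-parallel cross entropy, in fp16 when configured.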
if labels is None:
return output
else:
if fp16_lm_cross_entropy:
assert output.dtype == torch.half
loss = mpu.vocab_parallel_cross_entropy(output, labels)
else:
loss = mpu.vocab_parallel_cross_entropy(output.float(), labels)
return loss
class GPTModel(MegatronModule):
"""GPT-2 Language model."""
def __init__(self,
num_tokentypes=0,
parallel_output=True,
pre_process=True,
post_process=True):
super(GPTModel, self).__init__()
args = get_args()
self.parallel_output = parallel_output
self.pre_process = pre_process
self.post_process = post_process
self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
self.language_model, self._language_model_key = get_language_model(
num_tokentypes=num_tokentypes,
add_pooler=False,
encoder_attn_mask_type=AttnMaskType.causal,
init_method=init_method_normal(args.init_method_std),
scaled_init_method=scaled_init_method_normal(args.init_method_std,
args.num_layers),
pre_process=self.pre_process,
post_process=self.post_process)
self.initialize_word_embeddings(init_method_normal)
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
self.language_model.set_input_tensor(input_tensor)
def forward(self, input_ids, position_ids, attention_mask, labels=None,
tokentype_ids=None, inference_params=None):
lm_output = self.language_model(
input_ids,
position_ids,
attention_mask,
inference_params=inference_params)
if self.post_process:
return post_language_model_processing(
lm_output, labels,
self.word_embeddings_weight(),
self.parallel_output,
self.fp16_lm_cross_entropy)
else:
return lm_output
def state_dict_for_save_checkpoint(self, destination=None, prefix='',
keep_vars=False):
state_dict_ = {}
state_dict_[self._language_model_key] \
= self.language_model.state_dict_for_save_checkpoint(
destination, prefix, keep_vars)
# Save word_embeddings.
if self.post_process and not self.pre_process:
state_dict_[self._word_embeddings_for_head_key] \
= self.word_embeddings.state_dict(destination, prefix, keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
# Load word_embeddings.
if self.post_process and not self.pre_process:
self.word_embeddings.load_state_dict(
state_dict[self._word_embeddings_for_head_key], strict=strict)
if self._language_model_key in state_dict:
state_dict = state_dict[self._language_model_key]
self.language_model.load_state_dict(state_dict, strict=strict)
| 35.744
| 81
| 0.660922
| 2,894
| 0.647717
| 0
| 0
| 0
| 0
| 0
| 0
| 790
| 0.176813
|
55f88475538cbd35f162e1da477042bc863348a2
| 67
|
py
|
Python
|
python/testData/inspections/PyMethodMayBeStaticInspection/documentedEmpty.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2018-12-29T09:53:39.000Z
|
2018-12-29T09:53:42.000Z
|
python/testData/inspections/PyMethodMayBeStaticInspection/documentedEmpty.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/PyMethodMayBeStaticInspection/documentedEmpty.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
class A:
def foo(self):
"""Do something"""
pass
| 16.75
| 26
| 0.462687
| 67
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 18
| 0.268657
|
55f89e67422221688251900fa69112d9cc2e2083
| 5,324
|
py
|
Python
|
tests/utest/test_default_config.py
|
ngoan1608/robotframework-robocop
|
3444bbc98102f74ebae08dcb26cd63346f9ed03e
|
[
"Apache-2.0"
] | 2
|
2021-12-22T01:50:52.000Z
|
2022-01-05T06:32:27.000Z
|
tests/utest/test_default_config.py
|
marcel-veselka/robotframework-robocop
|
4711c0dd389baa2d0346e62e1fda3c02c2dcc73b
|
[
"Apache-2.0"
] | null | null | null |
tests/utest/test_default_config.py
|
marcel-veselka/robotframework-robocop
|
4711c0dd389baa2d0346e62e1fda3c02c2dcc73b
|
[
"Apache-2.0"
] | 1
|
2021-06-30T11:01:51.000Z
|
2021-06-30T11:01:51.000Z
|
import os
import sys
import importlib
from pathlib import Path
from unittest.mock import patch
import pytest
import robocop.config
from robocop.exceptions import InvalidArgumentError
@pytest.fixture
def config():
return robocop.config.Config()
@pytest.fixture
def path_to_test_data():
return Path(Path(__file__).parent.parent, 'test_data')
class TestDefaultConfig:
def test_find_project_root_same_dir(self, path_to_test_data, config):
src = path_to_test_data / 'default_config'
os.chdir(str(src))
root = config.find_file_in_project_root('.robocop')
assert root == src / '.robocop'
def test_find_project_root_missing_but_git(self, path_to_test_data, config):
src = path_to_test_data / 'default_config_missing' / 'nested' / 'deeper'
os.chdir(str(src))
root = config.find_file_in_project_root('.robocop')
assert root == Path(__file__).parent.parent.parent / '.robocop'
def test_load_config_from_default_file(self, path_to_test_data, config):
src = path_to_test_data / 'default_config'
os.chdir(str(src))
with patch.object(sys, 'argv', ['prog']):
config.parse_opts()
assert {'0810'} == config.include
def test_load_config_from_default_file_verbose(self, path_to_test_data, config, capsys):
src = path_to_test_data / 'default_config'
os.chdir(str(src))
config.from_cli = True
config.exec_dir = str(src)
with patch.object(sys, 'argv', ['prog', '--verbose']):
config.parse_opts()
out, _ = capsys.readouterr()
assert out == f'Loaded configuration from {config.config_from}\n'
def test_ignore_config_from_default_file(self, path_to_test_data, config):
src = path_to_test_data / 'default_config'
os.chdir(str(src))
with patch.object(sys, 'argv', ['prog', '--include', '0202']):
config.parse_opts()
assert {'0202'} == config.include
def test_load_default_config_before_pyproject(self, path_to_test_data, config):
src = path_to_test_data / 'default_config_and_pyproject'
os.chdir(str(src))
with patch.object(sys, 'argv', ['prog']):
config.parse_opts()
assert {'0810'} == config.include
def test_pyproject(self, path_to_test_data, config):
src = path_to_test_data / 'only_pyproject'
os.chdir(str(src))
config.from_cli = True
config.exec_dir = str(src)
with patch.object(sys, 'argv', ['prog']):
config.parse_opts()
expected_config = robocop.config.Config(from_cli=True)
with patch.object(sys, 'argv', [
'robocop', '--include', 'W0504', '-i', '*doc*', '--exclude', '0203', '--reports',
'rules_by_id,scan_timer', '--ignore', 'ignore_me.robot', '--ext-rules', 'path_to_external\\dir',
'--filetypes', '.txt,csv', '--threshold', 'E', '--no-recursive', '--format',
'{source}:{line}:{col} [{severity}] {rule_id} {desc} (name)1', '--output', 'robocop.log', '--configure',
'line-too-long:line_length:150', '-c', '0201:severity:E', 'tests\\atest\\rules\\bad-indent',
'tests\\atest\\rules\\duplicated-library'
]):
expected_config.parse_opts()
config.config_from = ''
config.parser, expected_config.parser = None, None
config.output, expected_config.output = None, None
assert len(config.include_patterns) == len(expected_config.include_patterns)
config.include_patterns, expected_config.include_patterns = None, None
assert config.__dict__ == expected_config.__dict__
def test_pyproject_verbose(self, path_to_test_data, config, capsys):
src = path_to_test_data / 'only_pyproject'
os.chdir(str(src))
config.from_cli = True
config.exec_dir = str(src)
with patch.object(sys, 'argv', ['prog', '--verbose']):
config.parse_opts()
out, _ = capsys.readouterr()
assert out == f'Loaded configuration from {config.config_from}\n'
def test_not_supported_option_pyproject(self, path_to_test_data, config):
src = path_to_test_data / 'not_supported_option_pyproject'
os.chdir(str(src))
with pytest.raises(InvalidArgumentError) as e, patch.object(sys, 'argv', ['prog']):
config.parse_opts()
assert "Invalid configuration for Robocop:\\n" \
"Option 'list' is not supported in pyproject.toml configuration file." in str(e)
def test_invalid_toml_pyproject(self, path_to_test_data, config):
src = path_to_test_data / 'invalid_pyproject'
os.chdir(str(src))
with pytest.raises(InvalidArgumentError) as e, patch.object(sys, 'argv', ['prog']):
config.parse_opts()
assert "Invalid configuration for Robocop:\\nFailed to decode " in str(e)
def test_toml_not_installed_pyproject(self, path_to_test_data):
src = path_to_test_data / 'only_pyproject'
os.chdir(str(src))
with patch.dict('sys.modules', {'toml': None}):
importlib.reload(robocop.config)
config = robocop.config.Config()
with patch.object(sys, 'argv', ['prog']):
config.parse_opts()
assert config.include == set()
| 42.592
| 116
| 0.646506
| 4,967
| 0.932945
| 0
| 0
| 163
| 0.030616
| 0
| 0
| 1,184
| 0.222389
|
55f8affa309482626692f2a65c9326ebb9be7625
| 646
|
py
|
Python
|
tests/test_forms.py
|
haoziyeung/elasticstack
|
1fb4eb46317b402e0617badbc9034fb411a39992
|
[
"BSD-2-Clause"
] | 2
|
2020-11-23T11:03:03.000Z
|
2020-11-23T11:03:31.000Z
|
tests/test_forms.py
|
haoziyeung/elasticstack
|
1fb4eb46317b402e0617badbc9034fb411a39992
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_forms.py
|
haoziyeung/elasticstack
|
1fb4eb46317b402e0617badbc9034fb411a39992
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_elasticstack
------------
Tests for `elasticstack` forms module.
"""
from django import forms
from django.test import TestCase
from elasticstack.forms import SearchForm
class TestForms(TestCase):
def test_named_search_field(self):
"""Ensure that the `q` field can be optionally used"""
class MyForm(SearchForm):
s = forms.CharField(label='Search')
f = forms.CharField(label='More search')
search_field_name = 's'
form = MyForm()
self.assertTrue('s' in form.fields)
self.assertFalse('q' in form.fields)
| 21.533333
| 62
| 0.633127
| 415
| 0.642415
| 0
| 0
| 0
| 0
| 0
| 0
| 206
| 0.318885
|
55fa09f3a8c3fad0ee952c33bd12012b56fb9d68
| 668
|
py
|
Python
|
AnkiIn/notetypes/ListCloze.py
|
Clouder0/AnkiIn
|
ca944bb9f79ce49bc2db62a0bfaeffe7908b48da
|
[
"MIT"
] | 1
|
2021-07-04T08:10:53.000Z
|
2021-07-04T08:10:53.000Z
|
AnkiIn/notetypes/ListCloze.py
|
Clouder0/AnkiIn
|
ca944bb9f79ce49bc2db62a0bfaeffe7908b48da
|
[
"MIT"
] | 35
|
2021-07-03T10:50:20.000Z
|
2022-01-09T09:33:17.000Z
|
AnkiIn/notetypes/ListCloze.py
|
Clouder0/AnkiIn
|
ca944bb9f79ce49bc2db62a0bfaeffe7908b48da
|
[
"MIT"
] | 2
|
2021-08-21T11:33:00.000Z
|
2021-10-15T18:59:33.000Z
|
from .Cloze import get as cget
from ..config import dict as conf
from ..config import config_updater
notetype_name = "ListCloze"
if notetype_name not in conf["notetype"]:
conf["notetype"][notetype_name] = {}
settings = conf["notetype"][notetype_name]
priority = None
def update_list_cloze_config():
global settings, priority
priority = settings.get("priority", 15)
config_updater.append((update_list_cloze_config, 15))
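# A text block is treated as a ListCloze note when its first line is a Markdown list item ("- " or "* ").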
def check(lines: list, extra_params={}) -> bool:
return lines[0].startswith("- ") or lines[0].startswith(r"* ")
def get(text: str, deck: str, tags: list, extra_params={}):
return cget(text=text, deck=deck, tags=tags)
| 23.034483
| 66
| 0.712575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 60
| 0.08982
|
55fadfd4280d478b35858e331edea1ce48c5383a
| 9,697
|
py
|
Python
|
app/routes.py
|
ptkaczyk/Ithacartists
|
0d8effafe64b29ae1756169cac1eb4d6bc980c1d
|
[
"MIT"
] | null | null | null |
app/routes.py
|
ptkaczyk/Ithacartists
|
0d8effafe64b29ae1756169cac1eb4d6bc980c1d
|
[
"MIT"
] | null | null | null |
app/routes.py
|
ptkaczyk/Ithacartists
|
0d8effafe64b29ae1756169cac1eb4d6bc980c1d
|
[
"MIT"
] | null | null | null |
from flask import render_template, Flask, flash, redirect, url_for, abort, request
from flask_login import login_user, logout_user, login_required
from werkzeug.urls import url_parse
from app import app, db
from app.forms import *
from app.models import *
@app.route('/')
@app.route('/landing')
def landing():
return render_template('Landing.html', title='Landing')
@app.route('/artistlist')
def artistlist():
artists=Artist.query.all()
return render_template('Artists.html', artists=artists, title='Artists')
@app.route('/login', methods=['GET', 'POST'])
def login():
form = loginForm()
if form.validate_on_submit():
user=User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash('Incorrect name or password')
return redirect(url_for('login'))
login_user(user)
return redirect(url_for('landing'))
return render_template('Login.html', form=form, title='Login')
@app.route('/search', methods=['GET','POST'])
def search():
searched = Product.query.all()
form = searchForm()
if form.validate_on_submit():
searched = Product.query.filter_by(name=form.searchable.data).all()
return render_template('search.html', searchable=searched, form=form, title='Search')
@app.route('/user/<name>')
def user(name):
if len(User.query.filter_by(username=name).all()) > 0:
chosenUser = User.query.filter_by(username=name).first()
        chosenProducts = Product.query.filter_by(userId=chosenUser.id).all()
return render_template('user.html', title='User', userName=chosenUser.username, chosenUser=chosenUser,
productList=chosenProducts)
else:
abort(404)
@app.route('/product/<productName>')
def product(productName):
if len(Product.query.filter_by(name=productName).all()) > 0:
chosenProduct=Product.query.filter_by(name=productName).first()
chosenUser=User.query.filter_by(id=chosenProduct.userId).first()
userName=chosenUser.username
return render_template('product.html', title='Product', name=productName, userPosting=userName,
description=chosenProduct.description, date=chosenProduct.dateHarvested,
productPrice=chosenProduct.price, amount=chosenProduct.amount)
else:
abort(404)
@app.route('/newProduct', methods=['GET','POST'])
def newProduct():
form = productForm()
if form.validate_on_submit():
flash('New product created: {}'.format(form.name.data))
newP = Product(name=form.name.data, description=form.description.data, price=form.price.data, amount=form.amount.data, dateHarvested=form.date.data, userId=4)
db.session.add(newP)
db.session.commit()
return redirect(url_for('landing'))
return render_template('newProduct.html', title='New Product', form=form)
@app.route('/newartist', methods=['GET', 'POST'])
@login_required
def newartist():
form = artistForm()
if form.validate_on_submit():
if len(Artist.query.filter_by(firstname=form.artistName.data).all()) > 0:
flash('That name already exists')
else:
flash('New page created: {}'.format(form.artistName.data))
newA = Artist(firstname=form.artistName.data, lastname='', hometown=form.hometown.data, description=form.description.data)
db.session.add(newA)
db.session.commit()
return redirect(url_for('artistlist'))
return render_template('NewArtist.html', form=form, title='New Artist')
@app.route('/newvenue', methods=['GET','POST'])
def newvenue():
form = venueForm()
if form.validate_on_submit():
if len(Venue.query.filter_by(name=form.name.data).all()) > 0:
flash('That venue already exists')
else:
flash('New venue created: {}'.format(form.name.data))
newV = Venue(name=form.name.data, description=form.description.data)
db.session.add(newV)
db.session.commit()
return redirect(url_for('artistlist'))
return render_template('NewVenue.html', title='New Venue', form=form)
@app.route('/newevent', methods=['GET', 'POST'])
def newevent():
form = eventForm()
form.venue.choices = [(venue.id, venue.name) for venue in Venue.query.all()]
form.artists.choices = [(artist.id, artist.firstname) for artist in Artist.query.all()]
if form.validate_on_submit():
if len(Event.query.filter_by(name=form.name.data).all()) > 0:
flash('That event already exists')
else:
flash('New event created: {}'.format(form.name.data))
newE = Event(name=form.name.data, description=form.description.data, time=form.time.data, venueId=form.venue.data)
db.session.add(newE)
db.session.commit()
for a in form.artists.data:
newX = ArtistToEvent(artistId=Artist.query.filter_by(id=a).first().id, eventId=newE.id)
db.session.add(newX)
db.session.commit()
return redirect(url_for('artistlist'))
return render_template('NewEvent.html', title='New Event', form=form)
@app.route('/artist/<name>')
#instructor = Instructor.query.filter_by(firstname="Alex").first()
def artist(name):
if len(Artist.query.filter_by(firstname=name).all()) > 0:
chosenArtist=Artist.query.filter_by(firstname=name).first()
chosenJoins=ArtistToEvent.query.filter_by(artistId=chosenArtist.id).all()
chosenEvents = []
trackingInt=0
for oneEvent in chosenJoins:
chosenEvents.append(Event.query.filter_by(id=chosenJoins[trackingInt].eventId).first())
trackingInt=trackingInt+1
#chosenEvents=Event.query.filter_by(id=chosenJoin.eventId).all()
return render_template('Artist.html', title='Artist', artistName=chosenArtist.firstname, hometown=chosenArtist.hometown, description=chosenArtist.description, event_list=chosenEvents)
else:
abort(404)
@app.route('/register', methods=['GET','POST'])
def register():
form = registerForm()
if form.validate_on_submit():
if len(User.query.filter_by(username=form.username.data).all()) > 0:
flash('That name already exists')
else:
flash('New user created. You can now log in.')
newU= User(username=form.username.data, password=form.password.data)
newU.set_password(form.password.data)
db.session.add(newU)
db.session.commit()
return redirect(url_for('landing'))
return render_template('Register.html', form=form, title='Register')
@app.route('/logout')
def logout():
logout_user()
flash("User has been logged out.")
return redirect(url_for('landing'))
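# Development helper: seed the database with demo users, products, artists, venues and events.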
@app.route('/populate_db')
def populate_db():
a1=Artist(firstname='Anne', lastname='Apricot', hometown='Ithaca', description='A')
a2=Artist(firstname='Ben', lastname='Barrel', hometown='Ithaca', description='B')
a3=Artist(firstname='Cathy', lastname='Chowder', hometown='Ithaca', description='C')
a4=Artist(firstname='Dan', lastname='Derringer', hometown='Delanson', description='D')
e1=Event(name='Augustfest', description='A', venueId='0')
e2 = Event(name='Burgerfest', description='B', venueId='1')
e3 = Event(name='Ciderfest', description='C', venueId='2')
e4 = Event(name='Donutfest', description='D', venueId='1')
e5 = Event(name='Earwigfest', description='E', venueId='1')
e6 = Event(name='Falafelfest', description='F', venueId='2')
ate1 = ArtistToEvent(artistId=1, eventId=1)
ate2 = ArtistToEvent(artistId=2, eventId=2)
ate3 = ArtistToEvent(artistId=3, eventId=3)
ate4 = ArtistToEvent(artistId=4, eventId=4)
ate5 = ArtistToEvent(artistId=1, eventId=5)
ate6 = ArtistToEvent(artistId=2, eventId=5)
ate7 = ArtistToEvent(artistId=3, eventId=6)
ate8 = ArtistToEvent(artistId=1, eventId=6)
v1 = Venue(name='Adelide Acres', description='A')
v2 = Venue(name='Baltimore Barrelers', description='B')
v3 = Venue(name='Canary Church', description='C')
u1 = User(username='Peter',password='Tkaczyk')
u1.set_password('Tkaczyk')
u2 = User(username='Old Man McFarmer', password='Farmlivin')
u2.set_password('Farmlivin')
u3 = User(username='Young Man McFarmer', password='ILovFarm')
u3.set_password('ILovFarm')
p1 = Product(name='Eggs', amount = 12, dateHarvested = '12-12-2020', description = 'delicious eggs', price = '$0.99'
, userId=1)
p2 = Product(name='Tomatoes', amount=20, dateHarvested='12-14-2020', description='delicious tomatoes', price='$1.99',
userId=2)
p3 = Product(name='Beets', amount=30, dateHarvested='12-10-2020', description='delicious beets', price='$2.99'
, userId=3)
p4 = Product(name='Bacon', amount=10, dateHarvested='11-20-2020', description='delicious bacon', price='$3.99',
userId=2)
p5 = Product(name='Turnips', amount=40, dateHarvested='12-10-2020', description='delicious turnips', price='$4.99',
userId=3)
    db.session.add_all([a1, a2, a3, a4, v1, v2, v3, e1, e2, e3, e4, e5, e6,
                        ate1, ate2, ate3, ate4, ate5, ate6, ate7, ate8,
                        u1, u2, u3, p1, p2, p3, p4, p5])
db.session.commit()
return "database has been populated."
@app.route('/reset_db')
def reset_db():
flash("Resetting database: deleting old data and repopulating with dummy data")
meta = db.metadata
for table in reversed(meta.sorted_tables):
print('Clear table {}'.format(table))
db.session.execute(table.delete())
db.session.commit()
populate_db()
return "Reset and repopulated data."
| 42.530702
| 191
| 0.662267
| 0
| 0
| 0
| 0
| 9,400
| 0.969372
| 0
| 0
| 1,800
| 0.185624
|
55fb46ee1813e2c980cdc6a6a49ca860bf41a84e
| 2,861
|
py
|
Python
|
src/bloombox/schema/services/devices/v1beta1/DevicesService_Beta1_pb2_grpc.py
|
Bloombox/Python
|
1b125fbdf54efb390afe12aaa966f093218c4387
|
[
"Apache-2.0"
] | 4
|
2018-01-23T20:13:11.000Z
|
2018-07-28T22:36:09.000Z
|
src/bloombox/schema/services/devices/v1beta1/DevicesService_Beta1_pb2_grpc.py
|
Bloombox/Python
|
1b125fbdf54efb390afe12aaa966f093218c4387
|
[
"Apache-2.0"
] | 159
|
2018-02-02T09:55:52.000Z
|
2021-07-21T23:41:59.000Z
|
src/bloombox/schema/services/devices/v1beta1/DevicesService_Beta1_pb2_grpc.py
|
Bloombox/Python
|
1b125fbdf54efb390afe12aaa966f093218c4387
|
[
"Apache-2.0"
] | 3
|
2018-01-23T20:13:15.000Z
|
2020-01-17T01:07:53.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from devices.v1beta1 import DevicesService_Beta1_pb2 as devices_dot_v1beta1_dot_DevicesService__Beta1__pb2
class DevicesStub(object):
"""Specifies the devices service, which enables managed devices to check-in, authorize themselves, and discover their
identity/role.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Ping = channel.unary_unary(
'/bloombox.schema.services.devices.v1beta1.Devices/Ping',
request_serializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Request.SerializeToString,
response_deserializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Response.FromString,
)
self.Activate = channel.unary_unary(
'/bloombox.schema.services.devices.v1beta1.Devices/Activate',
request_serializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Request.SerializeToString,
response_deserializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Response.FromString,
)
class DevicesServicer(object):
"""Specifies the devices service, which enables managed devices to check-in, authorize themselves, and discover their
identity/role.
"""
def Ping(self, request, context):
"""Ping the device server.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Activate(self, request, context):
"""Setup and enable a device for live use. If this is the first time the subject device has activated itself,
initialize or otherwise provision any requisite objects or resources.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_DevicesServicer_to_server(servicer, server):
rpc_method_handlers = {
'Ping': grpc.unary_unary_rpc_method_handler(
servicer.Ping,
request_deserializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Request.FromString,
response_serializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Response.SerializeToString,
),
'Activate': grpc.unary_unary_rpc_method_handler(
servicer.Activate,
request_deserializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Request.FromString,
response_serializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Response.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'bloombox.schema.services.devices.v1beta1.Devices', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| 42.701493
| 119
| 0.774205
| 1,757
| 0.614121
| 0
| 0
| 0
| 0
| 0
| 0
| 921
| 0.321915
|
55fb9d49fcf1a873c80991e0f909fcb04543c2ba
| 10,052
|
py
|
Python
|
oslo-modules/oslo_messaging/_drivers/amqp.py
|
esse-io/zen-common
|
8ede82ab81bad53c3b947084b812c44e329f159b
|
[
"Apache-2.0"
] | 1
|
2021-02-17T15:30:45.000Z
|
2021-02-17T15:30:45.000Z
|
oslo-modules/oslo_messaging/_drivers/amqp.py
|
esse-io/zen-common
|
8ede82ab81bad53c3b947084b812c44e329f159b
|
[
"Apache-2.0"
] | null | null | null |
oslo-modules/oslo_messaging/_drivers/amqp.py
|
esse-io/zen-common
|
8ede82ab81bad53c3b947084b812c44e329f159b
|
[
"Apache-2.0"
] | 2
|
2015-11-03T03:21:55.000Z
|
2015-12-01T08:56:14.000Z
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implementations based on
AMQP. Specifically, this includes impl_kombu and impl_qpid. impl_carrot also
uses AMQP, but is deprecated and predates this code.
"""
import collections
import logging
import uuid
from oslo_config import cfg
import six
from oslo_messaging._drivers import common as rpc_common
from oslo_messaging._drivers import pool
deprecated_durable_opts = [
cfg.DeprecatedOpt('amqp_durable_queues',
group='DEFAULT'),
cfg.DeprecatedOpt('rabbit_durable_queues',
group='DEFAULT')
]
amqp_opts = [
cfg.BoolOpt('amqp_durable_queues',
default=False,
deprecated_opts=deprecated_durable_opts,
help='Use durable queues in AMQP.'),
cfg.BoolOpt('amqp_auto_delete',
default=False,
deprecated_group='DEFAULT',
help='Auto-delete queues in AMQP.'),
cfg.BoolOpt('send_single_reply',
default=False,
help='Send a single AMQP reply to call message. The current '
'behaviour since oslo-incubator is to send two AMQP '
'replies - first one with the payload, a second one to '
                     'ensure the other side has finished sending the payload. We '
'are going to remove it in the N release, but we must '
'keep backward compatible at the same time. This option '
'provides such compatibility - it defaults to False in '
'Liberty and can be turned on for early adopters with a '
'new installations or for testing. Please note, that '
'this option will be removed in the Mitaka release.')
]
UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
# NOTE(sileht): Even if rabbit/qpid have only one Connection class,
# this connection can be used for two purposes:
# * wait and receive amqp messages (only do read stuffs on the socket)
# * send messages to the broker (only do write stuffs on the socket)
# The code inside a connection class is not concurrency safe.
# Using one Connection class instance for both results in eventlet
# complaining about multiple greenthreads reading/writing the same fd
# concurrently, because 'send' and 'listen' run in different greenthreads.
# So a connection cannot be shared between threads/greenthreads, and these
# two variables define the purpose of the connection so that drivers can
# add special handling where needed (e.g. heartbeat).
# amqp drivers create 3 kind of connections:
# * driver.listen*(): each call create a new 'PURPOSE_LISTEN' connection
# * driver.send*(): a pool of 'PURPOSE_SEND' connections is used
# * driver internally have another 'PURPOSE_LISTEN' connection dedicated
# to wait replies of rpc call
PURPOSE_LISTEN = 'listen'
PURPOSE_SEND = 'send'
class ConnectionPool(pool.Pool):
"""Class that implements a Pool of Connections."""
def __init__(self, conf, rpc_conn_pool_size, url, connection_cls):
self.connection_cls = connection_cls
self.conf = conf
self.url = url
super(ConnectionPool, self).__init__(rpc_conn_pool_size)
self.reply_proxy = None
# TODO(comstud): Timeout connections not used in a while
def create(self, purpose=None):
if purpose is None:
purpose = PURPOSE_SEND
LOG.debug('Pool creating new connection')
return self.connection_cls(self.conf, self.url, purpose)
def empty(self):
for item in self.iter_free():
item.close()
class ConnectionContext(rpc_common.Connection):
"""The class that is actually returned to the create_connection() caller.
This is essentially a wrapper around Connection that supports 'with'.
It can also return a new Connection, or one from a pool.
The function will also catch when an instance of this class is to be
deleted. With that we can return Connections to the pool on exceptions
and so forth without making the caller be responsible for catching them.
If possible the function makes sure to return a connection to the pool.
"""
def __init__(self, connection_pool, purpose):
"""Create a new connection, or get one from the pool."""
self.connection = None
self.connection_pool = connection_pool
pooled = purpose == PURPOSE_SEND
if pooled:
self.connection = connection_pool.get()
else:
# a non-pooled connection is requested, so create a new connection
self.connection = connection_pool.create(purpose)
self.pooled = pooled
self.connection.pooled = pooled
def __enter__(self):
"""When with ConnectionContext() is used, return self."""
return self
def _done(self):
"""If the connection came from a pool, clean it up and put it back.
If it did not come from a pool, close it.
"""
if self.connection:
if self.pooled:
# Reset the connection so it's ready for the next caller
# to grab from the pool
try:
self.connection.reset()
except Exception:
LOG.exception("Fail to reset the connection, drop it")
try:
self.connection.close()
except Exception:
pass
self.connection = self.connection_pool.create()
finally:
self.connection_pool.put(self.connection)
else:
try:
self.connection.close()
except Exception:
pass
self.connection = None
def __exit__(self, exc_type, exc_value, tb):
"""End of 'with' statement. We're done here."""
self._done()
def __del__(self):
"""Caller is done with this connection. Make sure we cleaned up."""
self._done()
def close(self):
"""Caller is done with this connection."""
self._done()
def __getattr__(self, key):
"""Proxy all other calls to the Connection instance."""
if self.connection:
return getattr(self.connection, key)
else:
raise rpc_common.InvalidRPCConnectionReuse()
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.msg_id = kwargs.pop('msg_id', None)
self.reply_q = kwargs.pop('reply_q', None)
self.conf = kwargs.pop('conf')
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['conf'] = self.conf
values['msg_id'] = self.msg_id
values['reply_q'] = self.reply_q
return self.__class__(**values)
def unpack_context(conf, msg):
"""Unpack context from msg."""
context_dict = {}
for key in list(msg.keys()):
key = six.text_type(key)
if key.startswith('_context_'):
value = msg.pop(key)
context_dict[key[9:]] = value
context_dict['msg_id'] = msg.pop('_msg_id', None)
context_dict['reply_q'] = msg.pop('_reply_q', None)
context_dict['conf'] = conf
return RpcContext.from_dict(context_dict)
def pack_context(msg, context):
"""Pack context into msg.
Values for message keys need to be less than 255 chars, so we pull
context out into a bunch of separate keys. If we want to support
more arguments in rabbit messages, we may want to do the same
for args at some point.
"""
if isinstance(context, dict):
context_d = six.iteritems(context)
else:
context_d = six.iteritems(context.to_dict())
msg.update(('_context_%s' % key, value)
for (key, value) in context_d)
class _MsgIdCache(object):
"""This class checks any duplicate messages."""
# NOTE: This value is considered can be a configuration item, but
# it is not necessary to change its value in most cases,
# so let this value as static for now.
DUP_MSG_CHECK_SIZE = 16
def __init__(self, **kwargs):
self.prev_msgids = collections.deque([],
maxlen=self.DUP_MSG_CHECK_SIZE)
def check_duplicate_message(self, message_data):
"""AMQP consumers may read same message twice when exceptions occur
before ack is returned. This method prevents doing it.
"""
try:
msg_id = message_data.pop(UNIQUE_ID)
except KeyError:
return
if msg_id in self.prev_msgids:
raise rpc_common.DuplicateMessageError(msg_id=msg_id)
return msg_id
def add(self, msg_id):
if msg_id and msg_id not in self.prev_msgids:
self.prev_msgids.append(msg_id)
def _add_unique_id(msg):
"""Add unique_id for checking duplicate messages."""
unique_id = uuid.uuid4().hex
msg.update({UNIQUE_ID: unique_id})
class AMQPDestinationNotFound(Exception):
pass
| 37.092251
| 78
| 0.643355
| 5,096
| 0.506964
| 0
| 0
| 0
| 0
| 0
| 0
| 4,990
| 0.496419
|
55fbb1e9d0d4e9b678c1d12c81f6b84f0a9bebb8
| 1,551
|
py
|
Python
|
scripts/agenda.py
|
benjaminogles/vim-head
|
be3e01b53d314b6f7e0d72a736fe40f38de2cf5f
|
[
"MIT"
] | 3
|
2020-04-13T17:47:05.000Z
|
2020-05-11T17:23:02.000Z
|
scripts/agenda.py
|
benjaminogles/vim-head
|
be3e01b53d314b6f7e0d72a736fe40f38de2cf5f
|
[
"MIT"
] | 3
|
2020-04-13T16:51:27.000Z
|
2020-04-13T16:53:54.000Z
|
scripts/agenda.py
|
benjaminogles/vim-head
|
be3e01b53d314b6f7e0d72a736fe40f38de2cf5f
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import datetime
import itertools
import sys
from heading import *
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
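# Map heading keywords to sort weights: pending keywords closest to the '|' separator in KEYWORDS get the smallest weights (highest priority); keywords after the separator (done states) sort last.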
def priority_key():
weights = {}
sep = KEYWORDS.index('|')
for keyword in KEYWORDS[sep+1:]:
weights[keyword] = len(KEYWORDS)
idx = 1
while idx <= sep:
weights[KEYWORDS[sep - idx]] = idx
idx += 1
return lambda heading: weights[heading.keyword] if heading.keyword in weights else len(weights.keys()) - 1
def date_key(heading):
if heading.date is None:
return datetime.date(datetime.MAXYEAR, 1, 1)
return heading.date
def has_date(heading):
return heading.date is not None
def is_pending(heading):
if heading.keyword not in KEYWORDS:
return False
return KEYWORDS.index(heading.keyword) < KEYWORDS.index('|')
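# Read headings from stdin, keep pending ones that have a date, group them by date, and print an agenda with overdue items flagged.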
if __name__ == '__main__':
import argparse
inputs = from_fields_file(sys.stdin)
todos = filter(has_date, inputs)
todos = filter(is_pending, todos)
todos = sorted(todos, key=date_key)
todos = itertools.groupby(todos, key=date_key)
today = datetime.date.today()
warned = False
for date, todo_group in todos:
if date < today and not warned:
warned = True
print('\n! Overdue !')
elif date == today:
print ('\n= Today =')
elif date > today:
print('\n= %s %s =' % (days[date.weekday()], date))
prioritized = sorted(todo_group, key=priority_key())
for todo in prioritized:
print(todo)
| 27.210526
| 110
| 0.617666
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 106
| 0.068343
|
55fbb54a4881fb0eed71b1a082583ae85646db84
| 5,635
|
py
|
Python
|
clusterpy/core/toolboxes/cluster/componentsAlg/areamanager.py
|
CentroGeo/clusterpy_python3
|
5c2600b048836e54495dc5997a250af72f72f6e7
|
[
"BSD-3-Clause"
] | 3
|
2019-09-29T15:27:57.000Z
|
2021-01-23T02:05:07.000Z
|
clusterpy/core/toolboxes/cluster/componentsAlg/areamanager.py
|
CentroGeo/clusterpy_python3
|
5c2600b048836e54495dc5997a250af72f72f6e7
|
[
"BSD-3-Clause"
] | null | null | null |
clusterpy/core/toolboxes/cluster/componentsAlg/areamanager.py
|
CentroGeo/clusterpy_python3
|
5c2600b048836e54495dc5997a250af72f72f6e7
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: latin2
"""Algorithm utilities
G{packagetree core}
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from builtins import object
from past.utils import old_div
__author__ = "Juan C. Duque"
__credits__ = "Copyright (c) 2009-11 Juan C. Duque"
__license__ = "New BSD License"
__version__ = "1.0.0"
__maintainer__ = "RiSE Group"
__email__ = "contacto@rise-group.org"
from .areacl import AreaCl
from .dist2Regions import distanceStatDispatcher
class AreaManager(object):
"""
This class contains operations at areal level, including the generation of
instances of areas, a wide range of area2area and area2region distance
functions.
"""
def __init__(self, w, y, distanceType="EuclideanSquared", variance="false"):
"""
@type w: dictionary
@param w: With B{key} = area Id, and B{value} = list with Ids of neighbours of
each area.
@type y: dictionary
@param y: With B{key} = area Id, and B{value} = list with attribute
values.
@type distanceType: string
@keyword distanceType: Function to calculate the distance between areas. Default value I{distanceType = 'EuclideanSquared'}.
@type variance: boolean
@keyword variance: Boolean indicating if the data have variance matrix. Default value I{variance = 'false'}.
"""
self.y = y
self.areas = {}
self.noNeighs = set([])
self.variance = variance
self.distanceType = distanceType
self.createAreas(w, y)
self.distanceStatDispatcher = distanceStatDispatcher
def createAreas(self, w, y):
"""
Creates instances of areas based on a sparse weights matrix (w) and a
data array (y).
"""
n = len(self.y)
self.distances = {}
noNeighs = []
for key in range(n):
data = y[key]
try:
neighbours = w[key]
except:
neighbours = {}
w[key] = {}
if len(w[key]) == 0:
self.noNeighs = self.noNeighs | set([key])
a = AreaCl(key, neighbours, data, self.variance)
self.areas[key] = a
if len(self.noNeighs) > 0:
print("Disconnected areas neighs: ", list(self.noNeighs))
def returnDistance2Area(self, area, otherArea):
"""
Returns the distance between two areas
"""
i = 0
j = 0
dist = 0.0
i = area.id
j = otherArea.id
if i < j:
dist = self.distances[(i, j)]
elif i == j:
dist = 0.0
else:
dist = self.distances[(j, i)]
return dist
def getDataAverage(self, areaList, dataIndex):
"""
Returns the attribute centroid of a set of areas
"""
dataAvg = len(dataIndex) * [0.0]
for aID in areaList:
i = 0
for index in dataIndex:
dataAvg[i] += old_div(self.areas[aID].data[index],len(areaList))
i += 1
return dataAvg
def getDistance2Region(self, area, areaList, distanceStat="Centroid", weights=[], indexData=[]):
"""
Returns the distance from an area to a region (defined as a list of
area IDs)
"""
if isinstance(distanceStat, str):
if len(indexData) == 0:
indexData = list(range(len(area.data)))
return self.distanceStatDispatcher[distanceStat](self, area, areaList, indexData)
else:
distance = 0.0
i = 0
for dS in distanceStat:
if len(indexData) == 0:
indexDataDS = list(range(len(area.data)))
else:
indexDataDS = indexData[i]
if len(weights) > 0:
distance += weights[i]
self.distanceStatDispatcher[dS](self, area, areaList, indexDataDS)
else:
distance += self.distanceStatDispatcher[dS](self, area, areaList, indexDataDS)
i += 1
return distance
def getDistance2AreaMin(self, area, areaList):
"""
        Return the ID of the area within a region that is closest to an area
        outside the region
        """
        areaMin = -1
distanceMin = 1e300
for aID in areaList:
if self.distances[area.id, aID] < distanceMin:
areaMin = aID
distanceMin = self.distances[area.id, aID]
return areaMin
def checkFeasibility(self, solution):
"""
Checks feasibility of a candidate solution
"""
n = len(solution)
regions = {}
for i in range(n):
try:
regions[solution[i]] = regions[solution[i]] + [i]
except:
regions[solution[i]] = [i]
feasible = 1
r = len(regions)
for i in range(r):
if len(regions[i]) > 0:
newRegion = set([regions[i][0]])
areas2Eval = set([regions[i][0]])
while(len(areas2Eval) > 0):
area = areas2Eval.pop()
areaNeighs = (set(self.areas[area].neighs) & set(regions[i]))
areas2Eval = areas2Eval | (areaNeighs - newRegion)
newRegion = newRegion | areaNeighs
if set(regions[i]) -newRegion != set([]):
feasible = 0
break
return feasible
| 33.343195
| 132
| 0.546584
| 5,091
| 0.903461
| 0
| 0
| 0
| 0
| 0
| 0
| 1,555
| 0.275954
|
55fc362ece90946015f4b5b227a527251bc8be9e
| 1,463
|
py
|
Python
|
geolocator.py
|
Kugeleis/TeslaInventoryChecker
|
93b6e8e2885bf8e0c15942e940d5d5626754f7a8
|
[
"MIT"
] | 7
|
2021-08-13T16:46:32.000Z
|
2021-12-23T17:54:33.000Z
|
geolocator.py
|
Kugeleis/TeslaInventoryChecker
|
93b6e8e2885bf8e0c15942e940d5d5626754f7a8
|
[
"MIT"
] | null | null | null |
geolocator.py
|
Kugeleis/TeslaInventoryChecker
|
93b6e8e2885bf8e0c15942e940d5d5626754f7a8
|
[
"MIT"
] | 5
|
2021-08-13T04:38:05.000Z
|
2021-12-14T06:29:11.000Z
|
import http.client
import json
from types import SimpleNamespace
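# Request a geocoding token from Tesla's inventory API (POST /inventory/api/v1/refresh_token).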
def get_token():
conn = http.client.HTTPSConnection("www.tesla.com")
payload = {
"resource": "geocodesvc",
"csrf_name": "",
"csrf_value": ""
}
headers = {
'Content-Type': 'application/json'
}
conn.request("POST", "/inventory/api/v1/refresh_token", json.dumps(payload), headers)
res = conn.getresponse()
data = res.read()
auth = json.loads(data, object_hook=lambda d: SimpleNamespace(**d))
return auth.token
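# Resolve a postal code to city, state and coordinates via Tesla's address endpoint; see the example response below.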
def decode_zip(token, zip_code, country_code):
conn = http.client.HTTPSConnection("www.tesla.com")
payload = {
"token": token,
"postal_code": zip_code,
"country_code": country_code,
"csrf_name": "",
"csrf_value": ""
}
headers = {
'Content-Type': 'application/json'
}
conn.request("POST", "/inventory/api/v1/address", json.dumps(payload), headers)
res = conn.getresponse()
data = res.read()
geo_result = json.loads(data, object_hook=lambda d: SimpleNamespace(**d))
# Example Data:
# {
# "city": "Montreal",
# "stateProvince": "Quebec",
# "postalCode": "H1K 3T2",
# "countryCode": "CA",
# "countryName": "Canada",
# "longitude": -73.5614205,
# "latitude": 45.60802700000001,
# "county": "Montreal",
# "stateCode": "QC"
# }
return geo_result.data
| 28.686275
| 89
| 0.584416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 557
| 0.380725
|
55fd77fad6026ba26284584227c80ea384f74fc0
| 4,942
|
py
|
Python
|
client/runTFpose.py
|
BamLubi/tf-pose_Client
|
07032a8b7ba80f717e74f6c893fadc6e2faa6573
|
[
"MIT"
] | 1
|
2022-03-21T18:02:05.000Z
|
2022-03-21T18:02:05.000Z
|
client/runTFpose.py
|
BamLubi/tf-pose_Client
|
07032a8b7ba80f717e74f6c893fadc6e2faa6573
|
[
"MIT"
] | null | null | null |
client/runTFpose.py
|
BamLubi/tf-pose_Client
|
07032a8b7ba80f717e74f6c893fadc6e2faa6573
|
[
"MIT"
] | null | null | null |
import argparse
import cv2
import time
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
"""
Wrapper around the skeleton (pose) estimation interface provided by the tf-openpose project.
"""
class TFPOSE:
def __init__(self):
        # 0. runtime state
self.fps_time = 0
self.frame_count = 0
        # 1. parse command-line arguments
self.parseArgs()
        # 2. print the parsed arguments
self.printArgs()
        # 3. build the tf-pose estimator instance
self.w, self.h = model_wh(self.args.resize)
self.e = TfPoseEstimator(get_graph_path(self.args.model), target_size=(self.w, self.h))
def parseArgs(self):
"""解析参数"""
parser = argparse.ArgumentParser(description='tf-pose-estimation realtime webcam')
parser.add_argument('--video', type=str, default=0,
help='if provided, set the video path')
parser.add_argument('--isoutput', type=bool, default=False,
help='whether write to file')
parser.add_argument('--output', type=str, default='test.avi',
help='if provided, set the output video path')
parser.add_argument('--isorigin', type=bool, default=False,
help='whether output origin img')
parser.add_argument('--resize', type=str, default='432x368',
help='if provided, resize images before they are processed. default=256x256, Recommends : 432x368 or 656x368 or 1312x736 ')
parser.add_argument('--resize-out-ratio', type=float, default=4.0,
help='if provided, resize heatmaps before they are post-processed. default=1.0')
parser.add_argument('--model', type=str, default='mobilenet_v2_large',
help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')
parser.add_argument('--show-process', type=bool, default=False,
help='for debug purpose, if enabled, speed for inference is dropped.')
        # parse the command line into self.args
self.args = parser.parse_args()
def printArgs(self):
"""输出参数"""
print('获取的参数如下:')
print('video-视频: %s' % (self.args.video))
print('resize-重写图片大小: %s' % (self.args.resize))
print('resize-out-ratio-重写关键点热图大小: %s' % (self.args.resize_out_ratio))
print('show-process-是否展示过程: %s' % (self.args.show_process))
print('model-模型: %s, 模型路径: %s' % (self.args.model, get_graph_path(self.args.model)))
def setArgsVideo(self, video):
"""设置video参数"""
self.args.__setattr__('video', video)
def setArgsIsOrigin(self, isorigin):
"""设置isorigin参数"""
self.args.__setattr__('isorigin', isorigin)
def setArgsIsOutput(self, isoutput):
"""设置isorigin参数"""
self.args.__setattr__('isoutput', isoutput)
def initVideo(self):
"""
        Initialize the video capture and, if requested, the output writer.
"""
        print('Opening video')
self.cam = cv2.VideoCapture(self.args.video)
        self.ret_val, self.image = self.cam.read()  # read the first frame; ret_val is a bool
        self.frame_count = 0  # reset the frame counter because the video may have changed
        # optionally write results to a video file
if self.args.isoutput :
            fps = self.cam.get(cv2.CAP_PROP_FPS)  # source frame rate
            fourcc = cv2.VideoWriter_fourcc(*'XVID')  # encode the output with XVID (MPEG-4)
frame_size = (int(self.cam.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT)))
self.videoWriter = cv2.VideoWriter(self.args.output, fourcc, fps, frame_size)
            print('Source video: frame shape %s, fps %s, frame size %s' % (self.image.shape, fps, frame_size))
def getHumans(self):
humans = self.e.inference(self.image, resize_to_default=(self.w > 0 and self.h > 0), upsample_size=self.args.resize_out_ratio)
return humans
def getNextFrame(self):
"""获取下一帧的图片"""
self.ret_val, self.image = self.cam.read()
self.frame_count += 1
return self.ret_val
def hasNextFrame(self):
"""是否还有下一帧"""
return self.ret_val
def getFrameCount(self):
"""获取帧数"""
return self.frame_count
def runOnce(self):
"""
        Run inference on the current frame and return the rendered cv2 image plus the detected humans.
"""
fps_time = time.time()
        # run pose estimation on the current frame
        print('Estimating pose...')
humans = self.getHumans()
        # draw the detected keypoints
        print('Drawing...')
if self.args.isorigin :
            # draw on top of the original frame
pose_img = TfPoseEstimator.draw_humans(np.array(self.image), humans, imgcopy=False)
else:
            # draw on a blank (black) canvas instead of the original frame
emptyImage = np.zeros(self.image.shape, np.uint8)
emptyImage[...] = 0
pose_img = TfPoseEstimator.draw_humans(emptyImage, humans, imgcopy=False)
# cv2.putText(pose_img, "FPS: %f" % (1.0 / (time.time() - fps_time)), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        # optionally write the rendered frame to the output file
if self.args.isoutput :
self.videoWriter.write(pose_img)
return pose_img, humans
if __name__ == '__main__':
TFPOSE()
| 37.439394
| 151
| 0.593484
| 5,131
| 0.948429
| 0
| 0
| 0
| 0
| 0
| 0
| 1,808
| 0.334196
|
55fe127a3e15c5c409ac7dd672e540ee28e8d786
| 413
|
py
|
Python
|
oldPython/driving_app.py
|
Awarua-/Can-I-Have-Your-Attention-COSC475-Research
|
71b5140b988aa6512a7cf5b5b6d043e20fd02084
|
[
"MIT"
] | null | null | null |
oldPython/driving_app.py
|
Awarua-/Can-I-Have-Your-Attention-COSC475-Research
|
71b5140b988aa6512a7cf5b5b6d043e20fd02084
|
[
"MIT"
] | null | null | null |
oldPython/driving_app.py
|
Awarua-/Can-I-Have-Your-Attention-COSC475-Research
|
71b5140b988aa6512a7cf5b5b6d043e20fd02084
|
[
"MIT"
] | null | null | null |
from kivy.app import App
from kivy.uix.label import Label
from kivy.core.window import Window
class DrivingApp(App):
def build(self):
Window.fullscreen = False
        # Need to set the size explicitly, otherwise the output is very pixelated
        # wonders about pixel mapping?
        Window.size = (1920, 1080)
b = Label(text='Launch Child App')
return b
if __name__ == "__main__":
    DrivingApp().run()
| 21.736842
| 56
| 0.653753
| 266
| 0.644068
| 0
| 0
| 0
| 0
| 0
| 0
| 106
| 0.256659
|
55fe69df7aecb356db95a682b17146dfaf4521ce
| 3,103
|
py
|
Python
|
api/src/opentrons/calibration_storage/helpers.py
|
faliester/opentrons
|
e945d0f72fed39b0f68c0b30b7afd1981644184f
|
[
"Apache-2.0"
] | 1
|
2022-03-17T20:38:04.000Z
|
2022-03-17T20:38:04.000Z
|
api/src/opentrons/calibration_storage/helpers.py
|
faliester/opentrons
|
e945d0f72fed39b0f68c0b30b7afd1981644184f
|
[
"Apache-2.0"
] | null | null | null |
api/src/opentrons/calibration_storage/helpers.py
|
faliester/opentrons
|
e945d0f72fed39b0f68c0b30b7afd1981644184f
|
[
"Apache-2.0"
] | null | null | null |
""" opentrons.calibration_storage.helpers: various miscellaneous
functions
This module has functions that you can import to save robot or
labware calibration to its designated file location.
"""
import json
from typing import Union, List, Dict, TYPE_CHECKING
from dataclasses import is_dataclass, asdict
from hashlib import sha256
from . import types as local_types
if TYPE_CHECKING:
from opentrons_shared_data.labware.dev_types import LabwareDefinition
DictionaryFactoryType = Union[List, Dict]
def dict_filter_none(data: DictionaryFactoryType) -> Dict:
"""
    Helper function to filter out entries whose value is None from a dataclass
before saving to file.
"""
return dict(item for item in data if item[1] is not None)
def convert_to_dict(obj) -> Dict:
# The correct way to type this is described here:
# https://github.com/python/mypy/issues/6568
    # Unfortunately, since it's not currently supported I have an
# assert check instead.
assert is_dataclass(obj), 'This function is intended for dataclasses only'
return asdict(obj, dict_factory=dict_filter_none)
def hash_labware_def(labware_def: 'LabwareDefinition') -> str:
"""
Helper function to take in a labware definition and return
    a hashed string of key elements from the labware definition
    to make it a unique identifier.
    :param labware_def: Full labware definition
:returns: sha256 string
"""
# remove keys that do not affect run
blocklist = ['metadata', 'brand', 'groups']
def_no_metadata = {
k: v for k, v in labware_def.items() if k not in blocklist}
sorted_def_str = json.dumps(
def_no_metadata, sort_keys=True, separators=(',', ':'))
return sha256(sorted_def_str.encode('utf-8')).hexdigest()
def details_from_uri(uri: str, delimiter='/') -> local_types.UriDetails:
"""
Unpack a labware URI to get the namespace, loadname and version
"""
if uri:
info = uri.split(delimiter)
return local_types.UriDetails(
namespace=info[0], load_name=info[1], version=int(info[2]))
else:
# Here we are assuming that the 'uri' passed in is actually
# the loadname, though sometimes it may be an empty string.
return local_types.UriDetails(
namespace='', load_name=uri, version=1)
def uri_from_details(namespace: str, load_name: str,
version: Union[str, int],
delimiter='/') -> str:
""" Build a labware URI from its details.
A labware URI is a string that uniquely specifies a labware definition.
:returns str: The URI.
"""
return f'{namespace}{delimiter}{load_name}{delimiter}{version}'
def uri_from_definition(definition: 'LabwareDefinition', delimiter='/') -> str:
""" Build a labware URI from its definition.
A labware URI is a string that uniquely specifies a labware definition.
:returns str: The URI.
"""
return uri_from_details(definition['namespace'],
definition['parameters']['loadName'],
definition['version'])
| 32.663158
| 79
| 0.684821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,499
| 0.483081
|
55fe802b2df8f3e2a5853155117ec23bac4176ca
| 3,264
|
py
|
Python
|
scripts/OpenRobotPyxl.py
|
coder-cell/robotframework-openpyxl
|
abc839755a1e8c0208065e9c9568d7df732a6792
|
[
"MIT"
] | null | null | null |
scripts/OpenRobotPyxl.py
|
coder-cell/robotframework-openpyxl
|
abc839755a1e8c0208065e9c9568d7df732a6792
|
[
"MIT"
] | null | null | null |
scripts/OpenRobotPyxl.py
|
coder-cell/robotframework-openpyxl
|
abc839755a1e8c0208065e9c9568d7df732a6792
|
[
"MIT"
] | null | null | null |
import openpyxl
from robot.api.deco import keyword, library
from robot.api import logger
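# Robot Framework keyword library wrapping openpyxl for basic workbook, sheet and cell operations.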
@library
class OpenRobotPyxl:
def __init__(self):
self.active_sheet = None
self.active_book = None
self.path = None
self.bookname = None
@keyword("Create New Workbook")
def create_new_workbook(self, _path, book_name, sheet_name, postion=0):
wb = openpyxl.Workbook()
self.path = _path
self.bookname = book_name + ".xlsx"
ws = wb.create_sheet(sheet_name, postion)
self.active_book, self.active_sheet = wb, ws
return self.active_book
@keyword('Close Workbook')
def close_workbook(self):
self.active_book.save(self.path + "/" + self.bookname)
@keyword('Get Active Sheet')
def get_active_sheet(self):
if self.active_book:
if self.active_sheet:
return self.active_sheet
else:
# Return the first sheet in the work book.
return self.active_book.worksheets[0]
else:
return None
@keyword('Active Sheet Name')
def get_active_sheet_name(self):
return self.get_active_sheet().title
@keyword('Load Workbook')
def load_workbook(self, path, bookname):
self.active_book = openpyxl.load_workbook(path + "/" + bookname)
self.path = path
self.bookname = bookname
self.active_sheet = None
self.active_sheet = self.get_active_sheet()
@keyword('Add Sheet')
def add_new_sheet(self, sheetname, index=0):
self.active_book.create_sheet(title=sheetname, index=index)
@keyword('Set Cell Value')
def add_value_to_cell(self, row, col, value):
self.active_sheet.cell(row, col, value)
@keyword('Get Cell Value')
def get_cell_value(self, row, col):
return self.active_sheet.cell(row, col).value
@keyword('Insert Row')
def insert_empty_row(self, row_number):
return self.active_sheet.insert_rows(row_number)
@keyword('Insert Column')
def insert_empty_col(self, col_number):
return self.active_sheet.insert_cols(col_number)
@keyword('Delete Row')
def delete_row(self, row_number):
return self.active_sheet.delete_rows(row_number)
@keyword('Delete Column')
def delete_col(self, col_number):
return self.active_sheet.delete_cols(col_number)
@keyword('Convert List to Row')
def insert_value_to_row(self, row, col, listofdata):
if type(listofdata) == list:
datalength = len(listofdata)
for index, row_ in enumerate(range(row, row+datalength)):
cell = self.active_sheet.cell(row_, col)
cell.value = listofdata[index]
else:
return Exception("The data should be of list.")
@keyword('Convert List to Column')
    def insert_value_to_column(self, row, col, listofdata):
if type(listofdata) == list:
datalength = len(listofdata)
for index, col_ in enumerate(range(col, col + datalength)):
cell = self.active_sheet.cell(row, col_)
cell.value = listofdata[index]
else:
return Exception("The data should be of list.")
return True
| 32.969697
| 75
| 0.636336
| 3,163
| 0.969056
| 0
| 0
| 3,172
| 0.971814
| 0
| 0
| 344
| 0.105392
|
55fec657248ea9359324a70a7e7e0fc53b322616
| 1,852
|
py
|
Python
|
club/urls.py
|
NSYT0607/DONGKEY
|
83f926f22a10a28895c9ad71038c9a27d200e231
|
[
"MIT"
] | 1
|
2018-04-10T11:47:16.000Z
|
2018-04-10T11:47:16.000Z
|
club/urls.py
|
NSYT0607/DONGKEY
|
83f926f22a10a28895c9ad71038c9a27d200e231
|
[
"MIT"
] | null | null | null |
club/urls.py
|
NSYT0607/DONGKEY
|
83f926f22a10a28895c9ad71038c9a27d200e231
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
app_name = 'club'
urlpatterns = [
path('create/', views.create_club, name='create_club'),
path('update/<int:club_pk>', views.update_club, name='update_club'),
path('read_admin_club/<str:club>/<int:ctg_pk>/', views.read_admin_club, name='read_admin_club_ctg'),
path('<int:pk>/', views.ClubView.as_view(), name='club_view'),
path('read_admin_club/<str:club>/', views.read_admin_club, name='read_admin_club'),
path('read_non_admin_club/<str:club>/<int:ctg_pk>/', views.read_non_admin_club, name='read_non_admin_club_ctg'),
path('read_non_admin_club/<str:club>/', views.read_non_admin_club, name='read_non_admin_club'),
path('apply/<str:club>/', views.apply_club, name='apply_club'),
path('admit/<int:club>/<int:pk>/', views.admit, name='admit'),
path('update_is_admin/<int:club_pk>/<int:user_pk>/', views.update_is_admin, name='update_is_admin'),
path('manage/<int:club_pk>/', views.manage_member, name='manage_member'),
path('member_list/<int:club_pk>/non_admin', views.member_list_for_non_admin,
name='member_list_for_non_admin'),
path('create/club/rule/<str:club>/', views.create_club_rule, name='create_club_rule'),
path('read/admin_club/apply_list/<str:club>/', views.read_apply_list, name='read_apply_list'),
path('read/admin_club/rule/<str:club>/', views.read_admin_club_rule, name='read_admin_club_rule'),
path('read/non_admin_club/rule/<str:club>/', views.read_non_admin_club_rule, name='read_non_admin_club_rule'),
path('update/club/rule/<str:club>/<int:rule_pk>/', views.update_club_rule, name='update_club_rule'),
path('delete/club/rule/<str:club>/<int:rule_pk>/', views.delete_club_rule, name='delete_club_rule'),
path('exit_club/<int:club_pk>/<int:user_pk>/', views.exit_club, name='exit_club'),
]
| 52.914286
| 116
| 0.721922
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 950
| 0.512959
|
55feec79a1027ecfba7881baf9cccd2719790498
| 1,270
|
py
|
Python
|
interview_kickstart/01_sorting_algorithms/class_discussed_problems/python/0215_kth_largest_element_in_an_array.py
|
mrinalini-m/data_structures_and_algorithms
|
f9bebcca8002064e26ba5b46e47b8abedac39c3e
|
[
"MIT"
] | 2
|
2020-12-18T21:42:05.000Z
|
2020-12-21T06:07:33.000Z
|
interview_kickstart/01_sorting_algorithms/class_discussed_problems/python/0215_kth_largest_element_in_an_array.py
|
mrinalini-m/data_structures_and_algorithms
|
f9bebcca8002064e26ba5b46e47b8abedac39c3e
|
[
"MIT"
] | null | null | null |
interview_kickstart/01_sorting_algorithms/class_discussed_problems/python/0215_kth_largest_element_in_an_array.py
|
mrinalini-m/data_structures_and_algorithms
|
f9bebcca8002064e26ba5b46e47b8abedac39c3e
|
[
"MIT"
] | 2
|
2020-07-04T20:30:19.000Z
|
2021-08-31T08:32:36.000Z
|
from random import randint
from typing import List
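# LeetCode 215: quickselect -- the kth largest element is the (n - k)th smallest, found by partitioning around a random pivot and recursing into one side.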
class Solution:
def findKthLargest(self, nums: List[int], k: int) -> int:
jthSmallest = len(nums) - k
return self.quickSelect(nums, 0, len(nums) - 1, jthSmallest)
def quickSelect(self, nums: List[int], start: int, end: int, jthSmallest: int) -> int:
pivot = self.partition(nums, start, end)
if (pivot == jthSmallest):
return nums[pivot]
elif (jthSmallest < pivot):
return self.quickSelect(nums, start, pivot - 1, jthSmallest)
else:
return self.quickSelect(nums, pivot + 1, end, jthSmallest)
def partition(self, nums: List[int], start: int, end: int) -> int:
randomIndex = randint(start, end)
self.swap(nums, randomIndex, start)
pivot = nums[start]
smaller = start
for bigger in range(start + 1, end + 1):
if nums[bigger] < pivot:
smaller += 1
self.swap(nums, smaller, bigger)
self.swap(nums, start, smaller)
return smaller
def swap(self, nums: List[int], i: int, j: int) -> None:
temp = nums[i]
nums[i] = nums[j]
nums[j] = temp
print(Solution().findKthLargest([4, 1, 2, 11], 2))
| 27.021277
| 90
| 0.574803
| 1,163
| 0.915748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
55ff7e57e726077e74bb90a288c442b6922782cb
| 3,033
|
py
|
Python
|
termpixels/util.py
|
loganzartman/termpixels
|
4353cc0eb9f6947cd5bb8286322a8afea597d741
|
[
"MIT"
] | 17
|
2019-04-11T20:05:13.000Z
|
2022-03-08T22:26:44.000Z
|
termpixels/util.py
|
loganzartman/termpixels
|
4353cc0eb9f6947cd5bb8286322a8afea597d741
|
[
"MIT"
] | 14
|
2019-05-16T19:26:58.000Z
|
2020-10-27T09:35:02.000Z
|
termpixels/util.py
|
loganzartman/termpixels
|
4353cc0eb9f6947cd5bb8286322a8afea597d741
|
[
"MIT"
] | 1
|
2020-12-09T16:39:44.000Z
|
2020-12-09T16:39:44.000Z
|
from unicodedata import east_asian_width, category
from functools import lru_cache
import re
def corners_to_box(x0, y0, x1, y1):
"""convert two corners (x0, y0, x1, y1) to (x, y, width, height)"""
x0, x1 = min(x0, x1), max(x0, x1)
y0, y1 = min(y0, y1), max(y0, y1)
return x0, y0, x1 - x0 + 1, y1 - y0 + 1
# not sure how to determine how ambiguous characters will be rendered
_ambiguous_is_wide = False
def set_ambiguous_is_wide(is_wide):
""" set whether ambiguous characters are considered to be wide """
global _ambiguous_is_wide
if _ambiguous_is_wide != is_wide:
_ambiguous_is_wide = is_wide
terminal_char_len.cache_clear()
@lru_cache(1024)
def terminal_char_len(ch):
""" return the width of a character in terminal cells """
if ch == "\t":
# we can't know the width of a tab without context
# prefer using spaces instead
return None
if not terminal_printable(ch):
return 0
wide = ["F","W","A"] if _ambiguous_is_wide else ["F","W"]
return 2 if east_asian_width(ch) in wide else 1
def terminal_len(s):
""" return the width of a string in terminal cells """
return sum(map(terminal_char_len, s))
def terminal_printable(ch):
""" determine if a character is "printable" """
return not category(ch).startswith("C")
_newline_regex = re.compile(r"\r\n|\r|\n")
def splitlines_print(s):
""" like str.splitlines() but keeps all empty lines """
return _newline_regex.split(s)
def wrap_text(text, line_len, *, tab_size=4, word_sep=re.compile(r"\s+|\W"),
break_word=False, hyphen="", newline="\n"):
""" returns a terminal-line-wrapped version of text """
text = text.replace("\t", " " * tab_size)
hl = terminal_len(hyphen)
buf = []
i = 0
col = 0
while i < len(text):
match = word_sep.search(text, i)
word = text[i:]
sep = ""
if match:
word = text[i:match.start()]
sep = match.group(0)
i = match.end()
else:
i = len(text)
# handle wrappable/breakable words
wl = terminal_len(word)
while col + wl > line_len:
if break_word and col < line_len - hl or col == 0:
while col + terminal_char_len(word[0]) <= line_len - hl:
buf.append(word[0])
col += terminal_char_len(word[0])
word = word[1:]
buf.append(hyphen)
buf.append(newline)
col = 0
wl = terminal_len(word)
buf.append(word)
col += wl
# handle truncatable separators
sl = terminal_len(sep)
if col + sl > line_len:
while col + terminal_char_len(sep[0]) <= line_len:
buf.append(sep[0])
col += terminal_char_len(sep[0])
sep = sep[1:]
buf.append(newline)
col = 0
else:
buf.append(sep)
col += sl
return "".join(buf)
| 32.265957
| 76
| 0.574019
| 0
| 0
| 0
| 0
| 407
| 0.134191
| 0
| 0
| 675
| 0.222552
|
55ffa154fe658f0af46cbd92f080b7eac5967357
| 303
|
py
|
Python
|
json.py
|
AbhijithGanesh/Flask-HTTP-Server
|
78f6c6985e6ffd9f4f70738771d6fcdb802964cc
|
[
"BSD-3-Clause"
] | null | null | null |
json.py
|
AbhijithGanesh/Flask-HTTP-Server
|
78f6c6985e6ffd9f4f70738771d6fcdb802964cc
|
[
"BSD-3-Clause"
] | null | null | null |
json.py
|
AbhijithGanesh/Flask-HTTP-Server
|
78f6c6985e6ffd9f4f70738771d6fcdb802964cc
|
[
"BSD-3-Clause"
] | null | null | null |
import json
'''
READ THE DATABASE README before operating
'''
File = r'''YOUR FILE'''
with open(File, 'r') as fileObj:
    data = json.load(fileObj)
'''
YOUR DATA LOGIC GOES IN HERE
Once the data is changed, to write it to your JSON file use the following command.
'''
with open(File, 'w') as fileObj:
    json.dump(data, fileObj)
| 25.25
| 86
| 0.656766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 199
| 0.656766
|
3600f4551fc329b671400ff96e43cfab6f75ddb4
| 3,128
|
py
|
Python
|
slash/hooks.py
|
omergertel/slash
|
7dd5710a05822bbbaadc6c6517cefcbaa6397eab
|
[
"BSD-3-Clause"
] | null | null | null |
slash/hooks.py
|
omergertel/slash
|
7dd5710a05822bbbaadc6c6517cefcbaa6397eab
|
[
"BSD-3-Clause"
] | null | null | null |
slash/hooks.py
|
omergertel/slash
|
7dd5710a05822bbbaadc6c6517cefcbaa6397eab
|
[
"BSD-3-Clause"
] | null | null | null |
import gossip
from .conf import config
from .utils.deprecation import deprecated
def _deprecated_to_gossip(func):
return deprecated(since="0.6.0", message="Use gossip instead")(func)
def _define(hook_name, **kwargs):
hook = gossip.define("slash.{0}".format(hook_name), **kwargs)
globals()[hook_name] = hook
return hook
_define('session_start', doc="Called right after session starts")
_define('session_end', doc="Called right before the session ends, regardless of the reason for termination")
_define('after_session_start', doc="Second entry point for session start, useful for plugins relying on other plugins' session_start routine")
_define('test_interrupt', doc="Called when a test is interrupted by a KeyboardInterrupt or other similar means")
_define('test_start', doc="Called right after a test starts")
_define('test_end', doc="Called right before a test ends, regardless of the reason for termination")
_define('test_success', doc="Called on test success")
_define('test_error', doc="Called on test error")
_define('test_failure', doc="Called on test failure")
_define('test_skip', doc="Called on test skip", arg_names=("reason",))
_define('result_summary', doc="Called at the end of the execution, when printing results")
_define('exception_caught_before_debugger',
doc="Called whenever an exception is caught, but a debugger hasn't been entered yet")
_define('exception_caught_after_debugger',
doc="Called whenever an exception is caught, and a debugger has already been run")
_slash_group = gossip.get_group('slash')
_slash_group.set_strict()
_slash_group.set_exception_policy(gossip.RaiseDefer())
@gossip.register('gossip.on_handler_exception')
def debugger(handler, exception, hook): # pylint: disable=unused-argument
from .exception_handling import handle_exception
if hook.group is _slash_group and config.root.debug.debug_hook_handlers:
handle_exception(exception)
@_deprecated_to_gossip
def add_custom_hook(hook_name):
"""
Adds an additional hook to the set of available hooks
"""
return _define(hook_name)
@_deprecated_to_gossip
def ensure_custom_hook(hook_name):
"""
Like :func:`.add_custom_hook`, only forgives if the hook already exists
"""
try:
return gossip.get_hook("slash.{0}".format(hook_name))
except LookupError:
return _define(hook_name)
@_deprecated_to_gossip
def remove_custom_hook(hook_name):
"""
Removes a hook from the set of available hooks
"""
gossip.get_hook("slash.{0}".format(hook_name)).undefine()
globals().pop(hook_name)
@_deprecated_to_gossip
def get_custom_hook_names():
"""
Retrieves the names of all custom hooks currently installed
"""
raise NotImplementedError() # pragma: no cover
@_deprecated_to_gossip
def get_all_hooks():
return [
(hook.name, hook)
for hook in gossip.get_group('slash').get_hooks()]
@_deprecated_to_gossip
def get_hook_by_name(hook_name):
"""
Returns a hook (if exists) by its name, otherwise returns None
"""
return gossip.get_hook('slash.{0}'.format(hook_name))
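A minimal usage sketch (an assumption, not part of the file above): because every hook is defined through gossip under the slash group, other code can subscribe to a hook by its fully qualified name and fire it with gossip.trigger, provided this module has already been imported so the hooks exist.
import gossip

@gossip.register('slash.test_start')
def announce_test_start():
    # runs whenever the 'slash.test_start' hook is triggered
    print("a test is about to start")

gossip.trigger('slash.test_start')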
| 34.373626
| 142
| 0.741368
| 0
| 0
| 0
| 0
| 1,460
| 0.466752
| 0
| 0
| 1,483
| 0.474105
|
36011f50763e2763762534e112d2a7cea6f3af2e
| 65
|
py
|
Python
|
experiments/archived/20210203/bag_model/models/__init__.py
|
fxnnxc/text_summarization
|
b8c8a5f491bc44622203602941c1514b2e006fe3
|
[
"Apache-2.0"
] | 5
|
2020-10-14T02:30:44.000Z
|
2021-05-06T12:48:28.000Z
|
experiments/archived/20210119/bag_model/models/__init__.py
|
fxnnxc/text_summarization
|
b8c8a5f491bc44622203602941c1514b2e006fe3
|
[
"Apache-2.0"
] | 2
|
2020-12-19T05:59:31.000Z
|
2020-12-22T11:05:31.000Z
|
experiments/archived/20210203/bag_model/models/__init__.py
|
fxnnxc/text_summarization
|
b8c8a5f491bc44622203602941c1514b2e006fe3
|
[
"Apache-2.0"
] | null | null | null |
from .hub_interface import * # noqa
from .model import * # noqa
| 32.5
| 36
| 0.707692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.184615
|
360246393544aa24389fdcd4c6b8786fa1b242b5
| 232
|
py
|
Python
|
src/CodeLearn/plaintextCode/BloomTech/BTU5W1/U5W1P2_Task3_w1.py
|
MingjunGeng/Code-Knowledge
|
5b376f6b3ff9e7fa0ab41c7b57e3a80313fa0daa
|
[
"MIT"
] | null | null | null |
src/CodeLearn/plaintextCode/BloomTech/BTU5W1/U5W1P2_Task3_w1.py
|
MingjunGeng/Code-Knowledge
|
5b376f6b3ff9e7fa0ab41c7b57e3a80313fa0daa
|
[
"MIT"
] | null | null | null |
src/CodeLearn/plaintextCode/BloomTech/BTU5W1/U5W1P2_Task3_w1.py
|
MingjunGeng/Code-Knowledge
|
5b376f6b3ff9e7fa0ab41c7b57e3a80313fa0daa
|
[
"MIT"
] | 1
|
2022-03-18T04:52:10.000Z
|
2022-03-18T04:52:10.000Z
|
#!/usr/bin/python3
# --- 001 > U5W2P1_Task3_w1
def solution(i):
return float(i)
if __name__ == "__main__":
print('----------start------------')
i = 12
print(solution( i ))
print('------------end------------')
| 19.333333
| 40
| 0.465517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 113
| 0.487069
|
3603655d64ea26fd4eb5614d884927de08638bdc
| 30,296
|
py
|
Python
|
plugins/modules/oci_sch_service_connector.py
|
A7rMtWE57x/oci-ansible-collection
|
80548243a085cd53fd5dddaa8135b5cb43612c66
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_sch_service_connector.py
|
A7rMtWE57x/oci-ansible-collection
|
80548243a085cd53fd5dddaa8135b5cb43612c66
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_sch_service_connector.py
|
A7rMtWE57x/oci-ansible-collection
|
80548243a085cd53fd5dddaa8135b5cb43612c66
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2017, 2020 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_sch_service_connector
short_description: Manage a ServiceConnector resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a ServiceConnector resource in Oracle Cloud Infrastructure
- For I(state=present), creates a new service connector in the specified compartment.
A service connector is a logically defined flow for moving data from
a source service to a destination service in Oracle Cloud Infrastructure.
For general information about service connectors, see
L(Service Connector Hub Overview,https://docs.cloud.oracle.com/iaas/service-connector-hub/using/index.htm).
- For purposes of access control, you must provide the
L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment where
you want the service connector to reside. Notice that the service connector
doesn't have to be in the same compartment as the source or target services.
For information about access control and compartments, see
L(Overview of the IAM Service,https://docs.cloud.oracle.com/iaas/Content/Identity/Concepts/overview.htm).
- After you send your request, the new service connector's state is temporarily
CREATING. When the state changes to ACTIVE, data begins transferring from the
source service to the target service. For instructions on deactivating and
activating service connectors, see
L(To activate or deactivate a service connector,https://docs.cloud.oracle.com/iaas/service-connector-hub/using/index.htm).
- "This resource has the following action operations in the M(oci_service_connector_actions) module: activate, deactivate."
version_added: "2.9"
author: Oracle (@oracle)
options:
display_name:
description:
- A user-friendly name. It does not have to be unique, and it is changeable.
Avoid entering confidential information.
- Required for create using I(state=present).
- Required for update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["name"]
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the
compartment to create the service connector in.
- Required for create using I(state=present).
- Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
type: str
description:
description:
- The description of the resource. Avoid entering confidential information.
- This parameter is updatable.
type: str
source:
description:
- ""
- Required for create using I(state=present).
- This parameter is updatable.
type: dict
suboptions:
kind:
description:
- The type discriminator.
type: str
choices:
- "logging"
required: true
log_sources:
description:
- The resources affected by this work request.
type: list
required: true
suboptions:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the log
source.
type: str
required: true
log_group_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the log group.
type: str
log_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the log.
type: str
tasks:
description:
- The list of tasks.
- This parameter is updatable.
type: list
suboptions:
kind:
description:
- The type discriminator.
type: str
choices:
- "logRule"
required: true
condition:
description:
- A filter or mask to limit the source used in the flow defined by the service connector.
type: str
required: true
target:
description:
- ""
- Required for create using I(state=present).
- This parameter is updatable.
type: dict
suboptions:
kind:
description:
- The type discriminator.
type: str
choices:
- "notifications"
- "objectStorage"
- "monitoring"
- "functions"
- "streaming"
required: true
topic_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the topic.
- Required when kind is 'notifications'
type: str
namespace:
description:
- The namespace.
- Applicable when kind is 'objectStorage'
type: str
bucket_name:
description:
- The name of the bucket. Avoid entering confidential information.
- Required when kind is 'objectStorage'
type: str
object_name_prefix:
description:
- The prefix of the objects. Avoid entering confidential information.
- Applicable when kind is 'objectStorage'
type: str
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the metric.
- Required when kind is 'monitoring'
type: str
metric_namespace:
description:
- The namespace of the metric.
- "Example: `oci_computeagent`"
- Required when kind is 'monitoring'
type: str
metric:
description:
- The name of the metric.
- "Example: `CpuUtilization`"
- Required when kind is 'monitoring'
type: str
function_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the function.
- Required when kind is 'functions'
type: str
stream_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the stream.
- Required when kind is 'streaming'
type: str
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\\"bar-key\\": \\"value\\"}`"
- This parameter is updatable.
type: dict
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
- This parameter is updatable.
type: dict
service_connector_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the service connector.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
state:
description:
- The state of the ServiceConnector.
- Use I(state=present) to create or update a ServiceConnector.
- Use I(state=absent) to delete a ServiceConnector.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create service_connector
oci_sch_service_connector:
display_name: display_name_example
compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
source:
kind: logging
log_sources:
- compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
target:
kind: notifications
- name: Update service_connector using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_sch_service_connector:
display_name: display_name_example
compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
description: description_example
source:
kind: logging
log_sources:
- compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
tasks:
- kind: logRule
condition: condition_example
target:
kind: notifications
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update service_connector
oci_sch_service_connector:
display_name: display_name_example
description: description_example
service_connector_id: ocid1.serviceconnector.oc1..xxxxxxEXAMPLExxxxxx
- name: Delete service_connector
oci_sch_service_connector:
service_connector_id: ocid1.serviceconnector.oc1..xxxxxxEXAMPLExxxxxx
state: absent
- name: Delete service_connector using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_sch_service_connector:
display_name: display_name_example
compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
state: absent
"""
RETURN = """
service_connector:
description:
- Details of the ServiceConnector resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the service connector.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
display_name:
description:
- A user-friendly name. It does not have to be unique, and it is changeable.
Avoid entering confidential information.
returned: on success
type: string
sample: display_name_example
description:
description:
- The description of the resource. Avoid entering confidential information.
returned: on success
type: string
sample: description_example
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the service connector.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
time_created:
description:
- "The date and time when the service connector was created.
Format is defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
Example: `2020-01-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2020-01-25T21:10:29.600Z
time_updated:
description:
- "The date and time when the service connector was updated.
Format is defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
Example: `2020-01-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2020-01-25T21:10:29.600Z
lifecycle_state:
description:
- The current state of the service connector.
returned: on success
type: string
sample: CREATING
lifecyle_details:
description:
- A message describing the current state in more detail.
For example, the message might provide actionable
information for a resource in a `FAILED` state.
returned: on success
type: string
sample: lifecyle_details_example
source:
description:
- ""
returned: on success
type: complex
contains:
kind:
description:
- The type discriminator.
returned: on success
type: string
sample: logging
log_sources:
description:
- The resources affected by this work request.
returned: on success
type: complex
contains:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the log
source.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
log_group_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the log group.
returned: on success
type: string
sample: ocid1.loggroup.oc1..xxxxxxEXAMPLExxxxxx
log_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the log.
returned: on success
type: string
sample: ocid1.log.oc1..xxxxxxEXAMPLExxxxxx
tasks:
description:
- The list of tasks.
returned: on success
type: complex
contains:
kind:
description:
- The type discriminator.
returned: on success
type: string
sample: logRule
condition:
description:
- A filter or mask to limit the source used in the flow defined by the service connector.
returned: on success
type: string
sample: condition_example
target:
description:
- ""
returned: on success
type: complex
contains:
kind:
description:
- The type discriminator.
returned: on success
type: string
sample: notifications
topic_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the topic.
returned: on success
type: string
sample: ocid1.topic.oc1..xxxxxxEXAMPLExxxxxx
namespace:
description:
- The namespace.
returned: on success
type: string
sample: namespace_example
bucket_name:
description:
- The name of the bucket. Avoid entering confidential information.
returned: on success
type: string
sample: bucket_name_example
object_name_prefix:
description:
- The prefix of the objects. Avoid entering confidential information.
returned: on success
type: string
sample: object_name_prefix_example
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the metric.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
metric_namespace:
description:
- The namespace of the metric.
- "Example: `oci_computeagent`"
returned: on success
type: string
sample: oci_computeagent
metric:
description:
- The name of the metric.
- "Example: `CpuUtilization`"
returned: on success
type: string
sample: CpuUtilization
function_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the function.
returned: on success
type: string
sample: ocid1.function.oc1..xxxxxxEXAMPLExxxxxx
stream_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the stream.
returned: on success
type: string
sample: ocid1.stream.oc1..xxxxxxEXAMPLExxxxxx
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\\"bar-key\\": \\"value\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
system_tags:
description:
- "The system tags associated with this resource, if any. The system tags are set by Oracle Cloud Infrastructure services. Each key is
predefined and scoped to namespaces.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{orcl-cloud: {free-tier-retain: true}}`"
returned: on success
type: dict
sample: {}
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"description": "description_example",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2020-01-25T21:10:29.600Z",
"time_updated": "2020-01-25T21:10:29.600Z",
"lifecycle_state": "CREATING",
"lifecyle_details": "lifecyle_details_example",
"source": {
"kind": "logging",
"log_sources": [{
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"log_group_id": "ocid1.loggroup.oc1..xxxxxxEXAMPLExxxxxx",
"log_id": "ocid1.log.oc1..xxxxxxEXAMPLExxxxxx"
}]
},
"tasks": [{
"kind": "logRule",
"condition": "condition_example"
}],
"target": {
"kind": "notifications",
"topic_id": "ocid1.topic.oc1..xxxxxxEXAMPLExxxxxx",
"namespace": "namespace_example",
"bucket_name": "bucket_name_example",
"object_name_prefix": "object_name_prefix_example",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"metric_namespace": "oci_computeagent",
"metric": "CpuUtilization",
"function_id": "ocid1.function.oc1..xxxxxxEXAMPLExxxxxx",
"stream_id": "ocid1.stream.oc1..xxxxxxEXAMPLExxxxxx"
},
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"system_tags": {}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.sch import ServiceConnectorClient
from oci.sch.models import CreateServiceConnectorDetails
from oci.sch.models import UpdateServiceConnectorDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ServiceConnectorHelperGen(OCIResourceHelperBase):
"""Supported operations: create, update, get, list and delete"""
def get_module_resource_id_param(self):
return "service_connector_id"
def get_module_resource_id(self):
return self.module.params.get("service_connector_id")
def get_get_fn(self):
return self.client.get_service_connector
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_service_connector,
service_connector_id=self.module.params.get("service_connector_id"),
)
def get_required_kwargs_for_list(self):
required_list_method_params = [
"compartment_id",
]
return dict(
(param, self.module.params[param]) for param in required_list_method_params
)
def get_optional_kwargs_for_list(self):
optional_list_method_params = ["display_name"]
return dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
and (
self._use_name_as_identifier()
or (
not self.module.params.get("key_by")
or param in self.module.params.get("key_by")
)
)
)
def list_resources(self):
required_kwargs = self.get_required_kwargs_for_list()
optional_kwargs = self.get_optional_kwargs_for_list()
kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)
return oci_common_utils.list_all_resources(
self.client.list_service_connectors, **kwargs
)
def get_create_model_class(self):
return CreateServiceConnectorDetails
def create_resource(self):
create_details = self.get_create_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.create_service_connector,
call_fn_args=(),
call_fn_kwargs=dict(create_service_connector_details=create_details,),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation=oci_common_utils.CREATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
def get_update_model_class(self):
return UpdateServiceConnectorDetails
def update_resource(self):
update_details = self.get_update_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.update_service_connector,
call_fn_args=(),
call_fn_kwargs=dict(
service_connector_id=self.module.params.get("service_connector_id"),
update_service_connector_details=update_details,
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation=oci_common_utils.UPDATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
def delete_resource(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.delete_service_connector,
call_fn_args=(),
call_fn_kwargs=dict(
service_connector_id=self.module.params.get("service_connector_id"),
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation=oci_common_utils.DELETE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
ServiceConnectorHelperCustom = get_custom_class("ServiceConnectorHelperCustom")
class ResourceHelper(ServiceConnectorHelperCustom, ServiceConnectorHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=True, supports_wait=True
)
module_args.update(
dict(
display_name=dict(aliases=["name"], type="str"),
compartment_id=dict(type="str"),
description=dict(type="str"),
source=dict(
type="dict",
options=dict(
kind=dict(type="str", required=True, choices=["logging"]),
log_sources=dict(
type="list",
elements="dict",
required=True,
options=dict(
compartment_id=dict(type="str", required=True),
log_group_id=dict(type="str"),
log_id=dict(type="str"),
),
),
),
),
tasks=dict(
type="list",
elements="dict",
options=dict(
kind=dict(type="str", required=True, choices=["logRule"]),
condition=dict(type="str", required=True),
),
),
target=dict(
type="dict",
options=dict(
kind=dict(
type="str",
required=True,
choices=[
"notifications",
"objectStorage",
"monitoring",
"functions",
"streaming",
],
),
topic_id=dict(type="str"),
namespace=dict(type="str"),
bucket_name=dict(type="str"),
object_name_prefix=dict(type="str"),
compartment_id=dict(type="str"),
metric_namespace=dict(type="str"),
metric=dict(type="str"),
function_id=dict(type="str"),
stream_id=dict(type="str"),
),
),
freeform_tags=dict(type="dict"),
defined_tags=dict(type="dict"),
service_connector_id=dict(aliases=["id"], type="str"),
state=dict(type="str", default="present", choices=["present", "absent"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="service_connector",
service_client_class=ServiceConnectorClient,
namespace="sch",
)
result = dict(changed=False)
if resource_helper.is_delete_using_name():
result = resource_helper.delete_using_name()
elif resource_helper.is_delete():
result = resource_helper.delete()
elif resource_helper.is_update_using_name():
result = resource_helper.update_using_name()
elif resource_helper.is_update():
result = resource_helper.update()
elif resource_helper.is_create():
result = resource_helper.create()
module.exit_json(**result)
if __name__ == "__main__":
main()
| 41.219048
| 159
| 0.568887
| 3,900
| 0.12873
| 0
| 0
| 0
| 0
| 0
| 0
| 22,784
| 0.752046
|
360379edca40aaeb8a9f20994bc3b04375f6c37f
| 210
|
py
|
Python
|
Kattis/fallingapart.py
|
ruidazeng/online-judge
|
6bdf8bbf1af885637dab474d0ccb58aff22a0933
|
[
"MIT"
] | null | null | null |
Kattis/fallingapart.py
|
ruidazeng/online-judge
|
6bdf8bbf1af885637dab474d0ccb58aff22a0933
|
[
"MIT"
] | null | null | null |
Kattis/fallingapart.py
|
ruidazeng/online-judge
|
6bdf8bbf1af885637dab474d0ccb58aff22a0933
|
[
"MIT"
] | 1
|
2020-06-22T21:07:24.000Z
|
2020-06-22T21:07:24.000Z
|
n = int(input())
intz = [int(x) for x in input().split()]
alice = 0
bob = 0
for i, num in zip(range(n), sorted(intz)[::-1]):
if i%2 == 0:
alice += num
else:
bob += num
print(alice, bob)
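A short worked example (not from the source): for the input 4 followed by 3 1 2 4, the descending order is 4 3 2 1, so Alice takes 4 and 2 while Bob takes 3 and 1, and the program prints 6 4.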
| 21
| 48
| 0.514286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3604769fe194e0541eba00a227334b835b8009c4
| 3,515
|
py
|
Python
|
ffnn/rbf.py
|
RaoulMa/NeuralNets
|
f49072ac88686f753f9b5815d6cc5e71d536c3d2
|
[
"MIT"
] | 1
|
2017-12-03T11:06:33.000Z
|
2017-12-03T11:06:33.000Z
|
ffnn/rbf.py
|
RaoulMa/BasicNeuralNets
|
f49072ac88686f753f9b5815d6cc5e71d536c3d2
|
[
"MIT"
] | null | null | null |
ffnn/rbf.py
|
RaoulMa/BasicNeuralNets
|
f49072ac88686f753f9b5815d6cc5e71d536c3d2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description: Choose a set of data points as weights and calculate RBF nodes for the
first layer. Those are then used as inputs for a one-layer perceptron, which gives the
output
"""
import numpy as np
import pcn
class rbf:
    """ radial basis function """
def __init__(self,inputs,targets,nRBF,sigma=0,normalise=0,eta=0.25,functype='sigmoid',traintype='batch'):
""" constructor """
self.inputs = inputs
self.targets = targets
self.nRBF = nRBF #number of RBF nodes
self.normalise = normalise
self.eta = eta #learning rate
self.functype = functype
self.traintype = traintype
#set width of gaussian
if sigma==0:
d = (self.inputs.max(axis=0)-self.inputs.min(axis=0)).max()
self.sigma = d/np.sqrt(2*nRBF)
else:
self.sigma = sigma
#input array of RBF nodes
self.hidden = np.zeros((np.shape(self.inputs)[0],self.nRBF))
#set RBF weights to be random datapoints
self.weights = np.zeros((np.shape(inputs)[1],self.nRBF))
indices = np.arange(np.shape(self.inputs)[0])
np.random.shuffle(indices)
for i in range(self.nRBF):
self.weights[:,i] = self.inputs[indices[i],:]
#calculate the hidden rbf nodes (first layer)
self.hidden = self.rbffwd(self.inputs,1)
#initialise the perceptron for the second layer
self.perceptron = pcn.pcn(self.hidden,self.targets,self.eta,self.functype,self.traintype)
def errfunc(self,outputs,targets):
""" error function """
E = 1/2*np.trace(np.dot(np.transpose(targets-outputs),targets-outputs))
return E
def rbftrain(self,nIt=100):
""" training the network """
#train perceptron
self.perceptron.pcntrain(nIt)
def rbftrain_automatic(self,valid,validt,itSteps):
""" train the perceptron until the error on the validation data increases """
#calculate the hidden rbf nodes (first layer)
rbfvalid = self.rbffwd(valid,1)
trainerror = np.array([])
validerror = np.array([])
(trainerror,validerror) = self.perceptron.pcntrain_automatic(rbfvalid,validt,itSteps)
return trainerror,validerror
def rbffwd(self,inputs,layer):
""" run the network forward """
#rbf nodes
hidden = np.zeros((np.shape(inputs)[0],self.nRBF))
#calculate gaussian overlap of input with weights
for i in range(self.nRBF):
hidden[:,i] = np.exp(-np.sum((inputs - np.ones((1,np.shape(inputs)[1]))*self.weights[:,i])**2,axis=1)/(2*self.sigma**2))
#normalise RBF layer
if self.normalise:
hidden[:,:] /= np.transpose(np.ones((1,np.shape(hidden)[0]))*hidden[:,:].sum(axis=1))
#output of hidden (rbf) layer
outputs = hidden
#output of perceptron layer
if layer == 2:
outputs = self.perceptron.pcnfwd(hidden,True)
return outputs
def confmat(self,inputs,targets):
""" confusion matrix to evaluate the performance of the network """
#calculate hidden nodes
hidden = self.rbffwd(inputs,1)
#confusion matrix of perceptron
self.perceptron.confmat(hidden,targets)
return 0
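A small standalone sketch of the Gaussian RBF activation computed in rbffwd above (illustrative only; the toy data and names are assumptions):
import numpy as np

inputs = np.random.rand(5, 2)      # 5 samples, 2 features
weights = inputs[:3].T             # 3 RBF centres picked from the data, shape (features, nRBF)
sigma = 0.5

hidden = np.zeros((inputs.shape[0], weights.shape[1]))
for i in range(weights.shape[1]):
    # Gaussian overlap of every input with centre i, same formula as in rbffwd
    hidden[:, i] = np.exp(-np.sum((inputs - weights[:, i])**2, axis=1) / (2 * sigma**2))

print(hidden.shape)  # (5, 3)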
| 30.301724
| 132
| 0.588905
| 3,237
| 0.92091
| 0
| 0
| 0
| 0
| 0
| 0
| 980
| 0.278805
|
36056f0439b548a97fafa104e15d32abf2f73d7b
| 836
|
py
|
Python
|
Bot/config.py
|
faelbreseghello/Monsters-Bot
|
9432cf05451ff36c3282a2d6873577e94239e724
|
[
"MIT"
] | 7
|
2020-07-13T22:31:00.000Z
|
2021-01-11T20:17:41.000Z
|
Bot/config.py
|
faelbreseghello/Monsters-Bot
|
9432cf05451ff36c3282a2d6873577e94239e724
|
[
"MIT"
] | 1
|
2020-08-19T18:58:07.000Z
|
2020-08-19T18:58:07.000Z
|
Bot/config.py
|
faelbreseghello/Monsters-Bot
|
9432cf05451ff36c3282a2d6873577e94239e724
|
[
"MIT"
] | 1
|
2021-01-11T21:36:08.000Z
|
2021-01-11T21:36:08.000Z
|
import datetime
import os
# General
Token = open('../Token.txt', 'r') # The token of the bot
Token = Token.read()
prefix = '*' # the command prefix
lang = 'en-us' # 'en-us' or 'pt-br'
memes = os.listdir('../Assets/monsters_memes') # memes db load
banchannel = None # the channel that will be used to ban messages
# Minigame setup
gamechannel = None # You can set here or with the command "*setup"
gameinterval = 3600 #interval between the sessions #TEMP VALUE
winnerPoints = 3 # points for who win the minigame
valid = False
end_day = 30 # The day of the end of the minigame - will be verified at start time
# log file path
logpath = '../logs'
# Language import
if lang == 'en-us':
from en_us import *
elif lang == 'pt-br':
from pt_br import *
else:
raise Exception(f'There is no lang option called {lang}')
| 26.967742
| 82
| 0.685407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 490
| 0.586124
|
3605823cc24094c58501be0321e78ef090f4367d
| 11,294
|
py
|
Python
|
postscripts/_city_transformer_postscripts.py
|
yasahi-hpc/CityTransformer
|
b285525d860b4cd522a30823351ecd3cb74dcdf3
|
[
"MIT"
] | null | null | null |
postscripts/_city_transformer_postscripts.py
|
yasahi-hpc/CityTransformer
|
b285525d860b4cd522a30823351ecd3cb74dcdf3
|
[
"MIT"
] | null | null | null |
postscripts/_city_transformer_postscripts.py
|
yasahi-hpc/CityTransformer
|
b285525d860b4cd522a30823351ecd3cb74dcdf3
|
[
"MIT"
] | null | null | null |
"""
Convert data and then visualize
Data Manipulation
1. Save metrics for validation and test data
Save figures
1. Loss curve
2. plume dispersion and errors
3. metrics
"""
import pathlib
import numpy as np
import xarray as xr
from numpy import ma
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.style
from matplotlib.colors import LogNorm
from ._base_postscript import _BasePostscripts
from .metrics import get_metric
class CityTransformerPostscripts(_BasePostscripts):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model_name = 'CityTransformer'
self.modes = ['val', 'test']
self.threshold = 0.5
self.clip = 1.e-8
self.alpha = 0.9
self.vmin = self.clip
self.vmax = 1.0
self.nb_bins = 100
self.fig_names = ['loss', 'contour', 'metrics']
self.extent = [-1024,1024,-1024,1024]
self.metrics = {'FAC2',
'FAC5',
'MG',
'VG',
'NAD',
'FB',
}
# Matplotlib settings
mpl.style.use('classic')
fontsize = 28
self.fontsize = fontsize
fontname = 'Times New Roman'
plt.rc('xtick', labelsize=fontsize)
plt.rc('ytick', labelsize=fontsize)
plt.rc('font', family=fontname)
self.title_font = {'fontname':fontname, 'size':fontsize, 'color':'black',
'verticalalignment':'bottom'}
self.axis_font = {'fontname':fontname, 'size':fontsize}
def __preprocess(self, epoch):
for mode in self.modes:
all_metrics = {metric_name: [] for metric_name in self.metrics}
nb_shots = self.nb_shots_dict[mode]
for i in range(nb_shots):
filename = pathlib.Path(self.inference_dir) / mode / f'{mode}{i:06}_epoch{epoch:04}.nc'
ds = xr.open_dataset(filename)
levelset = ds['levelset'].values
# Target metrics
metric_dict = {'FAC2': {'factor': 2, 'levelset': levelset},
'FAC5': {'factor': 5, 'levelset': levelset},
'MG': {'levelset': levelset},
'VG': {'levelset': levelset},
'NAD': {'levelset': levelset},
'FB': {'levelset': levelset},
}
evaluated_metrics = self.__evaluate_metrics(ds, metric_dict=metric_dict)
for metric_name in metric_dict.keys():
all_metrics[metric_name].append(evaluated_metrics[metric_name])
# Saving dataset
data_vars = {}
for metric_name, evaluated_values in all_metrics.items():
data_vars[metric_name] = (['shot_idx'], np.asarray(evaluated_values))
coords = {'shot_idx': np.arange(nb_shots)}
filename = self.data_dir / f'{mode}_epoch{epoch:04}.nc'
ds = xr.Dataset(data_vars=data_vars, coords=coords)
ds.to_netcdf(filename)
def __evaluate_metrics(self, ds, metric_dict):
evaluated_metrics = {}
pred, pred_binary = ds['pred_plume'].values.squeeze(), ds['pred_zeros_map'].values
ref, ref_binary = ds['ref_plume'].values.squeeze(), ds['ref_zeros_map'].values
levelset = ds['levelset'].values
pred = self.__mask_img(img=pred, binary=pred_binary, levelset=levelset, threshold=self.threshold, clip=self.clip)
ref = self.__mask_img(img=ref, binary=ref_binary, levelset=levelset, threshold=self.threshold, clip=self.clip)
for metric_name, kwargs in metric_dict.items():
metric = get_metric(metric_name)(**kwargs)
evaluated_metrics[metric_name] = metric.evaluate(pred, ref)
return evaluated_metrics
def __mask_img(self, img, binary, levelset, threshold, clip, apply_mask=False):
img, binary = np.squeeze(img), np.squeeze(binary)
mask = np.logical_or(binary<threshold, levelset >= 0.)
img = 10**img
img = np.where(mask, -1., img) * clip
if apply_mask:
return ma.masked_where(img <= 0, img)
else:
return img
def __classification_by_factor(self, pred, ref, levelset, threshold, clip):
"""
factor2 == 0
factor5 == 0.5
factor5++ == 1.0
"""
if type(pred) is tuple:
pred, pred_binary = pred
ref, ref_binary = ref
# Create mask based on zeros map and levelset
def mask_on_img(img, binary):
mask = np.logical_or(binary < threshold, levelset >= 0.)
img = 10**img
img = np.where(mask, -1, img) * clip
return img
pred = mask_on_img(pred, pred_binary)
ref = mask_on_img(ref, ref_binary)
factor = np.ones_like(ref) # Default 1.0
target_area = np.logical_and(ref > 0., levelset < 0)
fraction = np.where(target_area, pred/ref, 0)
fac2_area = np.logical_and( fraction >= 1/2., fraction <= 2. )
fac5_area = np.logical_and( fraction >= 1/5., fraction <= 5. )
fac2_area = np.logical_and(target_area, fac2_area)
fac5_area = np.logical_and(target_area, fac5_area)
factor[fac5_area] = np.ones_like(ref)[fac5_area] * 0.5
factor[fac2_area] = np.zeros_like(ref)[fac2_area]
correct_zeros = np.logical_and(pred_binary < 0.5, ref_binary < 0.5)
masked_fraction = ma.masked_where(np.logical_or(correct_zeros, levelset >= 0.), factor)
return masked_fraction
def _visualize(self, epoch):
self.data_dir = self.img_dir / 'metrics/data'
if not self.data_dir.exists():
self.data_dir.mkdir(parents=True)
super()._visualize_loss()
self.__preprocess(epoch)
self.__visualize_plume_dispersion(epoch)
self.__visualize_metrics(epoch)
def __visualize_plume_dispersion(self, epoch):
figsize = (8, 8)
for mode in self.modes:
nb_shots = self.nb_shots_dict[mode]
for i in range(nb_shots):
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=figsize,
subplot_kw={'xticks':[], 'yticks':[]}, gridspec_kw=dict(hspace=0.1, wspace=0.05))
axes[1, 0].set_visible(False)
filename = pathlib.Path(self.inference_dir) / mode / f'{mode}{i:06}_epoch{epoch:04}.nc'
ds = xr.open_dataset(filename)
levelset = ds['levelset'].values
x, y = ds.attrs['release_x'], ds.attrs['release_y']
# apply masks
pred, pred_binary = ds['pred_plume'].values.squeeze(), ds['pred_zeros_map'].values
ref, ref_binary = ds['ref_plume'].values.squeeze(), ds['ref_zeros_map'].values
levelset = ds['levelset'].values
factor = self.__classification_by_factor((pred, pred_binary), (ref, ref_binary), levelset=levelset, threshold=self.threshold, clip=self.clip)
masked_pred = self.__mask_img(img=pred, binary=pred_binary, levelset=levelset, threshold=self.threshold, clip=self.clip, apply_mask=True)
masked_ref = self.__mask_img(img=ref, binary=ref_binary, levelset=levelset, threshold=self.threshold, clip=self.clip, apply_mask=True)
# Plotting the ground truth and prediction
im = axes[0, 0].imshow(levelset < 0., cmap='gray', origin='lower', extent=self.extent, interpolation='none')
im = axes[0, 0].imshow(masked_ref, cmap='coolwarm', origin='lower', extent=self.extent, norm=LogNorm(vmin=self.vmin, vmax=self.vmax), alpha=self.alpha, interpolation='none')
axes[0, 0].plot(x, y, color='none', marker='*', markeredgecolor='g', markeredgewidth=2, markersize=12)
im = axes[0, 1].imshow(levelset < 0., cmap='gray', origin='lower', extent=self.extent, interpolation='none')
im = axes[0, 1].imshow(masked_pred, cmap='coolwarm', origin='lower', extent=self.extent, norm=LogNorm(vmin=self.vmin, vmax=self.vmax), alpha=self.alpha, interpolation='none')
axes[0, 1].plot(x, y, color='none', marker='*', markeredgecolor='g', markeredgewidth=2, markersize=12)
# Plotting the factor map
im2 = axes[1, 1].imshow(levelset < 0., cmap='gray', origin='lower', extent=self.extent, interpolation='none')
im2 = axes[1, 1].imshow(factor, cmap='jet', origin='lower', extent=self.extent, vmin=0, vmax=1, alpha=self.alpha, interpolation='none')
axes[1, 1].plot(x, y, color='none', marker='*', markeredgecolor='g', markeredgewidth=2, markersize=12)
axes[0, 0].set_title('Ground Truth', **self.title_font)
axes[0, 1].set_title(f'{self.arch_name}', **self.title_font)
cbar = fig.colorbar(im, ax=axes[0, :])
cbar2 = fig.colorbar(im2, ax=axes[1, :])
cbar2.remove()
figname = self.img_dir / 'contour' / f'log_{mode}{i:06}_epoch{epoch:04}.png'
plt.savefig(figname, bbox_inches='tight')
plt.close('all')
def __visualize_metrics(self, epoch):
figsize = (20, 12)
plot_dict = {}
# key: metric_name, value: xmin, xmax, ymin, ymax, label
# xmin, xmax are also used to make histogram
plot_dict['FAC2'] = (0, 1, 0, 0.05, 'FAC_2')
plot_dict['FAC5'] = (0, 1, 0, 0.1, 'FAC_5')
plot_dict['FB'] = (-2, 2, 0, 0.05, 'FB')
plot_dict['NAD'] = (0, 0.15, 0, 0.15, 'NAD')
plot_dict['MG'] = (0, 2, 0, 0.1, 'MG')
plot_dict['VG'] = (1, 1.15, 0, 0.5, 'VG')
metric_names = plot_dict.keys()
for mode in self.modes:
filename = self.data_dir / f'{mode}_epoch{epoch:04}.nc'
ds = xr.open_dataset(filename)
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=figsize)
for metric_name, ax in zip(metric_names, axes.flatten()):
xmin, xmax, ymin, ymax, label = plot_dict[metric_name]
bins = np.linspace(xmin, xmax, self.nb_bins)
metric = ds[metric_name].values
weights = np.ones_like(metric) / len(metric)
_hist, _bins, _patches = ax.hist(metric, bins=bins, alpha=0.5, weights=weights, label=self.arch_name)
average = np.mean( np.abs(metric) )
std = np.std( np.abs(metric) )
print(f'model: {self.arch_name}, metric_name: {metric_name}, average: {average}, std: {std}')
ax.set_xlim([xmin, xmax])
ax.set_ylim([ymin, ymax])
ax.set_title(metric_name, **self.title_font)
ax.legend(loc='upper right', prop={'size': self.fontsize*0.6})
ax.grid(ls='dashed', lw=1)
figname = self.img_dir / 'metrics' / f'metric_{self.arch_name}.png'
plt.savefig(figname, bbox_inches='tight')
plt.close('all')
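For reference, a hedged standalone sketch of the FAC2 metric requested above (fraction of cells with a positive reference value whose prediction lies within a factor of two), mirroring the factor test in __classification_by_factor; the real implementation lives in postscripts.metrics and may handle masking differently:
import numpy as np

def fac2_sketch(pred, ref):
    # only cells where the reference concentration is positive are scored
    valid = ref > 0.
    ratio = np.where(valid, pred / np.where(valid, ref, 1.0), 0.)
    within = np.logical_and(ratio >= 0.5, ratio <= 2.0)
    return np.logical_and(valid, within).sum() / max(valid.sum(), 1)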
| 43.775194
| 190
| 0.573579
| 10,844
| 0.960156
| 0
| 0
| 0
| 0
| 0
| 0
| 1,675
| 0.148309
|
3606767125c21d0e6b93352716d5f01b3c40e053
| 664
|
py
|
Python
|
OrangeInstaller/OrangeInstaller/Testing.py
|
mcolombo87/OrangeInstaller
|
31486ed532409f08d3b22cd7fdb05f209e3fc3e8
|
[
"Apache-2.0"
] | 3
|
2017-04-08T13:52:22.000Z
|
2018-10-31T20:17:20.000Z
|
OrangeInstaller/OrangeInstaller/Testing.py
|
mcolombo87/OrangeInstaller
|
31486ed532409f08d3b22cd7fdb05f209e3fc3e8
|
[
"Apache-2.0"
] | 46
|
2017-03-16T10:20:11.000Z
|
2018-11-16T15:54:38.000Z
|
OrangeInstaller/OrangeInstaller/Testing.py
|
mcolombo87/OrangeInstaller
|
31486ed532409f08d3b22cd7fdb05f209e3fc3e8
|
[
"Apache-2.0"
] | 1
|
2018-08-12T01:10:41.000Z
|
2018-08-12T01:10:41.000Z
|
from Functions import functions, systemTools
import unittest
import sys
class systemToolsTests(unittest.TestCase):
"""
Class for testing
"""
def test_checkSystemTools(self):
check = False
if systemTools.isWindows() == True:
check=True
self.assertEqual(sys.platform.startswith("win"), True, "OI systemTools OS check disagrees with sys.platform")
if systemTools.isLinux() == True:
check=True
self.assertEqual(sys.platform.startswith("linux"), True, "OI systemTools OS check disagrees with sys.platform")
if __name__ == '__main__':
unittest.main()
| 31.619048
| 125
| 0.641566
| 526
| 0.792169
| 0
| 0
| 0
| 0
| 0
| 0
| 165
| 0.248494
|
36067e37b136228914619d3370100e13fb6c3ddf
| 61,464
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/cisco/iosxr/plugins/module_utils/network/iosxr/argspec/bgp_global/bgp_global.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/cisco/iosxr/plugins/module_utils/network/iosxr/argspec/bgp_global/bgp_global.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/cisco/iosxr/plugins/module_utils/network/iosxr/argspec/bgp_global/bgp_global.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2021 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the
# cli_rm_builder.
#
# Manually editing this file is not advised.
#
# To update the argspec make the desired changes
# in the module docstring and re-run
# cli_rm_builder.
#
#############################################
"""
The arg spec for the iosxr_bgp_global module
"""
class Bgp_globalArgs(object): # pylint: disable=R0903
"""The arg spec for the iosxr_bgp_global module
"""
def __init__(self, **kwargs):
pass
argument_spec = {
"config": {
"type": "dict",
"options": {
"as_number": {"type": "str"},
"bfd": {
"type": "dict",
"options": {
"minimum_interval": {"type": "int"},
"multiplier": {"type": "int"},
},
},
"bgp": {
"type": "dict",
"options": {
"as_path_loopcheck": {"type": "bool"},
"auto_policy_soft_reset": {
"type": "dict",
"options": {"disable": {"type": "bool"}},
},
"bestpath": {
"type": "dict",
"options": {
"as_path": {
"type": "dict",
"options": {
"ignore": {"type": "bool"},
"multipath_relax": {"type": "bool"},
},
},
"aigp": {
"type": "dict",
"options": {"ignore": {"type": "bool"}},
},
"med": {
"type": "dict",
"options": {
"always": {"type": "bool"},
"confed": {"type": "bool"},
"missing_as_worst": {"type": "bool"},
},
},
"compare_routerid": {"type": "bool"},
"cost_community": {
"type": "dict",
"options": {"ignore": {"type": "bool"}},
},
"origin_as": {
"type": "dict",
"options": {
"use": {
"type": "dict",
"options": {
"validity": {"type": "bool"}
},
},
"allow": {
"type": "dict",
"options": {
"invalid": {"type": "bool"}
},
},
},
},
},
},
"cluster_id": {"type": "str"},
"confederation": {
"type": "dict",
"options": {
"identifier": {"type": "int"},
"peers": {"type": "list", "elements": "int"},
},
},
"default": {
"type": "dict",
"options": {"local_preference": {"type": "int"}},
},
"enforce_first_as": {
"type": "dict",
"options": {"disable": {"type": "bool"}},
},
"fast_external_fallover": {
"type": "dict",
"options": {"disable": {"type": "bool"}},
},
"graceful_restart": {
"type": "dict",
"options": {
"set": {"type": "bool"},
"graceful_reset": {"type": "bool"},
"restart_time": {"type": "int"},
"purge_time": {"type": "int"},
"stalepath_time": {"type": "int"},
},
},
"install": {
"type": "dict",
"options": {"diversion": {"type": "bool"}},
},
"log": {
"type": "dict",
"options": {
"log_message": {
"type": "dict",
"options": {"disable": {"type": "bool"}},
},
"neighbor": {
"type": "dict",
"options": {
"changes": {
"type": "dict",
"options": {
"detail": {"type": "bool"},
"disable": {"type": "bool"},
},
}
},
},
},
},
"maximum": {
"type": "dict",
"options": {"neighbor": {"type": "int"}},
},
"multipath": {
"type": "dict",
"options": {
"as_path": {
"type": "dict",
"options": {
"ignore": {
"type": "dict",
"options": {
"onwards": {"type": "bool"}
},
}
},
}
},
},
"origin_as": {
"type": "dict",
"options": {
"validation": {
"type": "dict",
"options": {
"disable": {"type": "bool"},
"signal": {
"type": "dict",
"options": {
"ibgp": {"type": "bool"}
},
},
"time": {
"type": "dict",
"options": {
"time_off": {"type": "bool"},
"time_in_second": {
"type": "int"
},
},
},
},
}
},
},
"redistribute_internal": {"type": "bool"},
"router_id": {"type": "str"},
"scan_time": {"type": "int"},
"unsafe_ebgp_policy": {"type": "bool"},
"update_delay": {"type": "int"},
},
},
"default_information": {
"type": "dict",
"options": {"originate": {"type": "bool"}},
},
"default_metric": {"type": "int"},
"graceful_maintenance": {
"type": "dict",
"options": {
"activate": {
"type": "str",
"choices": [
"all-neighbors",
"retain-routes",
"all-neighbors retain-routes",
"",
],
}
},
},
"ibgp": {
"type": "dict",
"options": {
"policy": {
"type": "dict",
"options": {
"out": {
"type": "dict",
"options": {
"enforce_modifications": {
"type": "bool"
}
},
}
},
}
},
},
"mpls": {
"type": "dict",
"options": {
"activate": {
"type": "dict",
"options": {"interface": {"type": "str"}},
}
},
},
"mvpn": {"type": "bool"},
"neighbors": {
"type": "list",
"elements": "dict",
"options": {
"neighbor": {"type": "str", "required": True},
"advertisement_interval": {"type": "int"},
"bfd": {
"type": "dict",
"options": {
"fast_detect": {
"type": "dict",
"options": {
"disable": {"type": "bool"},
"strict_mode": {"type": "bool"},
},
},
"multiplier": {"type": "int"},
"minimum_interval": {"type": "int"},
},
},
"bmp_activate": {
"type": "dict",
"options": {"server": {"type": "int"}},
},
"capability": {
"type": "dict",
"options": {
"additional_paths": {
"type": "dict",
"options": {
"send": {
"type": "dict",
"options": {
"set": {"type": "bool"},
"disable": {"type": "bool"},
},
},
"receive": {
"type": "dict",
"options": {
"set": {"type": "bool"},
"disable": {"type": "bool"},
},
},
},
},
"suppress": {
"type": "dict",
"options": {
"four_byte_AS": {
"type": "dict",
"options": {
"set": {"type": "bool"}
},
},
"all": {
"type": "dict",
"options": {
"inheritance_disable": {
"type": "bool"
},
"set": {"type": "bool"},
},
},
},
},
},
},
"cluster_id": {"type": "str"},
"description": {"type": "str"},
"dmz_link_bandwidth": {
"type": "dict",
"options": {
"inheritance_disable": {"type": "bool"},
"set": {"type": "bool"},
},
},
"dscp": {"type": "str"},
"ebgp_multihop": {
"type": "dict",
"options": {
"value": {"type": "int"},
"mpls": {"type": "bool"},
},
},
"ebgp_recv_extcommunity_dmz": {
"type": "dict",
"options": {
"inheritance_disable": {"type": "bool"},
"set": {"type": "bool"},
},
},
"ebgp_send_extcommunity_dmz": {
"type": "dict",
"options": {
"inheritance_disable": {"type": "bool"},
"cumulatie": {"type": "bool"},
"set": {"type": "bool"},
},
},
"egress_engineering": {
"type": "dict",
"options": {
"inheritance_disable": {"type": "bool"},
"set": {"type": "bool"},
},
},
"enforce_first_as": {
"type": "dict",
"options": {"disable": {"type": "bool"}},
},
"graceful_maintenance": {
"type": "dict",
"options": {
"set": {"type": "bool"},
"activate": {
"type": "dict",
"options": {
"inheritance_disable": {
"type": "bool"
},
"set": {"type": "bool"},
},
},
"as_prepends": {
"type": "dict",
"options": {
"inheritance_disable": {
"type": "bool"
},
"value": {"type": "int"},
},
},
"local_preference": {
"type": "dict",
"options": {
"value": {"type": "int"},
"inheritance_disable": {
"type": "bool"
},
},
},
},
},
"graceful_restart": {
"type": "dict",
"options": {
"restart_time": {"type": "int"},
"stalepath_time": {"type": "int"},
},
},
"ignore_connected_check": {
"type": "dict",
"options": {
"inheritance_disable": {"type": "bool"},
"set": {"type": "bool"},
},
},
"keychain": {
"type": "dict",
"no_log": False,
"options": {
"name": {"type": "str"},
"inheritance_disable": {"type": "bool"},
},
},
"local": {
"type": "dict",
"options": {
"address": {
"type": "dict",
"options": {
"ipv4_address": {"type": "str"},
"inheritance_disable": {
"type": "bool"
},
},
}
},
},
"local_as": {
"type": "dict",
"options": {
"value": {"type": "int"},
"inheritance_disable": {"type": "bool"},
},
},
"log": {
"type": "dict",
"options": {
"log_message": {
"type": "dict",
"options": {
"in": {
"type": "dict",
"options": {
"value": {"type": "int"},
"disable": {"type": "bool"},
"inheritance_disable": {
"type": "bool"
},
},
},
"out": {
"type": "dict",
"options": {
"value": {"type": "int"},
"disable": {"type": "bool"},
"inheritance_disable": {
"type": "bool"
},
},
},
},
}
},
},
"origin_as": {
"type": "dict",
"options": {
"validation": {
"type": "dict",
"options": {"disable": {"type": "bool"}},
}
},
},
"receive_buffer_size": {"type": "int"},
"remote_as": {"type": "int"},
"send_buffer_size": {"type": "int"},
"session_open_mode": {
"type": "str",
"choices": ["active-only", "both", "passive-only"],
},
"shutdown": {
"type": "dict",
"options": {
"inheritance_disable": {"type": "bool"},
"set": {"type": "bool"},
},
},
"tcp": {
"type": "dict",
"options": {
"mss": {
"type": "dict",
"options": {
"value": {"type": "int"},
"inheritance_disable": {
"type": "bool"
},
},
}
},
},
"timers": {
"type": "dict",
"options": {
"keepalive_time": {"type": "int"},
"holdtime": {"type": "int"},
},
},
"ttl_security": {
"type": "dict",
"options": {
"inheritance_disable": {"type": "bool"},
"set": {"type": "bool"},
},
},
"update": {
"type": "dict",
"options": {
"in": {
"type": "dict",
"options": {
"filtering": {
"type": "dict",
"options": {
"attribute_filter": {
"type": "dict",
"options": {
"group": {
"type": "str"
}
},
},
"logging": {
"type": "dict",
"options": {
"disable": {
"type": "bool"
}
},
},
"update_message": {
"type": "dict",
"options": {
"buffers": {
"type": "int"
}
},
},
},
}
},
}
},
},
"update_source": {"type": "str"},
},
},
"nsr": {
"type": "dict",
"options": {
"set": {"type": "bool"},
"disable": {"type": "bool"},
},
},
"socket": {
"type": "dict",
"options": {
"receive_buffer_size": {"type": "int"},
"send_buffer_size": {"type": "int"},
},
},
"timers": {
"type": "dict",
"options": {
"keepalive_time": {"type": "int"},
"holdtime": {"type": "int"},
},
},
"update": {
"type": "dict",
"options": {
"in": {
"type": "dict",
"options": {
"error_handling": {
"type": "dict",
"options": {
"basic": {
"type": "dict",
"options": {
"ebgp": {
"type": "dict",
"options": {
"disable": {
"type": "bool"
}
},
},
"ibgp": {
"type": "dict",
"options": {
"disable": {
"type": "bool"
}
},
},
},
},
"extended": {
"type": "dict",
"options": {
"ebgp": {"type": "bool"},
"ibgp": {"type": "bool"},
},
},
},
}
},
},
"out": {
"type": "dict",
"options": {"logging": {"type": "bool"}},
},
"limit": {"type": "int"},
},
},
"rpki": {
"type": "dict",
"options": {
"route": {
"type": "dict",
"options": {
"value": {"type": "str"},
"max": {"type": "int"},
"origin": {"type": "int"},
},
},
"servers": {
"type": "list",
"elements": "dict",
"options": {
"name": {"type": "str"},
"purge_time": {"type": "int"},
"refresh_time": {
"type": "dict",
"options": {
"value": {"type": "int"},
"time_off": {"type": "bool"},
},
},
"response_time": {
"type": "dict",
"options": {
"value": {"type": "int"},
"time_off": {"type": "bool"},
},
},
"shutdown": {"type": "bool"},
"transport": {
"type": "dict",
"options": {
"ssh": {
"type": "dict",
"options": {
"port": {"type": "int"}
},
},
"tcp": {
"type": "dict",
"options": {
"port": {"type": "int"}
},
},
},
},
},
},
},
},
"vrfs": {
"type": "list",
"elements": "dict",
"options": {
"vrf": {"type": "str"},
"bfd": {
"type": "dict",
"options": {
"minimum_interval": {"type": "int"},
"multiplier": {"type": "int"},
},
},
"bgp": {
"type": "dict",
"options": {
"auto_policy_soft_reset": {
"type": "dict",
"options": {"disable": {"type": "bool"}},
},
"bestpath": {
"type": "dict",
"options": {
"as_path": {
"type": "dict",
"options": {
"ignore": {"type": "bool"},
"multipath_relax": {
"type": "bool"
},
},
},
"aigp": {
"type": "dict",
"options": {
"ignore": {"type": "bool"}
},
},
"med": {
"type": "dict",
"options": {
"always": {"type": "bool"},
"confed": {"type": "bool"},
"missing_as_worst": {
"type": "bool"
},
},
},
"compare_routerid": {"type": "bool"},
"cost_community": {
"type": "dict",
"options": {
"ignore": {"type": "bool"}
},
},
"origin_as": {
"type": "dict",
"options": {
"use": {
"type": "dict",
"options": {
"validity": {
"type": "bool"
}
},
},
"allow": {
"type": "dict",
"options": {
"invalid": {
"type": "bool"
}
},
},
},
},
},
},
"default": {
"type": "dict",
"options": {
"local_preference": {"type": "int"}
},
},
"enforce_first_as": {
"type": "dict",
"options": {"disable": {"type": "bool"}},
},
"fast_external_fallover": {
"type": "dict",
"options": {"disable": {"type": "bool"}},
},
"log": {
"type": "dict",
"options": {
"log_message": {
"type": "dict",
"options": {
"disable": {"type": "bool"}
},
},
"neighbor": {
"type": "dict",
"options": {
"changes": {
"type": "dict",
"options": {
"detail": {
"type": "bool"
},
"disable": {
"type": "bool"
},
},
}
},
},
},
},
"multipath": {
"type": "dict",
"options": {
"as_path": {
"type": "dict",
"options": {
"ignore": {
"type": "dict",
"options": {
"onwards": {
"type": "bool"
}
},
}
},
}
},
},
"redistribute_internal": {"type": "bool"},
"router_id": {"type": "str"},
"unsafe_ebgp_policy": {"type": "bool"},
},
},
"default_information": {
"type": "dict",
"options": {"originate": {"type": "bool"}},
},
"default_metric": {"type": "int"},
"mpls": {
"type": "dict",
"options": {
"activate": {
"type": "dict",
"options": {"interface": {"type": "str"}},
}
},
},
"neighbors": {
"type": "list",
"elements": "dict",
"options": {
"neighbor": {"type": "str", "required": True},
"advertisement_interval": {"type": "int"},
"bfd": {
"type": "dict",
"options": {
"fast_detect": {
"type": "dict",
"options": {
"disable": {"type": "bool"},
"strict_mode": {
"type": "bool"
},
},
},
"multiplier": {"type": "int"},
"minimum_interval": {"type": "int"},
},
},
"bmp_activate": {
"type": "dict",
"options": {"server": {"type": "int"}},
},
"capability": {
"type": "dict",
"options": {
"additional_paths": {
"type": "dict",
"options": {
"send": {
"type": "dict",
"options": {
"set": {
"type": "bool"
},
"disable": {
"type": "bool"
},
},
},
"receive": {
"type": "dict",
"options": {
"set": {
"type": "bool"
},
"disable": {
"type": "bool"
},
},
},
},
},
"suppress": {
"type": "dict",
"options": {
"four_byte_AS": {
"type": "dict",
"options": {
"set": {"type": "bool"}
},
},
"all": {
"type": "dict",
"options": {
"inheritance_disable": {
"type": "bool"
},
"set": {
"type": "bool"
},
},
},
},
},
},
},
"cluster_id": {"type": "str"},
"description": {"type": "str"},
"dmz_link_bandwidth": {
"type": "dict",
"options": {
"inheritance_disable": {
"type": "bool"
},
"set": {"type": "bool"},
},
},
"dscp": {"type": "str"},
"ebgp_multihop": {
"type": "dict",
"options": {
"value": {"type": "int"},
"mpls": {"type": "bool"},
},
},
"ebgp_recv_extcommunity_dmz": {
"type": "dict",
"options": {
"inheritance_disable": {
"type": "bool"
},
"set": {"type": "bool"},
},
},
"ebgp_send_extcommunity_dmz": {
"type": "dict",
"options": {
"inheritance_disable": {
"type": "bool"
},
"cumulatie": {"type": "bool"},
"set": {"type": "bool"},
},
},
"egress_engineering": {
"type": "dict",
"options": {
"inheritance_disable": {
"type": "bool"
},
"set": {"type": "bool"},
},
},
"enforce_first_as": {
"type": "dict",
"options": {"disable": {"type": "bool"}},
},
"graceful_maintenance": {
"type": "dict",
"options": {
"set": {"type": "bool"},
"activate": {
"type": "dict",
"options": {
"inheritance_disable": {
"type": "bool"
},
"set": {"type": "bool"},
},
},
"as_prepends": {
"type": "dict",
"options": {
"inheritance_disable": {
"type": "bool"
},
"value": {"type": "int"},
},
},
"local_preference": {
"type": "dict",
"options": {
"value": {"type": "int"},
"inheritance_disable": {
"type": "bool"
},
},
},
},
},
"graceful_restart": {
"type": "dict",
"options": {
"restart_time": {"type": "int"},
"stalepath_time": {"type": "int"},
},
},
"ignore_connected_check": {
"type": "dict",
"options": {
"inheritance_disable": {
"type": "bool"
},
"set": {"type": "bool"},
},
},
"keychain": {
"type": "dict",
"no_log": False,
"options": {
"name": {"type": "str"},
"inheritance_disable": {
"type": "bool"
},
},
},
"local": {
"type": "dict",
"options": {
"address": {
"type": "dict",
"options": {
"ipv4_address": {
"type": "str"
},
"inheritance_disable": {
"type": "bool"
},
},
}
},
},
"local_as": {
"type": "dict",
"options": {
"value": {"type": "int"},
"inheritance_disable": {
"type": "bool"
},
},
},
"log": {
"type": "dict",
"options": {
"log_message": {
"type": "dict",
"options": {
"in": {
"type": "dict",
"options": {
"value": {
"type": "int"
},
"disable": {
"type": "bool"
},
"inheritance_disable": {
"type": "bool"
},
},
},
"out": {
"type": "dict",
"options": {
"value": {
"type": "int"
},
"disable": {
"type": "bool"
},
"inheritance_disable": {
"type": "bool"
},
},
},
},
}
},
},
"origin_as": {
"type": "dict",
"options": {
"validation": {
"type": "dict",
"options": {
"disable": {"type": "bool"}
},
}
},
},
"receive_buffer_size": {"type": "int"},
"remote_as": {"type": "int"},
"send_buffer_size": {"type": "int"},
"session_open_mode": {
"type": "str",
"choices": [
"active-only",
"both",
"passive-only",
],
},
"shutdown": {
"type": "dict",
"options": {
"inheritance_disable": {
"type": "bool"
},
"set": {"type": "bool"},
},
},
"tcp": {
"type": "dict",
"options": {
"mss": {
"type": "dict",
"options": {
"value": {"type": "int"},
"inheritance_disable": {
"type": "bool"
},
},
}
},
},
"timers": {
"type": "dict",
"options": {
"keepalive_time": {"type": "int"},
"holdtime": {"type": "int"},
},
},
"ttl_security": {
"type": "dict",
"options": {
"inheritance_disable": {
"type": "bool"
},
"set": {"type": "bool"},
},
},
"update": {
"type": "dict",
"options": {
"in": {
"type": "dict",
"options": {
"filtering": {
"type": "dict",
"options": {
"attribute_filter": {
"type": "dict",
"options": {
"group": {
"type": "str"
}
},
},
"logging": {
"type": "dict",
"options": {
"disable": {
"type": "bool"
}
},
},
"update_message": {
"type": "dict",
"options": {
"buffers": {
"type": "int"
}
},
},
},
}
},
}
},
},
"update_source": {"type": "str"},
},
},
"rd": {
"type": "dict",
"options": {"auto": {"type": "bool"}},
},
"socket": {
"type": "dict",
"options": {
"receive_buffer_size": {"type": "int"},
"send_buffer_size": {"type": "int"},
},
},
"timers": {
"type": "dict",
"options": {
"keepalive_time": {"type": "int"},
"holdtime": {"type": "int"},
},
},
},
},
},
},
"running_config": {"type": "str"},
"state": {
"type": "str",
"choices": [
"deleted",
"merged",
"replaced",
"gathered",
"rendered",
"parsed",
"purged",
],
"default": "merged",
},
} # pylint: disable=C0301
| 49.647819
| 82
| 0.149974
| 60,778
| 0.988839
| 0
| 0
| 0
| 0
| 0
| 0
| 12,281
| 0.199808
|
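The large nested dictionary that closes just above (at the pylint-suppressed brace) is an Ansible-style argument spec describing BGP global settings. As a minimal sketch of how such a spec can be consumed, the snippet below recursively flattens a spec into dotted option paths with their declared types; the function name and the tiny sample fragment are illustrative assumptions, not part of the original file.

# Minimal sketch: flatten an Ansible-style argument spec into dotted option
# paths with their declared types. The sample fragment below is a hypothetical
# stand-in for the much larger spec shown above.
def iter_options(spec, prefix=""):
    for name, meta in spec.items():
        path = prefix + name
        yield path, meta.get("type", "str")
        yield from iter_options(meta.get("options", {}), prefix=path + ".")

sample_spec = {
    "bgp": {
        "type": "dict",
        "options": {"router_id": {"type": "str"}},
    },
    "state": {"type": "str", "choices": ["merged", "deleted"]},
}

for path, option_type in iter_options(sample_spec):
    print(path, "->", option_type)
# bgp -> dict
# bgp.router_id -> str
# state -> str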
36081a586f2b7afca6efc6de5e1d5480c80b61dc
| 7,039
|
py
|
Python
|
quince/ui/components/game_frame.py
|
DnrkasEFF/quince
|
89b5699a63642fd1ed172b566670b4dd8a2f8e18
|
[
"MIT"
] | null | null | null |
quince/ui/components/game_frame.py
|
DnrkasEFF/quince
|
89b5699a63642fd1ed172b566670b4dd8a2f8e18
|
[
"MIT"
] | null | null | null |
quince/ui/components/game_frame.py
|
DnrkasEFF/quince
|
89b5699a63642fd1ed172b566670b4dd8a2f8e18
|
[
"MIT"
] | null | null | null |
"""
The primary frame containing the content for the entire game
"""
import tkinter as tk
import random as random
from quince.utility import is_valid_pickup
from quince.ronda import Ronda
from quince.ui.components.opponents.opponent_frame \
import OpponentFrameHorizontal, OpponentFrameVertical
from quince.ui.components.table.table import Table
from quince.ui.components.player.player_frame import PlayerFrame
class GameFrame(tk.Frame):
"""Tk frame containing the main gameplay display including
cards, decks, and avatars."""
def __init__(self, parent, player, npc1, npc2, npc3, display_scores):
"""Instantiate a new GameFrame
Args:
parent (Tk widget)
player - Player object representing the (human) user
npc1 (NPC) - Shadow player (opponent)
npc2 (NPC) - Shadow player (opponent)
npc3 (NPC) - Shadow player (opponent)
display_scores (function) - Callback to execute when
a ronda is finished
"""
tk.Frame.__init__(self, parent)
self.parent = parent
self.display_scores = display_scores
self.grid_rowconfigure(0, weight=1)
self.grid_rowconfigure(1, weight=3)
self.grid_rowconfigure(2, weight=1)
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=3)
self.grid_columnconfigure(2, weight=1)
self.npc1 = npc1
self.npc2 = npc2
self.npc3 = npc3
self.player = player
self.selected_table_cards = []
self.ronda = Ronda.start([self.player,
self.npc1,
self.npc2,
self.npc3],
self.npc3)
# OPPONENT 1
opp1_hand_size = len(self.ronda.player_cards[self.npc1]['hand'])
opp1_active = self.ronda.current_player is self.npc1
self.opp1 = OpponentFrameVertical(self,
self.npc1.image(),
self.npc1.name(),
opp1_active,
opp1_hand_size)
self.opp1.grid(row=1, column=0)
# OPPONENT 2
opp2_active = self.ronda.current_player is self.npc2
opp2_hand_size = len(self.ronda.player_cards[self.npc2]['hand'])
self.opp2 = OpponentFrameHorizontal(self,
self.npc2.image(),
self.npc2.name(),
opp2_active,
opp2_hand_size)
self.opp2.grid(row=0, column=1)
# OPPONENT 3
opp3_active = self.ronda.current_player is self.npc3
opp3_hand_size = len(self.ronda.player_cards[self.npc3]['hand'])
self.opp3 = OpponentFrameVertical(self,
self.npc3.image(),
self.npc3.name(),
opp3_active,
opp3_hand_size)
self.opp3.grid(row=1, column=2)
# PLAYER
myhand = self.ronda.player_cards[self.player]['hand']
player_is_active = self.ronda.current_player is self.player
self.hud = PlayerFrame(self,
self.player,
myhand,
player_is_active,
self.play_hand)
self.hud.grid(row=2, column=0, columnspan=3)
# TABLE
table_cards = self.ronda.current_mesa
self.tbl = Table(self, table_cards, self.register_table_card_selection)
self.tbl.grid(row=1, column=1)
def draw(self):
"""Update all widgets on the frame"""
self.selected_table_cards = []
table_cards = self.ronda.current_mesa
current_player = self.ronda.current_player
# OPPONENT 1
opp1_hand_size = len(self.ronda.player_cards[self.npc1]['hand'])
opp1_active = self.ronda.current_player is self.npc1
self.opp1.refresh(opp1_hand_size, opp1_active)
# OPPONENT 2
opp2_active = current_player is self.npc2
opp2_hand_size = len(self.ronda.player_cards[self.npc2]['hand'])
self.opp2.refresh(opp2_hand_size, opp2_active)
# OPPONENT 3
opp3_active = current_player is self.npc3
opp3_hand_size = len(self.ronda.player_cards[self.npc3]['hand'])
self.opp3.refresh(opp3_hand_size, opp3_active)
# PLAYER
myhand = self.ronda.player_cards[self.player]['hand']
player_is_active = current_player is self.player
self.hud.refresh(myhand, player_is_active)
# TABLE
self.tbl.destroy()
self.tbl = Table(self, table_cards, self.register_table_card_selection)
self.tbl.grid(row=1, column=1)
def register_table_card_selection(self, cards):
"""Callback function executed by the Table
when the user selects cards.
The list of cards is stored in the GameFrame's
state so that it can be queried when the user
makes a move.
Args:
cards (List of Card)
"""
self.selected_table_cards = cards
def play_hand(self, hand_card):
"""Callback function executed when
player clicks the "Play Hand" button.
"""
if self.ronda.current_player is self.player:
print(f'Attempting to play {hand_card} and\
pick up: {self.selected_table_cards}')
if is_valid_pickup(hand_card, self.selected_table_cards):
self.ronda = self.ronda.play_turn(hand_card,
self.selected_table_cards)
self.draw()
self.play_next_move()
else:
print("not your turn")
def play_next_move(self):
"""This function gets called continually as CPU players make
their moves. When it's the user's turn to play, the loop is
broken until they play their hand, which will start up the
cycle again.
"""
if self.ronda.is_finished:
self.display_scores(self.ronda)
return
if self.ronda.current_player is self.player:
pass
else:
            # Note: randrange(0, 1) always yields 0, so the CPU move is
            # scheduled with no artificial delay before _play_cpu_move runs.
            sleep_time = random.randrange(0, 1)
            self.after(sleep_time*1000, self._play_cpu_move)
def _play_cpu_move(self):
table_cards = self.ronda.current_mesa
current_player = self.ronda.current_player
hand = self.ronda.player_cards[current_player]['hand']
(own_card, mesa_cards) = current_player.get_move(hand, table_cards)
self.ronda = self.ronda.play_turn(own_card, mesa_cards)
print(f'{current_player.name()}\
played: {own_card} and picked up: {mesa_cards}')
self.draw()
self.play_next_move()
| 37.844086
| 79
| 0.574513
| 6,621
| 0.940617
| 0
| 0
| 0
| 0
| 0
| 0
| 1,536
| 0.218213
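GameFrame above wires together a human player, three NPC opponents, and a callback that receives the finished Ronda. A minimal sketch of embedding it in a Tk root follows; the Player/NPC constructors and their import path are assumptions about the rest of the quince package (not shown here), and the explicit play_next_move() call is a guess at how the surrounding application starts the turn loop.

# Minimal wiring sketch. Player/NPC constructors and their import path are
# assumptions; only GameFrame itself appears above.
import tkinter as tk
from quince.ui.components.game_frame import GameFrame
from quince.player import Player, NPC  # assumed location of these classes

def show_scores(ronda):
    # Placeholder end-of-ronda callback; GameFrame passes the finished Ronda.
    print("Ronda finished:", ronda)

root = tk.Tk()
frame = GameFrame(root, Player("You"), NPC("Ana"), NPC("Luis"), NPC("Mar"),
                  display_scores=show_scores)
frame.pack(fill="both", expand=True)
frame.play_next_move()  # assumed kick-off; it pauses when it is the human's turn
root.mainloop()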
|
360825b11a2ba8661131f351d015f5a8ff5ce829
| 263
|
py
|
Python
|
Python_Projects/numeric/lossofsignificance.py
|
arifBurakDemiray/TheCodesThatIWrote
|
17d7bc81c516ec97110d0749e9c19d5e6ef9fc88
|
[
"MIT"
] | 1
|
2019-11-01T20:18:06.000Z
|
2019-11-01T20:18:06.000Z
|
Python_Projects/numeric/lossofsignificance.py
|
arifBurakDemiray/TheCodesThatIWrote
|
17d7bc81c516ec97110d0749e9c19d5e6ef9fc88
|
[
"MIT"
] | null | null | null |
Python_Projects/numeric/lossofsignificance.py
|
arifBurakDemiray/TheCodesThatIWrote
|
17d7bc81c516ec97110d0749e9c19d5e6ef9fc88
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 13 13:35:33 2020
"""
# Demonstrates loss of significance (catastrophic cancellation):
# algebraically 1/(1-x) - 1/(1+x) == 2x/(1 - x**2), but the subtraction
# form loses accuracy as x shrinks because both terms approach 1.
x = 1e-1
flag = True
a = 0
while (flag):
    print(((2*x)/(1-(x**2))), "......", (1/(1-x))-(1/(1+x)))
    x = x*(1e-1)  # shrink x by a factor of 10 each pass
    a = a+1
    if (a == 25):
        flag = False
| 14.611111
| 59
| 0.48289
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 110
| 0.418251
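The short script above compares the well-conditioned closed form 2x/(1 - x**2) with the cancellation-prone subtraction 1/(1-x) - 1/(1+x). The check below is an illustrative addition (not part of the original file) that makes the digit loss explicit by printing the relative error of the subtraction form as x shrinks.

# Illustrative check: relative error of the cancellation-prone form versus
# the algebraically equivalent closed form, for decreasing x.
for exponent in range(1, 18):
    x = 10.0 ** (-exponent)
    reference = (2 * x) / (1 - x ** 2)        # well-conditioned form
    naive = (1 / (1 - x)) - (1 / (1 + x))     # suffers cancellation for small x
    rel_err = abs(naive - reference) / reference
    print(f"x = 1e-{exponent:02d}   relative error: {rel_err:.2e}")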
|
36087ed60369c020bd543832aa6b41bed88a5c17
| 100
|
py
|
Python
|
easyfl/test.py
|
weimingwill/easyfl-pypi
|
f9135ab14f8d486d4a1065fa62ade43fa14490a5
|
[
"MIT"
] | 2
|
2021-11-08T12:24:06.000Z
|
2021-11-08T12:24:33.000Z
|
easyfl/test.py
|
weimingwill/easyfl-pypi
|
f9135ab14f8d486d4a1065fa62ade43fa14490a5
|
[
"MIT"
] | null | null | null |
easyfl/test.py
|
weimingwill/easyfl-pypi
|
f9135ab14f8d486d4a1065fa62ade43fa14490a5
|
[
"MIT"
] | null | null | null |
class Test:
def __init__(self):
pass
def hi(self):
print("hello world")
| 16.666667
| 28
| 0.52
| 100
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 13
| 0.13
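The trailing numeric fields after each file in this dump (average line length, maximum line length, alphanumeric fraction, and the various count/score columns) are per-file statistics. A rough sketch of recomputing the first three from raw source text is below; the exact definitions behind the stored values are an assumption, and the short Test class above has had its blank lines stripped here, so the printed numbers will not reproduce the stored ones byte-for-byte.

# Rough sketch: recompute three per-file statistics from raw source text.
# The precise definitions used to produce the stored values are assumptions.
def basic_stats(source: str):
    lines = source.splitlines()
    avg_line_length = len(source) / max(len(lines), 1)
    max_line_length = max((len(line) for line in lines), default=0)
    alphanum_fraction = sum(ch.isalnum() for ch in source) / max(len(source), 1)
    return avg_line_length, max_line_length, alphanum_fraction

sample = 'class Test:\n    def hi(self):\n        print("hello world")\n'
print(basic_stats(sample))  # roughly (19.67, 28, ...) for this tiny sample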
|
360a23f4d6f5c86eb8c653834fc1cf467b915bfa
| 6,479
|
py
|
Python
|
alphamind/model/treemodel.py
|
atefar2/alpha-mind
|
66d839affb5d81d31d5cac7e5e224278e3f99a8b
|
[
"MIT"
] | 1
|
2020-05-18T20:57:25.000Z
|
2020-05-18T20:57:25.000Z
|
alphamind/model/treemodel.py
|
atefar2/alpha-mind
|
66d839affb5d81d31d5cac7e5e224278e3f99a8b
|
[
"MIT"
] | null | null | null |
alphamind/model/treemodel.py
|
atefar2/alpha-mind
|
66d839affb5d81d31d5cac7e5e224278e3f99a8b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on 2017-12-4
@author: cheng.li
"""
import arrow
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.ensemble import RandomForestClassifier as RandomForestClassifierImpl
from sklearn.ensemble import RandomForestRegressor as RandomForestRegressorImpl
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier as XGBClassifierImpl
from xgboost import XGBRegressor as XGBRegressorImpl
from alphamind.model.modelbase import create_model_base
class RandomForestRegressor(create_model_base('sklearn')):
def __init__(self,
n_estimators: int = 100,
max_features: str = 'auto',
features=None,
fit_target=None,
**kwargs):
super().__init__(features=features, fit_target=fit_target)
self.impl = RandomForestRegressorImpl(n_estimators=n_estimators,
max_features=max_features,
**kwargs)
@property
def importances(self):
return self.impl.feature_importances_.tolist()
class RandomForestClassifier(create_model_base('sklearn')):
def __init__(self,
n_estimators: int = 100,
max_features: str = 'auto',
features=None,
fit_target=None,
**kwargs):
super().__init__(features=features, fit_target=fit_target)
self.impl = RandomForestClassifierImpl(n_estimators=n_estimators,
max_features=max_features,
**kwargs)
@property
def importances(self):
return self.impl.feature_importances_.tolist()
class XGBRegressor(create_model_base('xgboost')):
def __init__(self,
n_estimators: int = 100,
learning_rate: float = 0.1,
max_depth: int = 3,
features=None,
fit_target=None,
n_jobs: int = 1,
missing: float = np.nan,
**kwargs):
super().__init__(features=features, fit_target=fit_target)
self.impl = XGBRegressorImpl(n_estimators=n_estimators,
learning_rate=learning_rate,
max_depth=max_depth,
n_jobs=n_jobs,
missing=missing,
**kwargs)
@property
def importances(self):
return self.impl.feature_importances_.tolist()
class XGBClassifier(create_model_base('xgboost')):
def __init__(self,
n_estimators: int = 100,
learning_rate: float = 0.1,
max_depth: int = 3,
features=None,
fit_target=None,
n_jobs: int = 1,
missing: float = np.nan,
**kwargs):
super().__init__(features=features, fit_target=fit_target)
self.impl = XGBClassifierImpl(n_estimators=n_estimators,
learning_rate=learning_rate,
max_depth=max_depth,
n_jobs=n_jobs,
missing=missing,
**kwargs)
self.impl = XGBClassifier.model_decode(self.model_encode())
@property
def importances(self):
return self.impl.feature_importances_.tolist()
class XGBTrainer(create_model_base('xgboost')):
def __init__(self,
objective='binary:logistic',
booster='gbtree',
tree_method='hist',
n_estimators: int = 100,
learning_rate: float = 0.1,
max_depth=3,
eval_sample=None,
early_stopping_rounds=None,
subsample=1.,
colsample_bytree=1.,
features=None,
fit_target=None,
random_state: int = 0,
n_jobs: int = 1,
**kwargs):
super().__init__(features=features, fit_target=fit_target)
self.params = {
'silent': 1,
'objective': objective,
'max_depth': max_depth,
'eta': learning_rate,
'booster': booster,
'tree_method': tree_method,
'subsample': subsample,
'colsample_bytree': colsample_bytree,
'nthread': n_jobs,
'seed': random_state
}
self.eval_sample = eval_sample
self.num_boost_round = n_estimators
self.early_stopping_rounds = early_stopping_rounds
self.impl = None
self.kwargs = kwargs
self.trained_time = None
def fit(self, x: pd.DataFrame, y: np.ndarray):
if self.eval_sample:
x_train, x_eval, y_train, y_eval = train_test_split(x[self.features].values,
y,
test_size=self.eval_sample,
random_state=42)
d_train = xgb.DMatrix(x_train, y_train)
d_eval = xgb.DMatrix(x_eval, y_eval)
self.impl = xgb.train(params=self.params,
dtrain=d_train,
num_boost_round=self.num_boost_round,
evals=[(d_eval, 'eval')],
verbose_eval=False,
**self.kwargs)
else:
d_train = xgb.DMatrix(x[self.features].values, y)
self.impl = xgb.train(params=self.params,
dtrain=d_train,
num_boost_round=self.num_boost_round,
**self.kwargs)
self.trained_time = arrow.now().format("YYYY-MM-DD HH:mm:ss")
def predict(self, x: pd.DataFrame) -> np.ndarray:
d_predict = xgb.DMatrix(x[self.features].values)
return self.impl.predict(d_predict)
@property
def importances(self):
imps = self.impl.get_fscore().items()
imps = sorted(imps, key=lambda x: x[0])
return list(zip(*imps))[1]
| 36.60452
| 91
| 0.515666
| 5,937
| 0.916345
| 0
| 0
| 529
| 0.081648
| 0
| 0
| 286
| 0.044143
|
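The classes above wrap scikit-learn and XGBoost estimators behind a common fit/predict surface provided by create_model_base. A minimal usage sketch for XGBTrainer follows; the toy data, the feature names, and the assumption that the base class simply stores the given feature list are illustrative only, since create_model_base itself is not shown here.

# Minimal usage sketch (toy data and feature names are assumptions).
import numpy as np
import pandas as pd

from alphamind.model.treemodel import XGBTrainer

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(200, 2)), columns=["f1", "f2"])
y = (df["f1"] + 0.5 * df["f2"] > 0).astype(int).values

model = XGBTrainer(objective="binary:logistic",
                   n_estimators=20,
                   max_depth=3,
                   features=["f1", "f2"])
model.fit(df, y)                   # trains an xgboost Booster on df[features]
probabilities = model.predict(df)  # binary:logistic predictions are probabilities
print(probabilities[:5])
print(model.importances)           # importance scores, sorted by feature name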