Dataset schema (one row per source file):

| Column | Type | Values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
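
Each record below groups repository metadata (stars, issues, forks), the file `content`, and three statistics derived from that content. As a minimal sketch of what one row looks like in code, the snippet below builds a hypothetical, abbreviated record as a plain dict and recomputes the derived columns from `content`. The record values and the helper's definitions of the statistics are assumptions for illustration; the exact accounting used when the dataset was built (for example, how blank lines and trailing newlines are counted) may differ slightly.

```python
# Illustrative only: a hypothetical, abbreviated record using the schema above,
# plus a helper that recomputes the derived columns from `content`.

def derived_stats(content: str) -> dict:
    """Recompute avg_line_length, max_line_length and alphanum_fraction.

    Assumed definitions: mean and max of per-line lengths, and the fraction
    of alphanumeric characters; the dataset's own definitions may differ.
    """
    lines = content.splitlines() or [""]
    return {
        "avg_line_length": sum(len(line) for line in lines) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": (
            sum(ch.isalnum() for ch in content) / len(content) if content else 0.0
        ),
    }

record = {
    "hexsha": "0" * 40,                     # placeholder SHA, not a real row
    "size": 28,
    "ext": "py",
    "lang": "Python",
    "max_stars_repo_name": "example/repo",  # hypothetical repository
    "max_stars_count": 26,
    "content": "import sys\n\nprint(sys.argv)\n",
}

stats = derived_stats(record["content"])
# Example filter: keep starred rows whose content is mostly alphanumeric.
keep = record["max_stars_count"] is not None and stats["alphanum_fraction"] > 0.25
print(stats, keep)
```
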
hexsha: 6e2838b7143da4e35710c9f9719c3e36ad5f0449 | size: 1,299 | ext: py | lang: Python
max_stars: path=chainer_wing/gui_main/main.py, repo=fukatani/ChainerWing, head=37a1435635cbc610dc86d15c8baca67622355757, licenses=["MIT", "BSD-3-Clause"], count=26, events 2017-07-03T13:50:28.000Z to 2021-02-06T08:43:42.000Z
max_issues: path=chainer_wing/gui_main/main.py, repo=fukatani/CW_gui, head=37a1435635cbc610dc86d15c8baca67622355757, licenses=["MIT", "BSD-3-Clause"], count=10, events 2017-07-03T14:30:00.000Z to 2017-12-21T13:26:43.000Z
max_forks: path=chainer_wing/gui_main/main.py, repo=fukatani/CW_gui, head=37a1435635cbc610dc86d15c8baca67622355757, licenses=["MIT", "BSD-3-Clause"], count=6, events 2017-03-15T13:48:09.000Z to 2019-04-15T19:28:02.000Z
content:
import argparse
import logging
import os
import sys
from PyQt5 import QtWidgets
from chainer_wing.gui_main.graph import Graph
from chainer_wing.gui_main.painter import Painter2D, MainWindow
logger = logging.getLogger('Chainer-Wing')
logger.setLevel(logging.DEBUG)
if os.path.isfile('cw_debug.log'):
os.remove('cw_debug.log')
fh = logging.FileHandler('cw_debug.log')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
def run():
logger.info('Starting ChainerWing Application with '+' '.join(sys.argv))
app = QtWidgets.QApplication(sys.argv)
painter = initialize_painter()
startUI(app, painter)
def initialize_painter():
painter = Painter2D()
Graph(painter=painter)
return painter
def startUI(app, painter):
win = MainWindow(painter=painter)
win.setArgs(parse_argv())
win.show()
logger.debug('Startup successful. Handing main thread control to Qt main loop.')
sys.exit(app.exec_())
def parse_argv():
parser = argparse.ArgumentParser()
parser.add_argument('-i', action='store_true', required=False)
parser.add_argument('--test', nargs=1, required=False, default=False)
args = parser.parse_args()
return args
avg_line_length: 26.510204 | max_line_length: 85 | alphanum_fraction: 0.727483

hexsha: 07b4ee43dae15e9e2f6e154b4a2d99c6afc1e4c6 | size: 599 | ext: py | lang: Python
max_stars: path=python/hetu/gpu_ops/Scatter.py, repo=codecaution/Hetu, head=e278732c2fe3554c8d576585f5bcbf79ade31b68, licenses=["Apache-2.0"], count=null, events=null
max_issues: path=python/hetu/gpu_ops/Scatter.py, repo=codecaution/Hetu, head=e278732c2fe3554c8d576585f5bcbf79ade31b68, licenses=["Apache-2.0"], count=null, events=null
max_forks: path=python/hetu/gpu_ops/Scatter.py, repo=codecaution/Hetu, head=e278732c2fe3554c8d576585f5bcbf79ade31b68, licenses=["Apache-2.0"], count=3, events 2021-11-29T13:47:48.000Z to 2022-03-03T02:00:43.000Z
content:
from __future__ import absolute_import
from .Node import Op
from .. import ndarray
from ..gpu_links import scatter
class ScatterOp(Op):
def __init__(self, node_target, node_index, node_src, ctx=None):
super().__init__(ScatterOp, [node_target, node_index, node_src], ctx)
def compute(self, target, dim, index, src, stream_handle=None):
scatter(target, dim, index, src)
def gradient(self, output_grad):
pass
def infer_shape(self, input_shapes):
pass
def scatter_op(node1, node2, node3, ctx=None):
return ScatterOp(node1, node2, node3, ctx=ctx)
avg_line_length: 28.52381 | max_line_length: 77 | alphanum_fraction: 0.704508

hexsha: d9d77eff8c636ae550cc825ee5cff054aee1ff67 | size: 706 | ext: py | lang: Python
max_stars: path=tests/models/util.py, repo=prorevizor/noc, head=37e44b8afc64318b10699c06a1138eee9e7d6a4e, licenses=["BSD-3-Clause"], count=84, events 2017-10-22T11:01:39.000Z to 2022-02-27T03:43:48.000Z
max_issues: path=tests/models/util.py, repo=prorevizor/noc, head=37e44b8afc64318b10699c06a1138eee9e7d6a4e, licenses=["BSD-3-Clause"], count=22, events 2017-12-11T07:21:56.000Z to 2021-09-23T02:53:50.000Z
max_forks: path=tests/models/util.py, repo=prorevizor/noc, head=37e44b8afc64318b10699c06a1138eee9e7d6a4e, licenses=["BSD-3-Clause"], count=23, events 2017-12-06T06:59:52.000Z to 2022-02-24T00:02:25.000Z
content:
# ----------------------------------------------------------------------
# Various utilities
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.models import get_model, iter_model_id, is_document
def get_models():
for model_id in iter_model_id():
model = get_model(model_id)
if model and not is_document(model):
yield model
def get_documents():
for model_id in iter_model_id():
model = get_model(model_id)
if model and is_document(model):
yield model
avg_line_length: 29.416667 | max_line_length: 72 | alphanum_fraction: 0.466006

hexsha: 1c3828893b4eae531bd9c2811241ec3653b28a34 | size: 4,349 | ext: tac | lang: Python
max_stars: path=mailmynet/Maildir/proxy_postfix/Twisted-11.0.0/doc/core/howto/tutorial/listings/finger/finger18.tac, repo=SPIN-UMass/SWEET, head=1b0f39222e7064f70812e3293ca023619295741d, licenses=["MIT"], count=3, events 2020-04-02T06:23:44.000Z to 2020-08-13T20:32:31.000Z
max_issues: path=mailmynet/Maildir/proxy_postfix/Twisted-11.0.0/doc/core/howto/tutorial/listings/finger/finger18.tac, repo=SPIN-UMass/SWEET, head=1b0f39222e7064f70812e3293ca023619295741d, licenses=["MIT"], count=null, events=null
max_forks: path=mailmynet/Maildir/proxy_postfix/Twisted-11.0.0/doc/core/howto/tutorial/listings/finger/finger18.tac, repo=SPIN-UMass/SWEET, head=1b0f39222e7064f70812e3293ca023619295741d, licenses=["MIT"], count=1, events 2020-04-02T06:26:10.000Z to 2020-04-02T06:26:10.000Z
content:
# Do everything properly
from twisted.application import internet, service
from twisted.internet import protocol, reactor, defer
from twisted.words.protocols import irc
from twisted.protocols import basic
from twisted.web import resource, server, static, xmlrpc
import cgi
def catchError(err):
return "Internal error in server"
class FingerProtocol(basic.LineReceiver):
def lineReceived(self, user):
d = self.factory.getUser(user)
d.addErrback(catchError)
def writeValue(value):
self.transport.write(value+'\r\n')
self.transport.loseConnection()
d.addCallback(writeValue)
class IRCReplyBot(irc.IRCClient):
def connectionMade(self):
self.nickname = self.factory.nickname
irc.IRCClient.connectionMade(self)
def privmsg(self, user, channel, msg):
user = user.split('!')[0]
if self.nickname.lower() == channel.lower():
d = self.factory.getUser(msg)
d.addErrback(catchError)
d.addCallback(lambda m: "Status of %s: %s" % (msg, m))
d.addCallback(lambda m: self.msg(user, m))
class UserStatusTree(resource.Resource):
def __init__(self, service):
resource.Resource.__init__(self)
self.service = service
def render_GET(self, request):
d = self.service.getUsers()
def formatUsers(users):
l = ['<li><a href="%s">%s</a></li>' % (user, user)
for user in users]
return '<ul>'+''.join(l)+'</ul>'
d.addCallback(formatUsers)
d.addCallback(request.write)
d.addCallback(lambda _: request.finish())
return server.NOT_DONE_YET
def getChild(self, path, request):
if path=="":
return UserStatusTree(self.service)
else:
return UserStatus(path, self.service)
class UserStatus(resource.Resource):
def __init__(self, user, service):
resource.Resource.__init__(self)
self.user = user
self.service = service
def render_GET(self, request):
d = self.service.getUser(self.user)
d.addCallback(cgi.escape)
d.addCallback(lambda m:
'<h1>%s</h1>'%self.user+'<p>%s</p>'%m)
d.addCallback(request.write)
d.addCallback(lambda _: request.finish())
return server.NOT_DONE_YET
class UserStatusXR(xmlrpc.XMLRPC):
def __init__(self, service):
xmlrpc.XMLRPC.__init__(self)
self.service = service
def xmlrpc_getUser(self, user):
return self.service.getUser(user)
class FingerService(service.Service):
def __init__(self, filename):
self.filename = filename
self.users = {}
def _read(self):
self.users.clear()
for line in file(self.filename):
user, status = line.split(':', 1)
user = user.strip()
status = status.strip()
self.users[user] = status
self.call = reactor.callLater(30, self._read)
def getUser(self, user):
return defer.succeed(self.users.get(user, "No such user"))
def getUsers(self):
return defer.succeed(self.users.keys())
def getFingerFactory(self):
f = protocol.ServerFactory()
f.protocol = FingerProtocol
f.getUser = self.getUser
return f
def getResource(self):
r = UserStatusTree(self)
x = UserStatusXR(self)
r.putChild('RPC2', x)
return r
def getIRCBot(self, nickname):
f = protocol.ReconnectingClientFactory()
f.protocol = IRCReplyBot
f.nickname = nickname
f.getUser = self.getUser
return f
def startService(self):
self._read()
service.Service.startService(self)
def stopService(self):
service.Service.stopService(self)
self.call.cancel()
application = service.Application('finger', uid=1, gid=1)
f = FingerService('/etc/users')
serviceCollection = service.IServiceCollection(application)
internet.TCPServer(79, f.getFingerFactory()
).setServiceParent(serviceCollection)
internet.TCPServer(8000, server.Site(f.getResource())
).setServiceParent(serviceCollection)
internet.TCPClient('irc.freenode.org', 6667, f.getIRCBot('fingerbot')
).setServiceParent(serviceCollection)
avg_line_length: 29.585034 | max_line_length: 69 | alphanum_fraction: 0.627271

hexsha: 85ecf4afc7bbf9612fe3d68efa7a8e58acfb9bd8 | size: 395 | ext: py | lang: Python
max_stars: path=algoritmo-Fabiano/17-05-exe1.py, repo=Chris-gde/exercicios, head=a448ce837c99fb8043b9060ac1fe14f9b00033a5, licenses=["Apache-2.0"], count=null, events=null
max_issues: path=algoritmo-Fabiano/17-05-exe1.py, repo=Chris-gde/exercicios, head=a448ce837c99fb8043b9060ac1fe14f9b00033a5, licenses=["Apache-2.0"], count=null, events=null
max_forks: path=algoritmo-Fabiano/17-05-exe1.py, repo=Chris-gde/exercicios, head=a448ce837c99fb8043b9060ac1fe14f9b00033a5, licenses=["Apache-2.0"], count=null, events=null
content:
'''
1) Write an algorithm that reads the ages of 5 people and reports the highest age entered.
'''
m=0
i1=int(input("Informe a idade "))
if(i1>m):
m=i1
i2=int(input("Informe a idade "))
if(i2>m):
m=i2
i3=int(input("Informe a idade "))
if(i3>m):
m=i3
i4=int(input("Informe a idade "))
if(i4>m):
m=i4
i5=int(input("Informe a idade "))
if(i5>m):
m=i5
print(m)
avg_line_length: 18.809524 | max_line_length: 96 | alphanum_fraction: 0.602532

hexsha: 27385b79600f0fb722efaf970b57fd324774ff82 | size: 605 | ext: py | lang: Python
max_stars: path=Python/gradecurving.py, repo=JaredLGillespie/OpenKattis, head=71d26883cb5b8a4a1d63a072587de5575d7c29af, licenses=["MIT"], count=null, events=null
max_issues: path=Python/gradecurving.py, repo=JaredLGillespie/OpenKattis, head=71d26883cb5b8a4a1d63a072587de5575d7c29af, licenses=["MIT"], count=null, events=null
max_forks: path=Python/gradecurving.py, repo=JaredLGillespie/OpenKattis, head=71d26883cb5b8a4a1d63a072587de5575d7c29af, licenses=["MIT"], count=null, events=null
content:
# https://open.kattis.com/problems/gradecurving
import math
def curve_func(x):
return 10 * math.sqrt(x)
x, ylow, yhigh = map(int, input().split())
ylowi, yhighi = -1, -1
i = 0
while i <= 100000:
if math.ceil(x) <= yhigh:
yhighi = i
if math.ceil(x) >= ylow and ylowi == -1:
ylowi = i
if math.ceil(x) > yhigh:
break
x = curve_func(x)
i += 1
if ylowi == -1:
print('impossible')
elif yhighi == 100000:
if ylowi == 100000:
print('inf inf')
else:
print('%s inf' % ylowi)
else:
print('%s %s' % (ylowi, yhighi))
avg_line_length: 15.512821 | max_line_length: 48 | alphanum_fraction: 0.533884

hexsha: 976a037b28ab892c7642ad838881f31864cd0f41 | size: 178 | ext: py | lang: Python
max_stars: path=lib/EGG_research/egg/nest/__init__.py, repo=Slowika/GameBias-EmeCom2020, head=5b94c47559f8202bca99c26fc1bcb078dd0509a6, licenses=["MIT"], count=15, events 2020-09-23T08:24:33.000Z to 2022-02-09T14:32:49.000Z
max_issues: path=lib/EGG_research/egg/nest/__init__.py, repo=Slowika/GameBias-EmeCom2020, head=5b94c47559f8202bca99c26fc1bcb078dd0509a6, licenses=["MIT"], count=null, events=null
max_forks: path=lib/EGG_research/egg/nest/__init__.py, repo=Slowika/GameBias-EmeCom2020, head=5b94c47559f8202bca99c26fc1bcb078dd0509a6, licenses=["MIT"], count=5, events 2021-03-05T16:54:45.000Z to 2022-03-31T13:33:58.000Z
content:
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
avg_line_length: 25.428571 | max_line_length: 65 | alphanum_fraction: 0.752809

hexsha: a8de0b9ee5bfb9fd04d2508c09c73e52ebd9cc50 | size: 2,298 | ext: py | lang: Python
max_stars: path=localflavor/ru/forms.py, repo=infoxchange/django-localflavor, head=d3bc070319519b6572e2b6d3d54a4e96bee1ff78, licenses=["BSD-3-Clause"], count=null, events=null
max_issues: path=localflavor/ru/forms.py, repo=infoxchange/django-localflavor, head=d3bc070319519b6572e2b6d3d54a4e96bee1ff78, licenses=["BSD-3-Clause"], count=null, events=null
max_forks: path=localflavor/ru/forms.py, repo=infoxchange/django-localflavor, head=d3bc070319519b6572e2b6d3d54a4e96bee1ff78, licenses=["BSD-3-Clause"], count=null, events=null
content:
"""
Russian-specific forms helpers
"""
from __future__ import unicode_literals
import re
from django.forms.fields import RegexField, Select
from django.utils.translation import ugettext_lazy as _
from .ru_regions import RU_COUNTY_CHOICES, RU_REGIONS_CHOICES
phone_digits_re = re.compile(r'^(?:[78]-?)?(\d{3})[-\.]?(\d{3})[-\.]?(\d{4})$')
class RUCountySelect(Select):
"""
A Select widget that uses a list of Russian Counties as its choices.
"""
def __init__(self, attrs=None):
super(RUCountySelect, self).__init__(attrs, choices=RU_COUNTY_CHOICES)
class RURegionSelect(Select):
"""
A Select widget that uses a list of Russian Regions as its choices.
"""
def __init__(self, attrs=None):
super(RURegionSelect, self).__init__(attrs, choices=RU_REGIONS_CHOICES)
class RUPostalCodeField(RegexField):
"""
Russian Postal code field.
Format: XXXXXX, where X is any digit, and first digit is not zero.
"""
default_error_messages = {
'invalid': _('Enter a postal code in the format XXXXXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(RUPostalCodeField, self).__init__(r'^\d{6}$',
max_length, min_length, *args, **kwargs)
class RUPassportNumberField(RegexField):
"""
Russian internal passport number format:
XXXX XXXXXX where X - any digit.
"""
default_error_messages = {
'invalid': _('Enter a passport number in the format XXXX XXXXXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(RUPassportNumberField, self).__init__(r'^\d{4} \d{6}$',
max_length, min_length, *args, **kwargs)
class RUAlienPassportNumberField(RegexField):
"""
Russian alien's passport number format:
XX XXXXXXX where X - any digit.
"""
default_error_messages = {
'invalid': _('Enter a passport number in the format XX XXXXXXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(RUAlienPassportNumberField, self).__init__(r'^\d{2} \d{7}$',
max_length, min_length, *args, **kwargs)
avg_line_length: 31.916667 | max_line_length: 97 | alphanum_fraction: 0.633159

hexsha: 6ebe97836ce8da758909601b639bd2ba30cb478e | size: 4,798 | ext: py | lang: Python
max_stars: path=zentral/contrib/inventory/urls.py, repo=arubdesu/zentral, head=ac0fe663f6e1c27f9a9f55a7500a87e6ac7d9190, licenses=["Apache-2.0"], count=634, events 2015-10-30T00:55:40.000Z to 2022-03-31T02:59:00.000Z
max_issues: path=zentral/contrib/inventory/urls.py, repo=arubdesu/zentral, head=ac0fe663f6e1c27f9a9f55a7500a87e6ac7d9190, licenses=["Apache-2.0"], count=145, events 2015-11-06T00:17:33.000Z to 2022-03-16T13:30:31.000Z
max_forks: path=zentral/contrib/inventory/urls.py, repo=arubdesu/zentral, head=ac0fe663f6e1c27f9a9f55a7500a87e6ac7d9190, licenses=["Apache-2.0"], count=103, events 2015-11-07T07:08:49.000Z to 2022-03-18T17:34:36.000Z
content:
from django.conf.urls import url
from . import views
app_name = "inventory"
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^groups/$', views.GroupsView.as_view(), name='groups'),
url(r'^groups/(?P<group_id>\d+)/machines/$', views.GroupMachinesView.as_view(), name='group_machines'),
url(r'^business_units/$', views.MBUView.as_view(), name='mbu'),
url(r'^business_units/review_merge/$', views.ReviewMBUMergeView.as_view(), name='review_mbu_merge'),
url(r'^business_units/merge/$', views.MergeMBUView.as_view(), name='merge_mbu'),
url(r'^business_units/create/$', views.CreateMBUView.as_view(), name='create_mbu'),
url(r'^business_units/(?P<pk>\d+)/update/$', views.UpdateMBUView.as_view(), name='update_mbu'),
url(r'^business_units/(?P<pk>\d+)/delete/$', views.DeleteMBUView.as_view(), name='delete_mbu'),
url(r'^business_units/(?P<pk>\d+)/tags/$', views.MBUTagsView.as_view(), name='mbu_tags'),
url(r'^business_units/(?P<pk>\d+)/tags/(?P<tag_id>\d+)/remove/$',
views.RemoveMBUTagView.as_view(),
name='remove_mbu_tag'),
url(r'^business_units/(?P<pk>\d+)/machines/$', views.MBUMachinesView.as_view(), name='mbu_machines'),
url(r'^business_units/(?P<pk>\d+)/detach_bu/(?P<bu_id>\d+)/$', views.DetachBUView.as_view(), name='detach_bu'),
url(r'^business_units/(?P<pk>\d+)/api_enrollment/$',
views.MBUAPIEnrollmentView.as_view(),
name='mbu_api_enrollment'),
url(r'^machine/(?P<urlsafe_serial_number>\S+)/events/$',
views.MachineEventsView.as_view(),
name='machine_events'),
url(r'^machine/(?P<urlsafe_serial_number>\S+)/events/fetch/$',
views.FetchMachineEventsView.as_view(),
name='fetch_machine_events'),
url(r'^machine/(?P<urlsafe_serial_number>\S+)/events/store_redirect/$',
views.MachineEventsStoreRedirectView.as_view(),
name='machine_events_store_redirect'),
url(r'^machine/(?P<urlsafe_serial_number>\S+)/macos_app_instances/$',
views.MachineMacOSAppInstancesView.as_view(),
name='machine_macos_app_instances'),
url(r'^machine/(?P<urlsafe_serial_number>\S+)/program_instances/$',
views.MachineProgramInstancesView.as_view(),
name='machine_program_instances'),
url(r'^machine/(?P<urlsafe_serial_number>\S+)/deb_packages/$',
views.MachineDebPackagesView.as_view(),
name='machine_deb_packages'),
url(r'^machine/(?P<urlsafe_serial_number>\S+)/incidents/$',
views.MachineIncidentsView.as_view(),
name='machine_incidents'),
url(r'^machine/(?P<urlsafe_serial_number>\S+)/tags/$', views.MachineTagsView.as_view(), name='machine_tags'),
url(r'^machine/(?P<urlsafe_serial_number>\S+)/tags/(?P<tag_id>\d+)/remove/$',
views.RemoveMachineTagView.as_view(),
name='remove_machine_tag'),
url(r'^machine/(?P<urlsafe_serial_number>\S+)/archive/$',
views.ArchiveMachineView.as_view(),
name='archive_machine'),
url(r'^machine/(?P<urlsafe_serial_number>\S+)/heartbeats/$',
views.MachineHeartbeatsView.as_view(),
name='machine_heartbeats'),
url(r'^machine/(?P<urlsafe_serial_number>\S+)/$', views.MachineView.as_view(), name='machine'),
url(r'^tags/$', views.TagsView.as_view(), name='tags'),
url(r'^tags/create/$', views.CreateTagView.as_view(), name='create_tag'),
url(r'^tags/(?P<pk>\d+)/update/$', views.UpdateTagView.as_view(), name='update_tag'),
url(r'^tags/(?P<pk>\d+)/delete/$', views.DeleteTagView.as_view(), name='delete_tag'),
url(r'^taxonomies/create/$', views.CreateTaxonomyView.as_view(), name='create_taxonomy'),
url(r'^taxonomies/(?P<pk>\d+)/update/$', views.UpdateTaxonomyView.as_view(), name='update_taxonomy'),
url(r'^taxonomies/(?P<pk>\d+)/delete/$', views.DeleteTaxonomyView.as_view(), name='delete_taxonomy'),
url(r'^macos_apps/$', views.MacOSAppsView.as_view(), name='macos_apps'),
url(r'^macos_apps/(?P<pk>\d+)/$', views.MacOSAppView.as_view(), name='macos_app'),
url(r'^macos_apps/(?P<pk>\d+)/instance/(?P<osx_app_instance_id>\d+)/machines/$',
views.OSXAppInstanceMachinesView.as_view(),
name='macos_app_instance_machines'),
# API
url(r'^prometheus_metrics/$',
views.PrometheusMetricsView.as_view(),
name='prometheus_metrics'),
]
main_menu_cfg = {
'weight': 0,
'items': (
('index', 'Machines', False, ("inventory.view_machinesnapshot",)),
('groups', 'Groups', False, ("inventory.view_machinegroup",)),
('mbu', 'Business units', False, ("inventory.view_metabusinessunit",)),
('macos_apps', 'macOS applications', False, ("inventory.view_osxapp", "inventory.view_osxappinstance")),
('tags', 'Tags', False, ("inventory.view_tag",)),
)
}
avg_line_length: 55.790698 | max_line_length: 115 | alphanum_fraction: 0.666736

hexsha: 141886650083c4cd29be0c488a4877051b190a0c | size: 5,956 | ext: py | lang: Python
max_stars: path=ChernMachine/kernel/VJob.py, repo=hepChern/ChernMachine, head=174f0d6958427bb8ba9c0538f0cf3597702cfe16, licenses=["MIT"], count=null, events=null
max_issues: path=ChernMachine/kernel/VJob.py, repo=hepChern/ChernMachine, head=174f0d6958427bb8ba9c0538f0cf3597702cfe16, licenses=["MIT"], count=null, events=null
max_forks: path=ChernMachine/kernel/VJob.py, repo=hepChern/ChernMachine, head=174f0d6958427bb8ba9c0538f0cf3597702cfe16, licenses=["MIT"], count=null, events=null
content:
import os
from Chern.utils import utils
from Chern.utils import csys
from Chern.utils import metadata
class VJob(object):
""" Virtual class of the objects, including VVolume, VImage, VContainer
"""
def __init__(self, path):
""" Initialize the project the only **information** of a object instance
"""
self.path = csys.strip_path_string(path)
self.config_file = metadata.ConfigFile(self.path+"/config.json")
def __str__(self):
""" Define the behavior of print(vobject)
"""
return self.path
def __repr__(self):
""" Define the behavior of print(vobject)
"""
return self.path
def relative_path(self, path):
""" Return a path relative to the path of this object
"""
return os.path.relpath(path, self.path)
def job_type(self):
""" Return the type of the object under a specific path.
If path is left blank, return the type of the object itself.
"""
return self.config_file.read_variable("object_type", "")
def is_zombie(self):
return self.job_type() == ""
def error(self):
if os.path.exists(self.path+"/error"):
f = open(self.path+"/error")
error = f.read()
f.close()
return error
else:
return ""
def append_error(self, message):
with open(self.path+"/error", "w") as f:
f.write(message)
f.write("\n")
def add_arc_from(self, path):
""" Add an link from the path object to this object
"""
config_file = metadata.ConfigFile(path+"/.chern/config.json")
succ_str = config_file.read_variable("successors")
if succ_str is None:
succ_str = []
succ_str.append(self.path)
config_file.write_variable("successors", succ_str)
pred_str = self.config_file.read_variable("predecessors")
if pred_str is None:
pred_str = []
pred_str.append(path)
self.config_file.write_variable("predecessors", pred_str)
def remove_arc_from(self, path):
""" FIXME
Remove link from the path
Just copied from "remove_arc_from"
"""
config_file = metadata.ConfigFile(path+"/.chern/config.json")
succ_str = config_file.read_variable("successors")
succ_str.remove(self.path)
config_file.write_variable("successors", succ_str)
config_file = metadata.ConfigFile(self.path+"/.chern/config.json")
pred_str = config_file.read_variable("predecessors")
pred_str.remove(path)
config_file.write_variable("predecessors", pred_str)
def add_arc_to(self, path):
""" FIXME:
Add a link from this object to the path object
"""
config_file = metadata.ConfigFile(path+"/.chern/config.json")
pred_str = config_file.read_variable("predecessors")
if pred_str is None:
pred_str = []
pred_str.append(self.path)
config_file.write_variable("predecessors", pred_str)
config_file = metadata.ConfigFile(self.path+"/.chern/config.json")
succ_str = config_file.read_variable("successors")
if succ_str is None:
succ_str = []
succ_str.append(path)
config_file.write_variable("successors", succ_str)
def remove_arc_to(self, path):
""" FIXME remove the path to the path
"""
config_file = metadata.ConfigFile(path+"/.chern/config.json")
pred_str = config_file.read_variable("predecessors")
pred_str.remove(self.path)
config_file.write_variable("predecessors", pred_str)
config_file = metadata.ConfigFile(self.path+"/.chern/config.json")
succ_str = config_file.read_variable("successors")
succ_str.remove(path)
config_file.write_variable("successors", succ_str)
def successors(self):
""" The successors of the current object
"""
succ_str = self.config_file.read_variable("successors")
if succ_str is None:
return []
successors = []
for path in succ_str:
successors.append(VJob(path))
return successors
def predecessors(self):
""" Predecessors
"""
pred_str = self.config_file.read_variable("dependencies", [])
predecessors = []
for path in pred_str:
predecessors.append(VJob(os.path.join(os.environ["HOME"], ".ChernMachine/Storage", path)))
return predecessors
def impression_to_alias(self, path):
"""
"""
impression_to_alias = self.config_file.read_variable("impression_to_alias", {})
return impression_to_alias.get(path, "")
def alias_to_path(self, alias):
alias_to_path = self.config_file.read_variable("alias_to_path")
return alias_to_path[alias]
def remove_alias(self, alias):
if alias == "":
return
alias_to_path = self.config_file.read_variable("alias_to_path")
path_to_alias = self.config_file.read_variable("path_to_alias")
path = alias_to_path[alias]
path_to_alias.pop(path)
alias_to_path.pop(alias)
self.config_file.write_variable("alias_to_path", alias_to_path)
self.config_file.write_variable("path_to_alias", path_to_alias)
def set_alias(self, alias, path):
if alias == "":
return
path_to_alias = self.config_file.read_variable("path_to_alias")
alias_to_path = self.config_file.read_variable("alias_to_path")
if path_to_alias is None:
path_to_alias = {}
if alias_to_path is None:
alias_to_path = {}
path_to_alias[path] = alias
alias_to_path[alias] = path
self.config_file.write_variable("path_to_alias", path_to_alias)
self.config_file.write_variable("alias_to_path", alias_to_path)
avg_line_length: 35.242604 | max_line_length: 102 | alphanum_fraction: 0.626931

hexsha: 6fcddedbe44529064dd6a729977a896dfe3f1093 | size: 1,283 | ext: py | lang: Python
max_stars: path=topi/python/topi/tensor.py, repo=titikid/tvm, head=0cf3765b28d457d2503ec20b551e9a8eadb1491d, licenses=["Apache-2.0"], count=6, events 2019-08-29T19:00:57.000Z to 2020-06-15T14:55:16.000Z
max_issues: path=topi/python/topi/tensor.py, repo=clhne/tvm, head=d59320c764bd09474775e1b292f3c05c27743d24, licenses=["Apache-2.0"], count=2, events 2018-06-19T10:11:42.000Z to 2018-06-21T08:49:05.000Z
max_forks: path=topi/python/topi/tensor.py, repo=clhne/tvm, head=d59320c764bd09474775e1b292f3c05c27743d24, licenses=["Apache-2.0"], count=3, events 2018-05-29T05:54:15.000Z to 2018-06-11T09:51:51.000Z
content:
# pylint: disable=invalid-name,consider-using-enumerate,unused-argument,len-as-condition
"""Elementwise operators"""
from __future__ import absolute_import as _abs
import tvm
from . import cpp
from . import tag
@tvm.tag_scope(tag=tag.ELEMWISE)
def elemwise_sum(xs):
"""Perform element-wise sum on inputs
Parameters
----------
xs : list of tvm.Tensor
Input arguments.
Returns
-------
y : tvm.Tensor
The result.
"""
return cpp.elemwise_sum(xs)
@tvm.tag_scope(tag=tag.ELEMWISE)
def full(shape, dtype, fill_value):
"""Fill tensor with fill_value
Parameters
----------
shape : tuple
Input tensor shape.
dtype : str
Data type
fill_value : float
Value to be filled
Returns
-------
y : tvm.Tensor
The result.
"""
return cpp.full(shape, dtype, fill_value)
@tvm.tag_scope(tag=tag.ELEMWISE)
def full_like(x, fill_value):
"""Construct a tensor with same shape as input tensor,
then fill tensor with fill_value.
Parameters
----------
x : tvm.Tensor
Input argument.
fill_value : float
Value to be filled
Returns
-------
y : tvm.Tensor
The result.
"""
return cpp.full_like(x, fill_value)
avg_line_length: 20.046875 | max_line_length: 88 | alphanum_fraction: 0.613406

hexsha: e6cc651e0b3c89b99fd7cad95bd041e47975b9fb | size: 1,140 | ext: py | lang: Python
max_stars: path=output/models/ms_data/datatypes/date_xsd/date.py, repo=tefra/xsdata-w3c-tests, head=b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f, licenses=["MIT"], count=1, events 2021-08-14T17:59:21.000Z to 2021-08-14T17:59:21.000Z
max_issues: path=output/models/ms_data/datatypes/date_xsd/date.py, repo=tefra/xsdata-w3c-tests, head=b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f, licenses=["MIT"], count=4, events 2020-02-12T21:30:44.000Z to 2020-04-15T20:06:46.000Z
max_forks: path=output/models/ms_data/datatypes/date_xsd/date.py, repo=tefra/xsdata-w3c-tests, head=b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f, licenses=["MIT"], count=null, events=null
content:
from dataclasses import dataclass, field
from typing import Optional
from xsdata.models.datatype import XmlDate
@dataclass
class ComplexfooType:
class Meta:
name = "complexfooType"
comp_foo: Optional[XmlDate] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
}
)
@dataclass
class SimpleTest:
class Meta:
name = "simpleTest"
value: Optional[XmlDate] = field(
default=None,
metadata={
"required": True,
}
)
@dataclass
class ComplexTest(ComplexfooType):
class Meta:
name = "complexTest"
@dataclass
class Root:
class Meta:
name = "root"
complex_test: Optional[ComplexTest] = field(
default=None,
metadata={
"name": "complexTest",
"type": "Element",
"required": True,
}
)
simple_test: Optional[XmlDate] = field(
default=None,
metadata={
"name": "simpleTest",
"type": "Element",
"required": True,
}
)
avg_line_length: 18.688525 | max_line_length: 48 | alphanum_fraction: 0.535965

hexsha: f9a6012d6cd0429c0dfd5422623a9e32d43ef0bf | size: 11,812 | ext: py | lang: Python
max_stars: path=resotocore/core/query/query_parser.py, repo=someengineering/cloudkeeper, head=b5fcb30bf4e727eabe93e536d925d0f76926630d, licenses=["Apache-2.0"], count=316, events 2021-07-08T12:54:19.000Z to 2022-01-12T18:50:17.000Z
max_issues: path=resotocore/core/query/query_parser.py, repo=someengineering/cloudkeeper, head=b5fcb30bf4e727eabe93e536d925d0f76926630d, licenses=["Apache-2.0"], count=80, events 2021-07-30T23:55:50.000Z to 2022-01-12T15:39:31.000Z
max_forks: path=resotocore/core/query/query_parser.py, repo=someengineering/cloudkeeper, head=b5fcb30bf4e727eabe93e536d925d0f76926630d, licenses=["Apache-2.0"], count=14, events 2021-08-23T08:29:29.000Z to 2022-01-08T04:42:28.000Z
content:
from dataclasses import replace
from functools import reduce
import parsy
from parsy import string, Parser, regex
from core.error import ParseError
from core.model.graph_access import EdgeType, Direction
from core.parse_util import (
lparen_p,
lexeme,
rparen_p,
l_bracket_p,
r_bracket_p,
colon_p,
comma_p,
equals_p,
true_p,
false_p,
dot_dot_p,
float_p,
integer_dp,
variable_dp,
literal_p,
make_parser,
whitespace,
quoted_string_p,
space_dp,
json_value_p,
variable_p,
variable_no_array_p,
integer_p,
double_quote_dp,
l_curly_dp,
r_curly_dp,
l_curly_p,
r_curly_p,
)
from core.query.model import (
Predicate,
CombinedTerm,
IsTerm,
Part,
Navigation,
Query,
FunctionTerm,
IdTerm,
AggregateVariable,
AggregateFunction,
Aggregate,
AllTerm,
Sort,
SortOrder,
WithClauseFilter,
WithClause,
AggregateVariableName,
AggregateVariableCombined,
NotTerm,
MergeTerm,
MergeQuery,
)
operation_p = (
reduce(
lambda x, y: x | y, [lexeme(string(a)) for a in ["<=", ">=", ">", "<", "==", "!=", "=~", "!~", "in", "not in"]]
)
| lexeme(string("=")).result("==")
| lexeme(string("~")).result("=~")
)
array_modifier_p = reduce(lambda x, y: x | y, [lexeme(string(a)) for a in ["all", "any", "none"]])
function_p = reduce(lambda x, y: x | y, [lexeme(string(a)) for a in ["in_subnet", "has_desired_change", "has_key"]])
preamble_prop_p = reduce(lambda x, y: x | y, [lexeme(string(a)) for a in ["edge_type", "merge_with_ancestors"]])
@make_parser
def predicate_term() -> Parser:
name = yield variable_p
modifier = yield array_modifier_p.optional()
opts = {"filter": modifier} if modifier else {}
op = yield operation_p
value = yield json_value_p
return Predicate(name, op, value, opts)
@make_parser
def function_term() -> Parser:
fn = yield function_p
yield lparen_p
name = yield variable_p
args = yield (comma_p >> json_value_p).many()
yield rparen_p
return FunctionTerm(fn, name, args)
@make_parser
def not_term() -> Parser:
yield not_p
yield lparen_p
term = yield simple_term_p
yield rparen_p
return NotTerm(term)
literal_list_comma_separated_p = (quoted_string_p | literal_p).sep_by(comma_p, min=1)
literal_list_in_square_brackets = l_bracket_p >> literal_list_comma_separated_p << r_bracket_p
literal_list_optional_brackets = literal_list_in_square_brackets | literal_list_comma_separated_p
is_term = lexeme(string("is") >> lparen_p >> literal_list_optional_brackets << rparen_p).map(IsTerm)
id_term = lexeme(string("id") >> lparen_p >> (quoted_string_p | literal_p) << rparen_p).map(IdTerm)
match_all_term = lexeme(string("all")).map(lambda _: AllTerm())
leaf_term_p = is_term | id_term | match_all_term | function_term | predicate_term | not_term
bool_op_p = lexeme(string("and") | string("or"))
not_p = lexeme(string("not"))
@make_parser
def combined_term() -> Parser:
left = yield simple_term_p
result = left
while True:
op = yield bool_op_p.optional()
if op is None:
break
right = yield simple_term_p
result = CombinedTerm(result, op, right)
return result
simple_term_p = (lparen_p >> combined_term << rparen_p) | leaf_term_p
# This can parse a complete term
filter_term_parser = combined_term | simple_term_p
square_brackets_p = lexeme(string("[]"))
@make_parser
def merge_query_parser() -> Parser:
name = yield variable_no_array_p
is_array = yield square_brackets_p.optional()
yield colon_p
query = yield query_parser
return MergeQuery(name, query, not (query.aggregate or is_array))
@make_parser
def merge_parser() -> Parser:
yield l_curly_p
queries = yield merge_query_parser.sep_by(comma_p, min=1)
yield r_curly_p
return queries
@make_parser
def term_parser() -> Parser:
filter_term = yield filter_term_parser
merge = yield merge_parser.optional()
if merge:
post_filter = yield filter_term_parser.optional()
return MergeTerm(filter_term, merge, post_filter)
else:
return filter_term
@make_parser
def range_parser() -> Parser:
yield l_bracket_p
start = yield integer_p
has_end = yield (colon_p | comma_p | dot_dot_p).optional()
maybe_end = yield integer_p.optional()
yield r_bracket_p
end = start if has_end is None else maybe_end if maybe_end is not None else Navigation.Max
return start, end
edge_type_p = lexeme(regex("[A-Za-z][A-Za-z0-9_]*"))
@make_parser
def edge_definition() -> Parser:
maybe_edge_type = yield edge_type_p.optional()
maybe_range = yield range_parser.optional()
parsed_range = maybe_range if maybe_range else (1, 1)
return parsed_range[0], parsed_range[1], maybe_edge_type
out_p = lexeme(string("-") >> edge_definition << string("->")).map(
lambda nav: Navigation(nav[0], nav[1], nav[2], Direction.outbound)
)
in_p = lexeme(string("<-") >> edge_definition << string("-")).map(
lambda nav: Navigation(nav[0], nav[1], nav[2], Direction.inbound)
)
in_out_p = lexeme(string("<-") >> edge_definition << string("->")).map(
lambda nav: Navigation(nav[0], nav[1], nav[2], Direction.any)
)
navigation_parser = in_out_p | out_p | in_p
tag_parser = lexeme(string("#") >> literal_p).optional()
with_p = lexeme(string("with"))
count_p = lexeme(string("count"))
len_empty = lexeme(string("empty")).result(WithClauseFilter("==", 0))
len_any = lexeme(string("any")).result(WithClauseFilter(">", 0))
@make_parser
def with_count_parser() -> Parser:
yield count_p
op = yield operation_p
num = yield integer_p
return WithClauseFilter(op, num)
@make_parser
def with_clause_parser() -> Parser:
yield with_p
yield lparen_p
with_filter = yield len_empty | len_any | with_count_parser
yield comma_p
nav = yield navigation_parser
term = yield filter_term_parser.optional()
with_clause = yield with_clause_parser.optional()
yield rparen_p
assert 0 <= nav.start <= 1, "with traversal need to start from 0 or 1"
return WithClause(with_filter, nav, term, with_clause)
sort_order_p = string("asc") | string("desc")
sort_dp = string("sort")
@make_parser
def single_sort_arg_parser() -> Parser:
name = yield variable_dp
order = yield (space_dp >> sort_order_p).optional()
return Sort(name, order if order else SortOrder.Asc)
@make_parser
def sort_parser() -> Parser:
yield sort_dp
yield space_dp
attributes = yield single_sort_arg_parser.sep_by(comma_p, min=1)
yield whitespace
return attributes
limit_p = string("limit")
limit_parser = limit_p + space_dp >> integer_dp
@make_parser
def part_parser() -> Parser:
term = yield term_parser.optional()
yield whitespace
with_clause = yield with_clause_parser.optional()
tag = yield tag_parser
sort = yield sort_parser.optional()
limit = yield limit_parser.optional()
nav = yield navigation_parser.optional() if term or sort or limit else navigation_parser
term = term if term else AllTerm()
return Part(term, tag, with_clause, sort if sort else [], limit, nav)
@make_parser
def key_value_preamble_parser() -> Parser:
key = yield preamble_prop_p
yield equals_p
value = yield quoted_string_p | true_p | false_p | float_p | integer_p | literal_p
return key, value
@make_parser
def preamble_tags_parser() -> Parser:
yield lparen_p
key_values = yield key_value_preamble_parser.sep_by(comma_p)
yield rparen_p
return dict(key_values)
as_p = lexeme(string("as"))
aggregate_p = lexeme(string("aggregate"))
aggregate_func_p = reduce(lambda x, y: x | y, [lexeme(string(a)) for a in ["sum", "count", "min", "max", "avg"]])
match_p = lexeme(string("match"))
aggregate_variable_name_p = variable_p.map(AggregateVariableName)
no_curly_dp = regex(r'[^{"]+')
var_in_curly = (l_curly_dp >> variable_p << r_curly_dp).map(AggregateVariableName)
aggregate_group_variable_name_combined_p = (
double_quote_dp >> (no_curly_dp | var_in_curly).at_least(1).map(AggregateVariableCombined) << double_quote_dp
)
@make_parser
def aggregate_group_variable_parser() -> Parser:
name = yield aggregate_variable_name_p | aggregate_group_variable_name_combined_p
as_name = yield (as_p >> literal_p).optional()
return AggregateVariable(name, as_name)
@make_parser
def merge_ancestors_parser() -> Parser:
# parses foo as bla -> "foo", "bla"
# parses foo -> "foo", "foo"
name = yield variable_p
as_name = yield (as_p >> literal_p).optional()
return name, as_name if as_name else name
math_op_p = reduce(lambda x, y: x | y, [lexeme(string(a)) for a in ["+", "-", "*", "/", "%"]])
@make_parser
def op_with_val_parser() -> Parser:
op = yield math_op_p
value = yield float_p | integer_p
return op, value
@make_parser
def aggregate_group_function_parser() -> Parser:
func = yield aggregate_func_p
yield lparen_p
term_or_int = yield variable_p | integer_p
ops_list = yield op_with_val_parser.many()
yield rparen_p
with_as = yield as_p.optional()
as_name = None
if with_as:
as_name = yield literal_p
return AggregateFunction(func, term_or_int, ops_list, as_name)
@make_parser
def aggregate_parameter_parser() -> Parser:
group_vars = yield (aggregate_group_variable_parser.sep_by(comma_p, min=1) << colon_p).optional()
group_function_vars = yield aggregate_group_function_parser.sep_by(comma_p, min=1)
return group_vars if group_vars else [], group_function_vars
@make_parser
def aggregate_parser() -> Parser:
yield aggregate_p
yield lparen_p
group_vars, group_function_vars = yield aggregate_parameter_parser
yield rparen_p
return Aggregate(group_vars, group_function_vars)
@make_parser
def preamble_parser() -> Parser:
maybe_aggregate = yield aggregate_parser.optional()
maybe_preamble = yield preamble_tags_parser.optional()
preamble = maybe_preamble if maybe_preamble else {}
yield colon_p if maybe_aggregate or maybe_preamble else colon_p.optional()
return maybe_aggregate, preamble
@make_parser
def query_parser() -> Parser:
maybe_aggregate, preamble = yield preamble_parser
parts = yield part_parser.at_least(1)
edge_type = preamble.get("edge_type", EdgeType.default)
if edge_type not in EdgeType.all:
raise AttributeError(f"Given edge_type {edge_type} is not available. Use one of {EdgeType.all}")
def set_edge_type_if_not_set(part: Part) -> Part:
def set_in_with_clause(wc: WithClause) -> WithClause:
nav = wc.navigation
if wc.navigation and not wc.navigation.edge_type:
nav = replace(nav, edge_type=edge_type)
inner = set_in_with_clause(wc.with_clause) if wc.with_clause else wc.with_clause
return replace(wc, navigation=nav, with_clause=inner)
nav = part.navigation
if part.navigation and not part.navigation.edge_type:
nav = replace(nav, edge_type=edge_type)
adapted_wc = set_in_with_clause(part.with_clause) if part.with_clause else part.with_clause
return replace(part, navigation=nav, with_clause=adapted_wc)
adapted = [set_edge_type_if_not_set(part).rewrite_for_ancestors_descendants() for part in parts]
# remove values from preamble, that are only used at parsing time
resulting_preamble = preamble.copy()
return Query(adapted[::-1], resulting_preamble, maybe_aggregate)
def parse_query(query: str) -> Query:
try:
return query_parser.parse(query.strip()) # type: ignore
except parsy.ParseError as ex:
raise ParseError(f"Can not parse query: {query}\n" + str(ex)) from ex
avg_line_length: 29.828283 | max_line_length: 119 | alphanum_fraction: 0.703353

hexsha: a55e062f02fed89f050da05ba6f1f9b5d63cc685 | size: 172 | ext: py | lang: Python
max_stars: path=lab4/src/combinatoric/modules/recurrent_variant5.py, repo=Pavel-Innokentevich-Galanin/5-sem_DM, head=d2c2d4bcd89fbd4cbf8b8518173f5661924be1d5, licenses=["Unlicense"], count=null, events=null
max_issues: path=lab4/src/combinatoric/modules/recurrent_variant5.py, repo=Pavel-Innokentevich-Galanin/5-sem_DM, head=d2c2d4bcd89fbd4cbf8b8518173f5661924be1d5, licenses=["Unlicense"], count=null, events=null
max_forks: path=lab4/src/combinatoric/modules/recurrent_variant5.py, repo=Pavel-Innokentevich-Galanin/5-sem_DM, head=d2c2d4bcd89fbd4cbf8b8518173f5661924be1d5, licenses=["Unlicense"], count=null, events=null
content:
def recurrent_variant5(n):
if n == 0:
return 2
elif n == 1:
return 6
else:
return -2 * recurrent_variant5(n-1) - recurrent_variant5(n-2)
avg_line_length: 24.571429 | max_line_length: 69 | alphanum_fraction: 0.563953

hexsha: 84ac0abc76ebea1d957a07605101c64c9319b93a | size: 4,645 | ext: py | lang: Python
max_stars: path=nikola/plugins/task_create_bundles.py, repo=servalproject/nikola, head=4d78504d93597894f3da4a434dfafdec907601a7, licenses=["MIT"], count=1, events 2015-12-14T21:38:33.000Z to 2015-12-14T21:38:33.000Z
max_issues: path=nikola/plugins/task_create_bundles.py, repo=servalproject/nikola, head=4d78504d93597894f3da4a434dfafdec907601a7, licenses=["MIT"], count=null, events=null
max_forks: path=nikola/plugins/task_create_bundles.py, repo=servalproject/nikola, head=4d78504d93597894f3da4a434dfafdec907601a7, licenses=["MIT"], count=null, events=null
content:
# Copyright (c) 2012 Roberto Alsina y otros.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import os
try:
import webassets
except ImportError:
webassets = None # NOQA
from nikola.plugin_categories import LateTask
from nikola import utils
class BuildBundles(LateTask):
"""Bundle assets using WebAssets."""
name = "build_bundles"
def set_site(self, site):
super(BuildBundles, self).set_site(site)
if webassets is None:
self.site.config['USE_BUNDLES'] = False
def gen_tasks(self):
"""Bundle assets using WebAssets."""
kw = {
'filters': self.site.config['FILTERS'],
'output_folder': self.site.config['OUTPUT_FOLDER'],
'cache_folder': self.site.config['CACHE_FOLDER'],
'theme_bundles': get_theme_bundles(self.site.THEMES),
'themes': self.site.THEMES,
'files_folders': self.site.config['FILES_FOLDERS'],
'code_color_scheme': self.site.config['CODE_COLOR_SCHEME'],
}
def build_bundle(output, inputs):
out_dir = os.path.join(kw['output_folder'],
os.path.dirname(output))
inputs = [i for i in inputs if os.path.isfile(
os.path.join(out_dir, i))]
cache_dir = os.path.join(kw['cache_folder'], 'webassets')
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
env = webassets.Environment(out_dir, os.path.dirname(output),
cache=cache_dir)
bundle = webassets.Bundle(*inputs, output=os.path.basename(output))
env.register(output, bundle)
# This generates the file
env[output].urls()
flag = False
if (webassets is not None and self.site.config['USE_BUNDLES'] is not
False):
for name, files in kw['theme_bundles'].items():
output_path = os.path.join(kw['output_folder'], name)
dname = os.path.dirname(name)
file_dep = [utils.get_asset_path(
os.path.join(dname, fname), kw['themes'],
kw['files_folders'])
for fname in files
]
file_dep = filter(None, file_dep) # removes missing files
task = {
'file_dep': file_dep,
'basename': str(self.name),
'name': str(output_path),
'actions': [(build_bundle, (name, files))],
'targets': [output_path],
'uptodate': [utils.config_changed(kw)]
}
flag = True
yield utils.apply_filters(task, kw['filters'])
if flag is False: # No page rendered, yield a dummy task
yield {
'basename': self.name,
'uptodate': [True],
'name': 'None',
'actions': [],
}
def get_theme_bundles(themes):
"""Given a theme chain, return the bundle definitions."""
bundles = {}
for theme_name in themes:
bundles_path = os.path.join(
utils.get_theme_path(theme_name), 'bundles')
if os.path.isfile(bundles_path):
with open(bundles_path) as fd:
for line in fd:
name, files = line.split('=')
files = [f.strip() for f in files.split(',')]
bundles[name.strip()] = files
break
return bundles
avg_line_length: 38.38843 | max_line_length: 79 | alphanum_fraction: 0.587944

hexsha: 1983ce97c3e6559de18de68790299c3039146c23 | size: 6,296 | ext: py | lang: Python
max_stars: path=test/test_auth_api.py, repo=lanxinplus/lanxinplus-python-sdk, head=39ea9cb66a087df06e61ed4a2b473fb170a47f99, licenses=["MIT"], count=null, events=null
max_issues: path=test/test_auth_api.py, repo=lanxinplus/lanxinplus-python-sdk, head=39ea9cb66a087df06e61ed4a2b473fb170a47f99, licenses=["MIT"], count=null, events=null
max_forks: path=test/test_auth_api.py, repo=lanxinplus/lanxinplus-python-sdk, head=39ea9cb66a087df06e61ed4a2b473fb170a47f99, licenses=["MIT"], count=null, events=null
content:
"""
LanXin+ OpenAPI
LanXin+ OpenAPI Platform # noqa: E501
Generated by: https://openapi.lanxin.cn
"""
import unittest
from pprint import pprint
try:
import lanxinplus_openapi
except ImportError:
import sys
sys.path.append(sys.argv[0].replace("\\test\\test_auth_api.py", "\\")) # noqa: E501
import lanxinplus_openapi
from lanxinplus_openapi.api.auth_api import AuthApi # noqa: E501
from lanxinplus_openapi import Configuration
from lanxinplus_openapi.model.v1_app_token_create_response import V1AppTokenCreateResponse
from lanxinplus_openapi.model.v1_js_api_token_create_response import V1JsApiTokenCreateResponse
from lanxinplus_openapi.model.v1_user_token_create_response import V1UserTokenCreateResponse
from lanxinplus_openapi.model.v1_users_fetch_response import V1UsersFetchResponse
class TestAuthApi(unittest.TestCase):
"""AuthApi unit test stubs"""
@classmethod
def setUpClass(cls):
        # LanXin+ open platform gateway address, e.g.: https://example.com/open/apigw
        host = "host"
        # Application ID, e.g.: 1234567-7654321
        app_id = "app_id"
        # Application secret, e.g.: D25F65E65D887AEFD9C92B00310286FA
app_secret = "app_secret"
cls.config = Configuration(host, app_id, app_secret)
# Configuration.set_default(cls.config)
cls.client = lanxinplus_openapi.ApiClient(configuration=cls.config)
cls.api = AuthApi(api_client=cls.client) # noqa: E501
# get app_token
try:
api = AuthApi(api_client=cls.client)
resp = api.v1_app_token_create("client_credential", cls.config.app_id, cls.config.app_secret)
if resp.errCode == 0:
cls.config.set_app_token(resp.data.appToken, resp.data.expiresIn)
else:
raise lanxinplus_openapi.ApiException(reason=resp.errMsg)
except lanxinplus_openapi.ApiException as e:
print("Exception when calling AuthApi->v1_apptoken_create: %s\n" % e)
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
def test_v1_app_token_create(self):
"""Test case for v1_app_token_create
        Get an application access token # noqa: E501
        """
        grant_type = "grant_type_example" # str | client_credential
        appid = self.config.app_id # str | application ID
        secret = self.config.app_secret # str | application secret
        # example passing only required values which don't have defaults set
        try:
            # Get an application access token
resp = self.api.v1_app_token_create(grant_type, appid, secret)
print("TestCase AuthApi->v1_app_token_create: ")
pprint(resp)
self.assertEqual(resp.errCode, 0)
except lanxinplus_openapi.ApiException as e:
print("Exception when calling AuthApi->v1_app_token_create: %s\n" % e)
def test_v1_js_api_token_create(self):
"""Test case for v1_js_api_token_create
        Get a JSAPI access token # noqa: E501
"""
app_token = self.config.app_token # str | app_token
user_token = self.config.user_token # str | user_token (optional)
# example passing only required values which don't have defaults set
try:
            # Get a JSAPI access token
resp = self.api.v1_js_api_token_create(app_token)
print("TestCase AuthApi->v1_js_api_token_create: ")
pprint(resp)
self.assertEqual(resp.errCode, 0)
except lanxinplus_openapi.ApiException as e:
print("Exception when calling AuthApi->v1_js_api_token_create: %s\n" % e)
'''
# example passing only required values which don't have defaults set
# and optional values
try:
        # Get a JSAPI access token
resp = self.api.v1_js_api_token_create(app_token, user_token=user_token)
print("TestCase AuthApi->v1_js_api_token_create: ")
pprint(resp)
self.assertEqual(resp.errCode, 0)
except lanxinplus_openapi.ApiException as e:
print("Exception when calling AuthApi->v1_js_api_token_create: %s\n" % e)
'''
def test_v1_user_token_create(self):
"""Test case for v1_user_token_create
        Get a user access token # noqa: E501
        """
        app_token = self.config.app_token # str | app_token
        grant_type = "grant_type_example" # str | use the fixed value 'authorization_code'
        code = "code_example" # str | user login-free authorization code
        redirect_uri = "redirect_uri_example" # str | redirect_uri (optional)
        # example passing only required values which don't have defaults set
        try:
            # Get a user access token
resp = self.api.v1_user_token_create(app_token, grant_type, code)
print("TestCase AuthApi->v1_user_token_create: ")
pprint(resp)
self.assertEqual(resp.errCode, 0)
except lanxinplus_openapi.ApiException as e:
print("Exception when calling AuthApi->v1_user_token_create: %s\n" % e)
'''
# example passing only required values which don't have defaults set
# and optional values
try:
        # Get a user access token
resp = self.api.v1_user_token_create(app_token, grant_type, code, redirect_uri=redirect_uri)
print("TestCase AuthApi->v1_user_token_create: ")
pprint(resp)
self.assertEqual(resp.errCode, 0)
except lanxinplus_openapi.ApiException as e:
print("Exception when calling AuthApi->v1_user_token_create: %s\n" % e)
'''
def test_v1_users_fetch(self):
"""Test case for v1_users_fetch
        Get basic user information # noqa: E501
"""
app_token = self.config.app_token # str | app_token
user_token = self.config.user_token # str | user_token
# example passing only required values which don't have defaults set
try:
            # Get basic user information
resp = self.api.v1_users_fetch(app_token, user_token)
print("TestCase AuthApi->v1_users_fetch: ")
pprint(resp)
self.assertEqual(resp.errCode, 0)
except lanxinplus_openapi.ApiException as e:
print("Exception when calling AuthApi->v1_users_fetch: %s\n" % e)
if __name__ == '__main__':
unittest.main()
avg_line_length: 35.772727 | max_line_length: 105 | alphanum_fraction: 0.648189

hexsha: 09322a123cb65562bdadaebcc0f728a3ba47cb8a | size: 3,668 | ext: py | lang: Python
max_stars: path=intersight/models/iam_end_point_user_policy_ref.py, repo=gumpcraca/intersight-python, head=780e6703c739f329084beacbbf2ad7a6a2e59b2b, licenses=["Apache-2.0"], count=null, events=null
max_issues: path=intersight/models/iam_end_point_user_policy_ref.py, repo=gumpcraca/intersight-python, head=780e6703c739f329084beacbbf2ad7a6a2e59b2b, licenses=["Apache-2.0"], count=null, events=null
max_forks: path=intersight/models/iam_end_point_user_policy_ref.py, repo=gumpcraca/intersight-python, head=780e6703c739f329084beacbbf2ad7a6a2e59b2b, licenses=["Apache-2.0"], count=null, events=null
content:
# coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-255
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class IamEndPointUserPolicyRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'moid': 'str',
'object_type': 'str'
}
attribute_map = {
'moid': 'Moid',
'object_type': 'ObjectType'
}
def __init__(self, moid=None, object_type=None):
"""
IamEndPointUserPolicyRef - a model defined in Swagger
"""
self._moid = None
self._object_type = None
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
@property
def moid(self):
"""
Gets the moid of this IamEndPointUserPolicyRef.
:return: The moid of this IamEndPointUserPolicyRef.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this IamEndPointUserPolicyRef.
:param moid: The moid of this IamEndPointUserPolicyRef.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this IamEndPointUserPolicyRef.
:return: The object_type of this IamEndPointUserPolicyRef.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this IamEndPointUserPolicyRef.
:param object_type: The object_type of this IamEndPointUserPolicyRef.
:type: str
"""
self._object_type = object_type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, IamEndPointUserPolicyRef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
avg_line_length: 24.453333 | max_line_length: 77 | alphanum_fraction: 0.5488

hexsha: 776cc1c84f6fe027cef9b10a88ee2ec48221b64e | size: 2,812 | ext: py | lang: Python
max_stars: path=pull_new_data.py, repo=raphaelflash/covid19, head=afa7fe3efc9e836dab52171e30eb3fcdf20f4b30, licenses=["MIT"], count=24, events 2020-03-18T10:37:08.000Z to 2020-05-05T01:12:18.000Z
max_issues: path=pull_new_data.py, repo=raphaelflash/covid19, head=afa7fe3efc9e836dab52171e30eb3fcdf20f4b30, licenses=["MIT"], count=18, events 2020-03-14T04:25:32.000Z to 2020-07-05T20:47:58.000Z
max_forks: path=pull_new_data.py, repo=raphaelflash/covid19, head=afa7fe3efc9e836dab52171e30eb3fcdf20f4b30, licenses=["MIT"], count=9, events 2020-03-18T12:13:20.000Z to 2021-05-17T14:04:57.000Z
content:
from collections import defaultdict
import datetime
from pprint import pprint
import json
import csv
from io import StringIO
import requests
from algoliasearch.search_client import SearchClient
client = SearchClient.create('ZOOMT5L4JY', '7157187130f0c6d53989725670982875')
index = client.init_index('covid')
total_deaths = {
'in': 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/ecdc/total_deaths.csv',
'out': 'src/data/total-deaths.json'
}
daily_deaths = {
'in': 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/ecdc/new_deaths.csv',
'out': 'src/data/daily-deaths.json'
}
total_cases = {
'in': 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/ecdc/total_cases.csv',
'out': 'src/data/total-cases.json'
}
daily_cases = {
'in': 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/ecdc/new_cases.csv',
'out': 'src/data/daily-cases.json'
}
infos = [total_deaths, daily_deaths, total_cases, daily_cases]
def fetch_csv(url: str):
resp = requests.get(url)
string_io = StringIO(resp.text)
reader = csv.DictReader(string_io)
countries = [field for field in reader.fieldnames if field not in ['date']]
country_to_values = defaultdict(list)
for line in reader:
date = datetime.date.fromisoformat(line['date'])
for country in countries:
country_to_values[country].append({'date': date, 'value': int(float(line[country] or 0))})
return country_to_values
start_date = datetime.date(year=2020, month=1, day=21)
def output_json(country_to_values, output_dest: str):
output_array = []
for country, values in country_to_values.items():
if country == 'World':
country = 'Worldwide'
for value in values:
days = (value['date'] - start_date).days
output_array.append({
'country': country,
'code': '',
'year': days,
'cases': value['value'],
})
with open(output_dest, 'w') as f_:
json.dump(output_array, f_)
def update_index(url: str):
resp = requests.get(url)
string_io = StringIO(resp.text)
reader = csv.DictReader(string_io)
countries = [field for field in reader.fieldnames if field not in ['date']]
index_objects = [{'name': country, 'objectID': country} for country in countries if country != 'World']
index_objects.append({'name': 'Worldwide', 'objectID': 'World'})
index.replace_all_objects(index_objects)
update_index(daily_cases['in'])
for info in infos:
github_url = info['in']
output_path = info['out']
print('Processing', github_url)
country_to_values = fetch_csv(github_url)
output_json(country_to_values, output_path)
avg_line_length: 29.6 | max_line_length: 107 | alphanum_fraction: 0.676031

hexsha: 2f2d2a5fd9d64db114fd0dc686c97ad38b8ad9b3 | size: 43,356 | ext: py | lang: Python
max_stars: path=capa/ida/plugin/view.py, repo=evandowning/capa, head=03b15ce28977ffcc617d3c67c9dff20de7ee6196, licenses=["Apache-2.0"], count=1, events 2020-07-16T20:04:55.000Z to 2020-07-16T20:04:55.000Z
max_issues: path=capa/ida/plugin/view.py, repo=evandowning/capa, head=03b15ce28977ffcc617d3c67c9dff20de7ee6196, licenses=["Apache-2.0"], count=47, events 2021-03-17T10:41:44.000Z to 2022-03-28T04:03:01.000Z
max_forks: path=capa/ida/plugin/view.py, repo=CrackerCat/capa, head=b84cc3128d2d54a4b5550bfc3fd05c09e338c2b1, licenses=["Apache-2.0"], count=3, events 2021-05-19T20:18:35.000Z to 2021-05-19T21:02:18.000Z
content:
# Copyright (C) 2020 FireEye, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import re
from collections import Counter
import idc
from PyQt5 import QtGui, QtCore, QtWidgets
import capa.rules
import capa.engine
import capa.ida.helpers
import capa.features.common
import capa.features.basicblock
from capa.ida.plugin.item import CapaExplorerFunctionItem
from capa.ida.plugin.model import CapaExplorerDataModel
MAX_SECTION_SIZE = 750
# default colors used in views
COLOR_GREEN_RGB = (79, 121, 66)
COLOR_BLUE_RGB = (37, 147, 215)
def calc_level_by_indent(line, prev_level=0):
""" """
if not len(line.strip()):
# blank line, which may occur for comments so we simply use the last level
return prev_level
stripped = line.lstrip()
if stripped.startswith("description"):
# need to adjust two spaces when encountering string description
line = line[2:]
# calc line level based on preceding whitespace
return len(line) - len(stripped)
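# Illustrative examples (hypothetical inputs) of how indentation maps to a level:
#   calc_level_by_indent("- and:")             -> 0
#   calc_level_by_indent("    - string: foo")  -> 4
#   calc_level_by_indent("", prev_level=4)     -> 4  (blank lines keep the previous level)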
def parse_feature_for_node(feature):
""" """
description = ""
comment = ""
if feature.startswith("- count"):
# count is weird, we need to handle special
# first, we need to grab the comment, if exists
# next, we need to check for an embedded description
feature, _, comment = feature.partition("#")
m = re.search(r"- count\(([a-zA-Z]+)\((.+)\s+=\s+(.+)\)\):\s*(.+)", feature)
if m:
# reconstruct count without description
feature, value, description, count = m.groups()
feature = "- count(%s(%s)): %s" % (feature, value, count)
elif not feature.startswith("#"):
feature, _, comment = feature.partition("#")
feature, _, description = feature.partition("=")
return map(lambda o: o.strip(), (feature, description, comment))
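# Illustrative example (hypothetical input): a feature line with an embedded description
# and a trailing comment,
#   parse_feature_for_node('- number: 4 = size # width in bytes')
# yields roughly ('- number: 4', 'size', 'width in bytes') after stripping.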
def parse_node_for_feature(feature, description, comment, depth):
""" """
depth = (depth * 2) + 4
display = ""
if feature.startswith("#"):
display += "%s%s\n" % (" " * depth, feature)
elif description:
if feature.startswith(("- and", "- or", "- optional", "- basic block", "- not")):
display += "%s%s" % (" " * depth, feature)
if comment:
display += " # %s" % comment
display += "\n%s- description: %s\n" % (" " * (depth + 2), description)
elif feature.startswith("- string"):
display += "%s%s" % (" " * depth, feature)
if comment:
display += " # %s" % comment
display += "\n%sdescription: %s\n" % (" " * (depth + 2), description)
elif feature.startswith("- count"):
# count is weird, we need to format description based on feature type, so we parse with regex
# assume format - count(<feature_name>(<feature_value>)): <count>
m = re.search(r"- count\(([a-zA-Z]+)\((.+)\)\): (.+)", feature)
if m:
name, value, count = m.groups()
if name in ("string",):
display += "%s%s" % (" " * depth, feature)
if comment:
display += " # %s" % comment
display += "\n%sdescription: %s\n" % (" " * (depth + 2), description)
else:
display += "%s- count(%s(%s = %s)): %s" % (
" " * depth,
name,
value,
description,
count,
)
if comment:
display += " # %s\n" % comment
else:
display += "%s%s = %s" % (" " * depth, feature, description)
if comment:
display += " # %s\n" % comment
else:
display += "%s%s" % (" " * depth, feature)
if comment:
display += " # %s\n" % comment
return display if display.endswith("\n") else display + "\n"
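# Illustrative example (hypothetical input): at depth 0 a plain feature with a comment,
#   parse_node_for_feature("- number: 4", "", "size", 0)
# renders as "    - number: 4 # size\n" in the rule preview.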
def yaml_to_nodes(s):
level = 0
for line in s.splitlines():
feature, description, comment = parse_feature_for_node(line.strip())
o = QtWidgets.QTreeWidgetItem(None)
# set node attributes
setattr(o, "capa_level", calc_level_by_indent(line, level))
if feature.startswith(("- and:", "- or:", "- not:", "- basic block:", "- optional:")):
setattr(o, "capa_type", CapaExplorerRulgenEditor.get_node_type_expression())
elif feature.startswith("#"):
setattr(o, "capa_type", CapaExplorerRulgenEditor.get_node_type_comment())
else:
setattr(o, "capa_type", CapaExplorerRulgenEditor.get_node_type_feature())
# set node text
for (i, v) in enumerate((feature, description, comment)):
o.setText(i, v)
yield o
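# Illustrative: the line "  - or:" becomes an expression node at level 2, while
# '- string: "foo"' becomes a feature node at level 0; these levels drive the tree
# nesting rebuilt later by load_features_from_yaml.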
def iterate_tree(o):
""" """
itr = QtWidgets.QTreeWidgetItemIterator(o)
while itr.value():
yield itr.value()
itr += 1
def calc_item_depth(o):
""" """
depth = 0
while True:
if not o.parent():
break
depth += 1
o = o.parent()
return depth
def build_action(o, display, data, slot):
""" """
action = QtWidgets.QAction(display, o)
action.setData(data)
action.triggered.connect(lambda checked: slot(action))
return action
def build_context_menu(o, actions):
""" """
menu = QtWidgets.QMenu()
for action in actions:
if isinstance(action, QtWidgets.QMenu):
menu.addMenu(action)
else:
menu.addAction(build_action(o, *action))
return menu
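# Sketch of typical usage (hypothetical names): build a menu from (display, data, slot)
# tuples and show it at the cursor position.
#   actions = (("Copy row", (), some_widget.slot_copy_row),)
#   menu = build_context_menu(some_widget.parent(), actions)
#   menu.exec_(some_widget.viewport().mapToGlobal(pos))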
class CapaExplorerRulgenPreview(QtWidgets.QTextEdit):
INDENT = " " * 2
def __init__(self, parent=None):
""" """
super(CapaExplorerRulgenPreview, self).__init__(parent)
self.setFont(QtGui.QFont("Courier", weight=QtGui.QFont.Bold))
self.setLineWrapMode(QtWidgets.QTextEdit.NoWrap)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
def reset_view(self):
""" """
self.clear()
def load_preview_meta(self, ea, author, scope):
""" """
metadata_default = [
"# generated using capa explorer for IDA Pro",
"rule:",
" meta:",
" name: <insert_name>",
" namespace: <insert_namespace>",
" author: %s" % author,
" scope: %s" % scope,
" references: <insert_references>",
" examples:",
" - %s:0x%X" % (capa.ida.helpers.get_file_md5().upper(), ea)
if ea
else " - %s" % (capa.ida.helpers.get_file_md5().upper()),
" features:",
]
self.setText("\n".join(metadata_default))
def keyPressEvent(self, e):
"""intercept key press events"""
if e.key() in (QtCore.Qt.Key_Tab, QtCore.Qt.Key_Backtab):
# apparently it's not easy to implement tabs as spaces, or multi-line tab or SHIFT + Tab
# so we need to implement it ourselves so we can retain properly formatted capa rules
# when a user uses the Tab key
if self.textCursor().selection().isEmpty():
# single line, only worry about Tab
if e.key() == QtCore.Qt.Key_Tab:
self.insertPlainText(self.INDENT)
else:
# multi-line tab or SHIFT + Tab
cur = self.textCursor()
select_start_ppos = cur.selectionStart()
select_end_ppos = cur.selectionEnd()
scroll_ppos = self.verticalScrollBar().sliderPosition()
# determine lineno for first selected line, and column
cur.setPosition(select_start_ppos)
start_lineno = self.count_previous_lines_from_block(cur.block())
start_lineco = cur.columnNumber()
# determine lineno for last selected line
cur.setPosition(select_end_ppos)
end_lineno = self.count_previous_lines_from_block(cur.block())
# now we need to indent or dedent the selected lines. for now, we read the text, modify
# the lines between start_lineno and end_lineno accordingly, and then reset the view
# this might not be the best solution, but it avoids messing around with cursor positions
# to determine the beginning of lines
plain = self.toPlainText().splitlines()
if e.key() == QtCore.Qt.Key_Tab:
# user Tab, indent selected lines
lines_modified = end_lineno - start_lineno
first_modified = True
change = [self.INDENT + line for line in plain[start_lineno : end_lineno + 1]]
else:
# user SHIFT + Tab, dedent selected lines
lines_modified = 0
first_modified = False
change = []
for (lineno, line) in enumerate(plain[start_lineno : end_lineno + 1]):
if line.startswith(self.INDENT):
if lineno == 0:
# keep track if first line is modified, so we can properly display
# the text selection later
first_modified = True
lines_modified += 1
line = line[len(self.INDENT) :]
change.append(line)
# apply modifications, and reset view
plain[start_lineno : end_lineno + 1] = change
self.setPlainText("\n".join(plain) + "\n")
# now we need to properly adjust the selection positions, so users don't have to
# re-select when indenting or dedenting the same lines repeatedly
if e.key() == QtCore.Qt.Key_Tab:
# user Tab, increase increment selection positions
select_start_ppos += len(self.INDENT)
select_end_ppos += (lines_modified * len(self.INDENT)) + len(self.INDENT)
elif lines_modified:
# user SHIFT + Tab, decrease selection positions
if start_lineco not in (0, 1) and first_modified:
# only decrease start position if not in first column
select_start_ppos -= len(self.INDENT)
select_end_ppos -= lines_modified * len(self.INDENT)
# apply updated selection and restore previous scroll position
self.set_selection(select_start_ppos, select_end_ppos, len(self.toPlainText()))
self.verticalScrollBar().setSliderPosition(scroll_ppos)
else:
super(CapaExplorerRulgenPreview, self).keyPressEvent(e)
def count_previous_lines_from_block(self, block):
"""calculate number of lines preceding block"""
count = 0
while True:
block = block.previous()
if not block.isValid():
break
count += block.lineCount()
return count
def set_selection(self, start, end, max):
"""set text selection"""
cursor = self.textCursor()
cursor.setPosition(start)
cursor.setPosition(end if end < max else max, QtGui.QTextCursor.KeepAnchor)
self.setTextCursor(cursor)
class CapaExplorerRulgenEditor(QtWidgets.QTreeWidget):
updated = QtCore.pyqtSignal()
def __init__(self, preview, parent=None):
""" """
super(CapaExplorerRulgenEditor, self).__init__(parent)
self.preview = preview
self.setHeaderLabels(["Feature", "Description", "Comment"])
self.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
self.header().setStretchLastSection(False)
self.setExpandsOnDoubleClick(False)
self.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.setStyleSheet("QTreeView::item {padding-right: 15 px;padding-bottom: 2 px;}")
# enable drag and drop
self.setDragEnabled(True)
self.setAcceptDrops(True)
self.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
# connect slots
self.itemChanged.connect(self.slot_item_changed)
self.customContextMenuRequested.connect(self.slot_custom_context_menu_requested)
self.itemDoubleClicked.connect(self.slot_item_double_clicked)
self.root = None
self.reset_view()
self.is_editing = False
@staticmethod
def get_column_feature_index():
""" """
return 0
@staticmethod
def get_column_description_index():
""" """
return 1
@staticmethod
def get_column_comment_index():
""" """
return 2
@staticmethod
def get_node_type_expression():
""" """
return 0
@staticmethod
def get_node_type_feature():
""" """
return 1
@staticmethod
def get_node_type_comment():
""" """
return 2
def dragMoveEvent(self, e):
""" """
super(CapaExplorerRulgenEditor, self).dragMoveEvent(e)
    def dragEnterEvent(self, e):
        """ """
        super(CapaExplorerRulgenEditor, self).dragEnterEvent(e)
def dropEvent(self, e):
""" """
if not self.indexAt(e.pos()).isValid():
return
super(CapaExplorerRulgenEditor, self).dropEvent(e)
# self.prune_expressions()
self.update_preview()
self.expandAll()
def reset_view(self):
""" """
self.root = None
self.clear()
def slot_item_changed(self, item, column):
""" """
if self.is_editing:
self.update_preview()
self.is_editing = False
def slot_remove_selected(self, action):
""" """
for o in self.selectedItems():
if o == self.root:
self.takeTopLevelItem(self.indexOfTopLevelItem(o))
self.root = None
continue
o.parent().removeChild(o)
def slot_nest_features(self, action):
""" """
# create a new parent under root node, by default; new node added last position in tree
new_parent = self.new_expression_node(self.root, (action.data()[0], ""))
if "basic block" in action.data()[0]:
# add default child expression when nesting under basic block
new_parent.setExpanded(True)
new_parent = self.new_expression_node(new_parent, ("- or:", ""))
for o in self.get_features(selected=True):
# take child from its parent by index, add to new parent
new_parent.addChild(o.parent().takeChild(o.parent().indexOfChild(o)))
# ensure new parent expanded
new_parent.setExpanded(True)
def slot_edit_expression(self, action):
""" """
expression, o = action.data()
if "basic block" in expression and "basic block" not in o.text(
CapaExplorerRulgenEditor.get_column_feature_index()
):
# current expression is "basic block", and not changing to "basic block" expression
children = o.takeChildren()
new_parent = self.new_expression_node(o, ("- or:", ""))
for child in children:
new_parent.addChild(child)
new_parent.setExpanded(True)
o.setText(CapaExplorerRulgenEditor.get_column_feature_index(), expression)
def slot_clear_all(self, action):
""" """
self.reset_view()
def slot_custom_context_menu_requested(self, pos):
""" """
if not self.indexAt(pos).isValid():
# user selected invalid index
self.load_custom_context_menu_invalid_index(pos)
elif self.itemAt(pos).capa_type == CapaExplorerRulgenEditor.get_node_type_expression():
# user selected expression node
self.load_custom_context_menu_expression(pos)
else:
# user selected feature node
self.load_custom_context_menu_feature(pos)
self.update_preview()
def slot_item_double_clicked(self, o, column):
""" """
if column in (
CapaExplorerRulgenEditor.get_column_comment_index(),
CapaExplorerRulgenEditor.get_column_description_index(),
):
o.setFlags(o.flags() | QtCore.Qt.ItemIsEditable)
self.editItem(o, column)
o.setFlags(o.flags() & ~QtCore.Qt.ItemIsEditable)
self.is_editing = True
def update_preview(self):
""" """
rule_text = self.preview.toPlainText()
if -1 != rule_text.find("features:"):
rule_text = rule_text[: rule_text.find("features:") + len("features:")]
rule_text += "\n"
else:
rule_text = rule_text.rstrip()
rule_text += "\n features:\n"
for o in iterate_tree(self):
feature, description, comment = map(lambda o: o.strip(), tuple(o.text(i) for i in range(3)))
rule_text += parse_node_for_feature(feature, description, comment, calc_item_depth(o))
# FIXME we avoid circular update by disabling signals when updating
# the preview. Preferably we would refactor the code to avoid this
# in the first place
self.preview.blockSignals(True)
self.preview.setPlainText(rule_text)
self.preview.blockSignals(False)
# emit signal so views can update
self.updated.emit()
def load_custom_context_menu_invalid_index(self, pos):
""" """
actions = (("Remove all", (), self.slot_clear_all),)
menu = build_context_menu(self.parent(), actions)
menu.exec_(self.viewport().mapToGlobal(pos))
def load_custom_context_menu_feature(self, pos):
""" """
actions = (("Remove selection", (), self.slot_remove_selected),)
sub_actions = (
("and", ("- and:",), self.slot_nest_features),
("or", ("- or:",), self.slot_nest_features),
("not", ("- not:",), self.slot_nest_features),
("optional", ("- optional:",), self.slot_nest_features),
("basic block", ("- basic block:",), self.slot_nest_features),
)
# build submenu with modify actions
sub_menu = build_context_menu(self.parent(), sub_actions)
sub_menu.setTitle("Nest feature%s" % ("" if len(tuple(self.get_features(selected=True))) == 1 else "s"))
# build main menu with submenu + main actions
menu = build_context_menu(self.parent(), (sub_menu,) + actions)
menu.exec_(self.viewport().mapToGlobal(pos))
def load_custom_context_menu_expression(self, pos):
""" """
actions = (("Remove expression", (), self.slot_remove_selected),)
sub_actions = (
("and", ("- and:", self.itemAt(pos)), self.slot_edit_expression),
("or", ("- or:", self.itemAt(pos)), self.slot_edit_expression),
("not", ("- not:", self.itemAt(pos)), self.slot_edit_expression),
("optional", ("- optional:", self.itemAt(pos)), self.slot_edit_expression),
("basic block", ("- basic block:", self.itemAt(pos)), self.slot_edit_expression),
)
# build submenu with modify actions
sub_menu = build_context_menu(self.parent(), sub_actions)
sub_menu.setTitle("Modify")
# build main menu with submenu + main actions
menu = build_context_menu(self.parent(), (sub_menu,) + actions)
menu.exec_(self.viewport().mapToGlobal(pos))
def style_expression_node(self, o):
""" """
font = QtGui.QFont()
font.setBold(True)
o.setFont(CapaExplorerRulgenEditor.get_column_feature_index(), font)
def style_feature_node(self, o):
""" """
font = QtGui.QFont()
brush = QtGui.QBrush()
font.setFamily("Courier")
font.setWeight(QtGui.QFont.Medium)
brush.setColor(QtGui.QColor(*COLOR_GREEN_RGB))
o.setFont(CapaExplorerRulgenEditor.get_column_feature_index(), font)
o.setForeground(CapaExplorerRulgenEditor.get_column_feature_index(), brush)
def style_comment_node(self, o):
""" """
font = QtGui.QFont()
font.setBold(True)
font.setFamily("Courier")
o.setFont(CapaExplorerRulgenEditor.get_column_feature_index(), font)
def set_expression_node(self, o):
""" """
setattr(o, "capa_type", CapaExplorerRulgenEditor.get_node_type_expression())
self.style_expression_node(o)
def set_feature_node(self, o):
""" """
setattr(o, "capa_type", CapaExplorerRulgenEditor.get_node_type_feature())
o.setFlags(o.flags() & ~QtCore.Qt.ItemIsDropEnabled)
self.style_feature_node(o)
def set_comment_node(self, o):
""" """
setattr(o, "capa_type", CapaExplorerRulgenEditor.get_node_type_comment())
o.setFlags(o.flags() & ~QtCore.Qt.ItemIsDropEnabled)
self.style_comment_node(o)
def new_expression_node(self, parent, values=()):
""" """
o = QtWidgets.QTreeWidgetItem(parent)
self.set_expression_node(o)
for (i, v) in enumerate(values):
o.setText(i, v)
return o
def new_feature_node(self, parent, values=()):
""" """
o = QtWidgets.QTreeWidgetItem(parent)
self.set_feature_node(o)
for (i, v) in enumerate(values):
o.setText(i, v)
return o
def new_comment_node(self, parent, values=()):
""" """
o = QtWidgets.QTreeWidgetItem(parent)
self.set_comment_node(o)
for (i, v) in enumerate(values):
o.setText(i, v)
return o
def update_features(self, features):
""" """
if not self.root:
# root node does not exist, create default node, set expanded
self.root = self.new_expression_node(self, ("- or:", ""))
# build feature counts
counted = list(zip(Counter(features).keys(), Counter(features).values()))
# single features
for (k, v) in filter(lambda t: t[1] == 1, counted):
if isinstance(k, (capa.features.common.String,)):
value = '"%s"' % capa.features.common.escape_string(k.get_value_str())
else:
value = k.get_value_str()
self.new_feature_node(self.root, ("- %s: %s" % (k.name.lower(), value), ""))
# n > 1 features
for (k, v) in filter(lambda t: t[1] > 1, counted):
if k.value:
if isinstance(k, (capa.features.common.String,)):
value = '"%s"' % capa.features.common.escape_string(k.get_value_str())
else:
value = k.get_value_str()
display = "- count(%s(%s)): %d" % (k.name.lower(), value, v)
else:
display = "- count(%s): %d" % (k.name.lower(), v)
self.new_feature_node(self.root, (display, ""))
self.expandAll()
self.update_preview()
def load_features_from_yaml(self, rule_text, update_preview=False):
""" """
def add_node(parent, node):
if node.text(0).startswith("description:"):
if parent.childCount():
parent.child(parent.childCount() - 1).setText(1, node.text(0).lstrip("description:").lstrip())
else:
parent.setText(1, node.text(0).lstrip("description:").lstrip())
elif node.text(0).startswith("- description:"):
parent.setText(1, node.text(0).lstrip("- description:").lstrip())
else:
parent.addChild(node)
def build(parent, nodes):
if nodes:
child_lvl = nodes[0].capa_level
while nodes:
node = nodes.pop(0)
if node.capa_level == child_lvl:
add_node(parent, node)
elif node.capa_level > child_lvl:
nodes.insert(0, node)
build(parent.child(parent.childCount() - 1), nodes)
else:
parent = parent.parent() if parent.parent() else parent
add_node(parent, node)
self.reset_view()
# check for lack of features block
if -1 == rule_text.find("features:"):
return
rule_features = rule_text[rule_text.find("features:") + len("features:") :].strip()
rule_nodes = list(yaml_to_nodes(rule_features))
# check for lack of nodes
if not rule_nodes:
return
for o in rule_nodes:
(self.set_expression_node, self.set_feature_node, self.set_comment_node)[o.capa_type](o)
self.root = rule_nodes.pop(0)
self.addTopLevelItem(self.root)
if update_preview:
self.preview.blockSignals(True)
self.preview.setPlainText(rule_text)
self.preview.blockSignals(False)
build(self.root, rule_nodes)
self.expandAll()
def get_features(self, selected=False, ignore=()):
""" """
for feature in filter(
lambda o: o.capa_type
in (CapaExplorerRulgenEditor.get_node_type_feature(), CapaExplorerRulgenEditor.get_node_type_comment()),
tuple(iterate_tree(self)),
):
if feature in ignore:
continue
if selected and not feature.isSelected():
continue
yield feature
def get_expressions(self, selected=False, ignore=()):
""" """
for expression in filter(
lambda o: o.capa_type == CapaExplorerRulgenEditor.get_node_type_expression(), tuple(iterate_tree(self))
):
if expression in ignore:
continue
if selected and not expression.isSelected():
continue
yield expression
class CapaExplorerRulegenFeatures(QtWidgets.QTreeWidget):
def __init__(self, editor, parent=None):
""" """
super(CapaExplorerRulegenFeatures, self).__init__(parent)
self.parent_items = {}
self.editor = editor
self.setHeaderLabels(["Feature", "Virtual Address"])
self.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
self.setStyleSheet("QTreeView::item {padding-right: 15 px;padding-bottom: 2 px;}")
self.setExpandsOnDoubleClick(False)
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
# connect slots
self.itemDoubleClicked.connect(self.slot_item_double_clicked)
self.customContextMenuRequested.connect(self.slot_custom_context_menu_requested)
self.reset_view()
@staticmethod
def get_column_feature_index():
""" """
return 0
@staticmethod
def get_column_address_index():
""" """
return 1
@staticmethod
def get_node_type_parent():
""" """
return 0
@staticmethod
def get_node_type_leaf():
""" """
return 1
def reset_view(self):
""" """
self.clear()
def slot_add_selected_features(self, action):
""" """
selected = [item.data(0, 0x100) for item in self.selectedItems()]
if selected:
self.editor.update_features(selected)
def slot_custom_context_menu_requested(self, pos):
""" """
actions = []
action_add_features_fmt = ""
selected_items_count = len(self.selectedItems())
if selected_items_count == 0:
return
if selected_items_count == 1:
action_add_features_fmt = "Add feature"
else:
action_add_features_fmt = "Add %d features" % selected_items_count
actions.append((action_add_features_fmt, (), self.slot_add_selected_features))
menu = build_context_menu(self.parent(), actions)
menu.exec_(self.viewport().mapToGlobal(pos))
def slot_item_double_clicked(self, o, column):
""" """
if column == CapaExplorerRulegenFeatures.get_column_address_index() and o.text(column):
idc.jumpto(int(o.text(column), 0x10))
elif o.capa_type == CapaExplorerRulegenFeatures.get_node_type_leaf():
self.editor.update_features([o.data(0, 0x100)])
def show_all_items(self):
""" """
for o in iterate_tree(self):
o.setHidden(False)
o.setExpanded(False)
def filter_items_by_text(self, text):
""" """
if text:
for o in iterate_tree(self):
data = o.data(0, 0x100)
if data:
to_match = data.get_value_str()
if not to_match or text.lower() not in to_match.lower():
o.setHidden(True)
continue
o.setHidden(False)
o.setExpanded(True)
else:
self.show_all_items()
def style_parent_node(self, o):
""" """
font = QtGui.QFont()
font.setBold(True)
o.setFont(CapaExplorerRulegenFeatures.get_column_feature_index(), font)
def style_leaf_node(self, o):
""" """
font = QtGui.QFont("Courier", weight=QtGui.QFont.Bold)
brush = QtGui.QBrush()
o.setFont(CapaExplorerRulegenFeatures.get_column_feature_index(), font)
o.setFont(CapaExplorerRulegenFeatures.get_column_address_index(), font)
brush.setColor(QtGui.QColor(*COLOR_GREEN_RGB))
o.setForeground(CapaExplorerRulegenFeatures.get_column_feature_index(), brush)
brush.setColor(QtGui.QColor(*COLOR_BLUE_RGB))
o.setForeground(CapaExplorerRulegenFeatures.get_column_address_index(), brush)
def set_parent_node(self, o):
""" """
o.setFlags(o.flags() & ~QtCore.Qt.ItemIsSelectable)
setattr(o, "capa_type", CapaExplorerRulegenFeatures.get_node_type_parent())
self.style_parent_node(o)
def set_leaf_node(self, o):
""" """
setattr(o, "capa_type", CapaExplorerRulegenFeatures.get_node_type_leaf())
self.style_leaf_node(o)
def new_parent_node(self, parent, data, feature=None):
""" """
o = QtWidgets.QTreeWidgetItem(parent)
self.set_parent_node(o)
for (i, v) in enumerate(data):
o.setText(i, v)
if feature:
o.setData(0, 0x100, feature)
return o
def new_leaf_node(self, parent, data, feature=None):
""" """
o = QtWidgets.QTreeWidgetItem(parent)
self.set_leaf_node(o)
for (i, v) in enumerate(data):
o.setText(i, v)
if feature:
o.setData(0, 0x100, feature)
return o
def load_features(self, file_features, func_features={}):
""" """
self.parse_features_for_tree(self.new_parent_node(self, ("File Scope",)), file_features)
if func_features:
self.parse_features_for_tree(self.new_parent_node(self, ("Function/Basic Block Scope",)), func_features)
def parse_features_for_tree(self, parent, features):
""" """
self.parent_items = {}
def format_address(e):
return "%X" % e if e else ""
def format_feature(feature):
""" """
name = feature.name.lower()
value = feature.get_value_str()
if isinstance(feature, (capa.features.common.String,)):
value = '"%s"' % capa.features.common.escape_string(value)
return "%s(%s)" % (name, value)
for (feature, eas) in sorted(features.items(), key=lambda k: sorted(k[1])):
if isinstance(feature, capa.features.basicblock.BasicBlock):
# filter basic blocks for now, we may want to add these back in some time
# in the future
continue
# level 0
if type(feature) not in self.parent_items:
self.parent_items[type(feature)] = self.new_parent_node(parent, (feature.name.lower(),))
# level 1
if feature not in self.parent_items:
if len(eas) > 1:
self.parent_items[feature] = self.new_parent_node(
self.parent_items[type(feature)], (format_feature(feature),), feature=feature
)
else:
self.parent_items[feature] = self.new_leaf_node(
self.parent_items[type(feature)], (format_feature(feature),), feature=feature
)
# level n > 1
if len(eas) > 1:
for ea in sorted(eas):
self.new_leaf_node(
self.parent_items[feature], (format_feature(feature), format_address(ea)), feature=feature
)
else:
ea = eas.pop()
for (i, v) in enumerate((format_feature(feature), format_address(ea))):
self.parent_items[feature].setText(i, v)
self.parent_items[feature].setData(0, 0x100, feature)
class CapaExplorerQtreeView(QtWidgets.QTreeView):
"""tree view used to display hierarchical capa results
view controls UI action responses and displays data from CapaExplorerDataModel
view does not modify CapaExplorerDataModel directly - data modifications should be implemented
in CapaExplorerDataModel
"""
def __init__(self, model, parent=None):
"""initialize view"""
super(CapaExplorerQtreeView, self).__init__(parent)
self.setModel(model)
self.model = model
self.parent = parent
# control when we resize columns
self.should_resize_columns = True
# configure custom UI controls
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.setExpandsOnDoubleClick(False)
self.setSortingEnabled(True)
self.model.setDynamicSortFilter(False)
# configure view columns to auto-resize
for idx in range(CapaExplorerDataModel.COLUMN_COUNT):
self.header().setSectionResizeMode(idx, QtWidgets.QHeaderView.Interactive)
# disable stretch to enable horizontal scroll for last column, when needed
self.header().setStretchLastSection(False)
# connect slots to resize columns when expanded or collapsed
self.expanded.connect(self.slot_resize_columns_to_content)
self.collapsed.connect(self.slot_resize_columns_to_content)
# connect slots
self.customContextMenuRequested.connect(self.slot_custom_context_menu_requested)
self.doubleClicked.connect(self.slot_double_click)
self.setStyleSheet("QTreeView::item {padding-right: 15 px;padding-bottom: 2 px;}")
def reset_ui(self, should_sort=True):
"""reset user interface changes
called when view should reset UI display e.g. expand items, resize columns
@param should_sort: True, sort results after reset, False don't sort results after reset
"""
if should_sort:
self.sortByColumn(CapaExplorerDataModel.COLUMN_INDEX_RULE_INFORMATION, QtCore.Qt.AscendingOrder)
self.should_resize_columns = False
self.expandToDepth(0)
self.should_resize_columns = True
self.slot_resize_columns_to_content()
def slot_resize_columns_to_content(self):
"""reset view columns to contents"""
if self.should_resize_columns:
self.header().resizeSections(QtWidgets.QHeaderView.ResizeToContents)
# limit size of first section
if self.header().sectionSize(0) > MAX_SECTION_SIZE:
self.header().resizeSection(0, MAX_SECTION_SIZE)
def map_index_to_source_item(self, model_index):
"""map proxy model index to source model item
@param model_index: QModelIndex
@retval QObject
"""
# assume that self.model here is either:
# - CapaExplorerDataModel, or
# - QSortFilterProxyModel subclass
#
# The ProxyModels may be chained,
# so keep resolving the index the CapaExplorerDataModel.
model = self.model
while not isinstance(model, CapaExplorerDataModel):
if not model_index.isValid():
raise ValueError("invalid index")
model_index = model.mapToSource(model_index)
model = model.sourceModel()
if not model_index.isValid():
raise ValueError("invalid index")
return model_index.internalPointer()
def send_data_to_clipboard(self, data):
"""copy data to the clipboard
@param data: data to be copied
"""
clip = QtWidgets.QApplication.clipboard()
clip.clear(mode=clip.Clipboard)
clip.setText(data, mode=clip.Clipboard)
def new_action(self, display, data, slot):
"""create action for context menu
@param display: text displayed to user in context menu
@param data: data passed to slot
@param slot: slot to connect
@retval QAction
"""
action = QtWidgets.QAction(display, self.parent)
action.setData(data)
action.triggered.connect(lambda checked: slot(action))
return action
def load_default_context_menu_actions(self, data):
"""yield actions specific to function custom context menu
@param data: tuple
@yield QAction
"""
default_actions = (
("Copy column", data, self.slot_copy_column),
("Copy row", data, self.slot_copy_row),
)
# add default actions
for action in default_actions:
yield self.new_action(*action)
def load_function_context_menu_actions(self, data):
"""yield actions specific to function custom context menu
@param data: tuple
@yield QAction
"""
function_actions = (("Rename function", data, self.slot_rename_function),)
# add function actions
for action in function_actions:
yield self.new_action(*action)
# add default actions
for action in self.load_default_context_menu_actions(data):
yield action
def load_default_context_menu(self, pos, item, model_index):
"""create default custom context menu
creates custom context menu containing default actions
@param pos: cursor position
@param item: CapaExplorerDataItem
@param model_index: QModelIndex
@retval QMenu
"""
menu = QtWidgets.QMenu()
for action in self.load_default_context_menu_actions((pos, item, model_index)):
menu.addAction(action)
return menu
def load_function_item_context_menu(self, pos, item, model_index):
"""create function custom context menu
creates custom context menu with both default actions and function actions
@param pos: cursor position
@param item: CapaExplorerDataItem
@param model_index: QModelIndex
@retval QMenu
"""
menu = QtWidgets.QMenu()
for action in self.load_function_context_menu_actions((pos, item, model_index)):
menu.addAction(action)
return menu
def show_custom_context_menu(self, menu, pos):
"""display custom context menu in view
@param menu: QMenu to display
@param pos: cursor position
"""
if menu:
menu.exec_(self.viewport().mapToGlobal(pos))
def slot_copy_column(self, action):
"""slot connected to custom context menu
allows user to select a column and copy the data to clipboard
@param action: QAction
"""
_, item, model_index = action.data()
self.send_data_to_clipboard(item.data(model_index.column()))
def slot_copy_row(self, action):
"""slot connected to custom context menu
allows user to select a row and copy the space-delimited data to clipboard
@param action: QAction
"""
_, item, _ = action.data()
self.send_data_to_clipboard(str(item))
def slot_rename_function(self, action):
"""slot connected to custom context menu
        allows user to edit a function name and push changes to IDA
@param action: QAction
"""
_, item, model_index = action.data()
# make item temporary edit, reset after user is finished
item.setIsEditable(True)
self.edit(model_index)
item.setIsEditable(False)
def slot_custom_context_menu_requested(self, pos):
"""slot connected to custom context menu request
displays custom context menu to user containing action relevant to the item selected
@param pos: cursor position
"""
model_index = self.indexAt(pos)
if not model_index.isValid():
return
item = self.map_index_to_source_item(model_index)
column = model_index.column()
menu = None
if CapaExplorerDataModel.COLUMN_INDEX_RULE_INFORMATION == column and isinstance(item, CapaExplorerFunctionItem):
# user hovered function item
menu = self.load_function_item_context_menu(pos, item, model_index)
else:
# user hovered default item
menu = self.load_default_context_menu(pos, item, model_index)
# show custom context menu at view position
self.show_custom_context_menu(menu, pos)
def slot_double_click(self, model_index):
"""slot connected to double-click event
if address column clicked, navigate IDA to address, else un/expand item clicked
@param model_index: QModelIndex
"""
if not model_index.isValid():
return
item = self.map_index_to_source_item(model_index)
column = model_index.column()
if CapaExplorerDataModel.COLUMN_INDEX_VIRTUAL_ADDRESS == column and item.location:
# user double-clicked virtual address column - navigate IDA to address
idc.jumpto(item.location)
if CapaExplorerDataModel.COLUMN_INDEX_RULE_INFORMATION == column:
# user double-clicked information column - un/expand
self.collapse(model_index) if self.isExpanded(model_index) else self.expand(model_index)
| 35.566858
| 120
| 0.596688
|
ff71b7c5d9327ff29b90fa58d83ad8add73693d9
| 20,306
|
py
|
Python
|
venv/Lib/site-packages/scipy/sparse/linalg/tests/test_matfuncs.py
|
mokshagna517/recommendation_sys
|
bc8ced225dff3c93d619ff5da363f42d0aa0676c
|
[
"MIT"
] | 366
|
2019-04-07T20:34:48.000Z
|
2022-03-29T07:35:38.000Z
|
venv/Lib/site-packages/scipy/sparse/linalg/tests/test_matfuncs.py
|
mokshagna517/recommendation_sys
|
bc8ced225dff3c93d619ff5da363f42d0aa0676c
|
[
"MIT"
] | 26
|
2020-03-24T18:07:06.000Z
|
2022-03-12T00:12:27.000Z
|
venv/Lib/site-packages/scipy/sparse/linalg/tests/test_matfuncs.py
|
mokshagna517/recommendation_sys
|
bc8ced225dff3c93d619ff5da363f42d0aa0676c
|
[
"MIT"
] | 61
|
2019-04-08T00:58:14.000Z
|
2022-03-20T23:04:28.000Z
|
#
# Created by: Pearu Peterson, March 2002
#
""" Test functions for scipy.linalg.matfuncs module
"""
from __future__ import division, print_function, absolute_import
import math
import numpy as np
from numpy import array, eye, exp, random
from numpy.linalg import matrix_power
from numpy.testing import (
assert_allclose, assert_, assert_array_almost_equal, assert_equal,
assert_array_almost_equal_nulp)
from scipy._lib._numpy_compat import suppress_warnings
from scipy.sparse import csc_matrix, SparseEfficiencyWarning
from scipy.sparse.construct import eye as speye
from scipy.sparse.linalg.matfuncs import (expm, _expm,
ProductOperator, MatrixPowerOperator,
_onenorm_matrix_power_nnm)
from scipy.sparse.sputils import matrix
from scipy.linalg import logm
from scipy.special import factorial, binom
import scipy.sparse
import scipy.sparse.linalg
def _burkardt_13_power(n, p):
"""
A helper function for testing matrix functions.
Parameters
----------
n : integer greater than 1
Order of the square matrix to be returned.
p : non-negative integer
Power of the matrix.
Returns
-------
out : ndarray representing a square matrix
A Forsythe matrix of order n, raised to the power p.
"""
# Input validation.
if n != int(n) or n < 2:
raise ValueError('n must be an integer greater than 1')
n = int(n)
if p != int(p) or p < 0:
raise ValueError('p must be a non-negative integer')
p = int(p)
# Construct the matrix explicitly.
a, b = divmod(p, n)
large = np.power(10.0, -n*a)
small = large * np.power(10.0, -n)
return np.diag([large]*(n-b), b) + np.diag([small]*b, b-n)
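# For example, _burkardt_13_power(3, 1) is the 3x3 Forsythe-type matrix
#   [[0,    1, 0],
#    [0,    0, 1],
#    [1e-3, 0, 0]]
# (illustrative value, consistent with the construction above).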
def test_onenorm_matrix_power_nnm():
np.random.seed(1234)
for n in range(1, 5):
for p in range(5):
M = np.random.random((n, n))
Mp = np.linalg.matrix_power(M, p)
observed = _onenorm_matrix_power_nnm(M, p)
expected = np.linalg.norm(Mp, 1)
assert_allclose(observed, expected)
class TestExpM(object):
def test_zero_ndarray(self):
a = array([[0.,0],[0,0]])
assert_array_almost_equal(expm(a),[[1,0],[0,1]])
def test_zero_sparse(self):
a = csc_matrix([[0.,0],[0,0]])
assert_array_almost_equal(expm(a).toarray(),[[1,0],[0,1]])
def test_zero_matrix(self):
a = matrix([[0.,0],[0,0]])
assert_array_almost_equal(expm(a),[[1,0],[0,1]])
def test_misc_types(self):
A = expm(np.array([[1]]))
assert_allclose(expm(((1,),)), A)
assert_allclose(expm([[1]]), A)
assert_allclose(expm(matrix([[1]])), A)
assert_allclose(expm(np.array([[1]])), A)
assert_allclose(expm(csc_matrix([[1]])).A, A)
B = expm(np.array([[1j]]))
assert_allclose(expm(((1j,),)), B)
assert_allclose(expm([[1j]]), B)
assert_allclose(expm(matrix([[1j]])), B)
assert_allclose(expm(csc_matrix([[1j]])).A, B)
def test_bidiagonal_sparse(self):
A = csc_matrix([
[1, 3, 0],
[0, 1, 5],
[0, 0, 2]], dtype=float)
e1 = math.exp(1)
e2 = math.exp(2)
expected = np.array([
[e1, 3*e1, 15*(e2 - 2*e1)],
[0, e1, 5*(e2 - e1)],
[0, 0, e2]], dtype=float)
observed = expm(A).toarray()
assert_array_almost_equal(observed, expected)
def test_padecases_dtype_float(self):
for dtype in [np.float32, np.float64]:
for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
A = scale * eye(3, dtype=dtype)
observed = expm(A)
expected = exp(scale) * eye(3, dtype=dtype)
assert_array_almost_equal_nulp(observed, expected, nulp=100)
def test_padecases_dtype_complex(self):
for dtype in [np.complex64, np.complex128]:
for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
A = scale * eye(3, dtype=dtype)
observed = expm(A)
expected = exp(scale) * eye(3, dtype=dtype)
assert_array_almost_equal_nulp(observed, expected, nulp=100)
def test_padecases_dtype_sparse_float(self):
# float32 and complex64 lead to errors in spsolve/UMFpack
dtype = np.float64
for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
a = scale * speye(3, 3, dtype=dtype, format='csc')
e = exp(scale) * eye(3, dtype=dtype)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a csc_matrix is expensive.")
exact_onenorm = _expm(a, use_exact_onenorm=True).toarray()
inexact_onenorm = _expm(a, use_exact_onenorm=False).toarray()
assert_array_almost_equal_nulp(exact_onenorm, e, nulp=100)
assert_array_almost_equal_nulp(inexact_onenorm, e, nulp=100)
def test_padecases_dtype_sparse_complex(self):
# float32 and complex64 lead to errors in spsolve/UMFpack
dtype = np.complex128
for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
a = scale * speye(3, 3, dtype=dtype, format='csc')
e = exp(scale) * eye(3, dtype=dtype)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a csc_matrix is expensive.")
assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100)
def test_logm_consistency(self):
random.seed(1234)
for dtype in [np.float64, np.complex128]:
for n in range(1, 10):
for scale in [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2]:
# make logm(A) be of a given scale
A = (eye(n) + random.rand(n, n) * scale).astype(dtype)
if np.iscomplexobj(A):
A = A + 1j * random.rand(n, n) * scale
assert_array_almost_equal(expm(logm(A)), A)
def test_integer_matrix(self):
Q = np.array([
[-3, 1, 1, 1],
[1, -3, 1, 1],
[1, 1, -3, 1],
[1, 1, 1, -3]])
assert_allclose(expm(Q), expm(1.0 * Q))
def test_integer_matrix_2(self):
# Check for integer overflows
Q = np.array([[-500, 500, 0, 0],
[0, -550, 360, 190],
[0, 630, -630, 0],
[0, 0, 0, 0]], dtype=np.int16)
assert_allclose(expm(Q), expm(1.0 * Q))
Q = csc_matrix(Q)
assert_allclose(expm(Q).A, expm(1.0 * Q).A)
def test_triangularity_perturbation(self):
# Experiment (1) of
# Awad H. Al-Mohy and Nicholas J. Higham (2012)
# Improved Inverse Scaling and Squaring Algorithms
# for the Matrix Logarithm.
A = np.array([
[3.2346e-1, 3e4, 3e4, 3e4],
[0, 3.0089e-1, 3e4, 3e4],
[0, 0, 3.221e-1, 3e4],
[0, 0, 0, 3.0744e-1]],
dtype=float)
A_logm = np.array([
[-1.12867982029050462e+00, 9.61418377142025565e+04,
-4.52485573953179264e+09, 2.92496941103871812e+14],
[0.00000000000000000e+00, -1.20101052953082288e+00,
9.63469687211303099e+04, -4.68104828911105442e+09],
[0.00000000000000000e+00, 0.00000000000000000e+00,
-1.13289322264498393e+00, 9.53249183094775653e+04],
[0.00000000000000000e+00, 0.00000000000000000e+00,
0.00000000000000000e+00, -1.17947533272554850e+00]],
dtype=float)
assert_allclose(expm(A_logm), A, rtol=1e-4)
# Perturb the upper triangular matrix by tiny amounts,
# so that it becomes technically not upper triangular.
random.seed(1234)
tiny = 1e-17
A_logm_perturbed = A_logm.copy()
A_logm_perturbed[1, 0] = tiny
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Ill-conditioned.*")
A_expm_logm_perturbed = expm(A_logm_perturbed)
rtol = 1e-4
atol = 100 * tiny
assert_(not np.allclose(A_expm_logm_perturbed, A, rtol=rtol, atol=atol))
def test_burkardt_1(self):
# This matrix is diagonal.
# The calculation of the matrix exponential is simple.
#
# This is the first of a series of matrix exponential tests
# collected by John Burkardt from the following sources.
#
# Alan Laub,
# Review of "Linear System Theory" by Joao Hespanha,
# SIAM Review,
# Volume 52, Number 4, December 2010, pages 779--781.
#
# Cleve Moler and Charles Van Loan,
# Nineteen Dubious Ways to Compute the Exponential of a Matrix,
# Twenty-Five Years Later,
# SIAM Review,
# Volume 45, Number 1, March 2003, pages 3--49.
#
# Cleve Moler,
# Cleve's Corner: A Balancing Act for the Matrix Exponential,
# 23 July 2012.
#
# Robert Ward,
# Numerical computation of the matrix exponential
# with accuracy estimate,
# SIAM Journal on Numerical Analysis,
# Volume 14, Number 4, September 1977, pages 600--610.
exp1 = np.exp(1)
exp2 = np.exp(2)
A = np.array([
[1, 0],
[0, 2],
], dtype=float)
desired = np.array([
[exp1, 0],
[0, exp2],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_2(self):
# This matrix is symmetric.
# The calculation of the matrix exponential is straightforward.
A = np.array([
[1, 3],
[3, 2],
], dtype=float)
desired = np.array([
[39.322809708033859, 46.166301438885753],
[46.166301438885768, 54.711576854329110],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_3(self):
# This example is due to Laub.
# This matrix is ill-suited for the Taylor series approach.
# As powers of A are computed, the entries blow up too quickly.
exp1 = np.exp(1)
exp39 = np.exp(39)
A = np.array([
[0, 1],
[-39, -40],
], dtype=float)
desired = np.array([
[
39/(38*exp1) - 1/(38*exp39),
-np.expm1(-38) / (38*exp1)],
[
39*np.expm1(-38) / (38*exp1),
-1/(38*exp1) + 39/(38*exp39)],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_4(self):
# This example is due to Moler and Van Loan.
# The example will cause problems for the series summation approach,
# as well as for diagonal Pade approximations.
A = np.array([
[-49, 24],
[-64, 31],
], dtype=float)
U = np.array([[3, 1], [4, 2]], dtype=float)
V = np.array([[1, -1/2], [-2, 3/2]], dtype=float)
w = np.array([-17, -1], dtype=float)
desired = np.dot(U * np.exp(w), V)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_5(self):
# This example is due to Moler and Van Loan.
# This matrix is strictly upper triangular
# All powers of A are zero beyond some (low) limit.
# This example will cause problems for Pade approximations.
A = np.array([
[0, 6, 0, 0],
[0, 0, 6, 0],
[0, 0, 0, 6],
[0, 0, 0, 0],
], dtype=float)
desired = np.array([
[1, 6, 18, 36],
[0, 1, 6, 18],
[0, 0, 1, 6],
[0, 0, 0, 1],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_6(self):
# This example is due to Moler and Van Loan.
# This matrix does not have a complete set of eigenvectors.
# That means the eigenvector approach will fail.
exp1 = np.exp(1)
A = np.array([
[1, 1],
[0, 1],
], dtype=float)
desired = np.array([
[exp1, exp1],
[0, exp1],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_7(self):
# This example is due to Moler and Van Loan.
# This matrix is very close to example 5.
# Mathematically, it has a complete set of eigenvectors.
# Numerically, however, the calculation will be suspect.
exp1 = np.exp(1)
eps = np.spacing(1)
A = np.array([
[1 + eps, 1],
[0, 1 - eps],
], dtype=float)
desired = np.array([
[exp1, exp1],
[0, exp1],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_8(self):
# This matrix was an example in Wikipedia.
exp4 = np.exp(4)
exp16 = np.exp(16)
A = np.array([
[21, 17, 6],
[-5, -1, -6],
[4, 4, 16],
], dtype=float)
desired = np.array([
[13*exp16 - exp4, 13*exp16 - 5*exp4, 2*exp16 - 2*exp4],
[-9*exp16 + exp4, -9*exp16 + 5*exp4, -2*exp16 + 2*exp4],
[16*exp16, 16*exp16, 4*exp16],
], dtype=float) * 0.25
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_9(self):
# This matrix is due to the NAG Library.
# It is an example for function F01ECF.
A = np.array([
[1, 2, 2, 2],
[3, 1, 1, 2],
[3, 2, 1, 2],
[3, 3, 3, 1],
], dtype=float)
desired = np.array([
[740.7038, 610.8500, 542.2743, 549.1753],
[731.2510, 603.5524, 535.0884, 542.2743],
[823.7630, 679.4257, 603.5524, 610.8500],
[998.4355, 823.7630, 731.2510, 740.7038],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_10(self):
# This is Ward's example #1.
# It is defective and nonderogatory.
A = np.array([
[4, 2, 0],
[1, 4, 1],
[1, 1, 4],
], dtype=float)
assert_allclose(sorted(scipy.linalg.eigvals(A)), (3, 3, 6))
desired = np.array([
[147.8666224463699, 183.7651386463682, 71.79703239999647],
[127.7810855231823, 183.7651386463682, 91.88256932318415],
[127.7810855231824, 163.6796017231806, 111.9681062463718],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_11(self):
# This is Ward's example #2.
# It is a symmetric matrix.
A = np.array([
[29.87942128909879, 0.7815750847907159, -2.289519314033932],
[0.7815750847907159, 25.72656945571064, 8.680737820540137],
[-2.289519314033932, 8.680737820540137, 34.39400925519054],
], dtype=float)
assert_allclose(scipy.linalg.eigvalsh(A), (20, 30, 40))
desired = np.array([
[
5.496313853692378E+15,
-1.823188097200898E+16,
-3.047577080858001E+16],
[
-1.823188097200899E+16,
6.060522870222108E+16,
1.012918429302482E+17],
[
-3.047577080858001E+16,
1.012918429302482E+17,
1.692944112408493E+17],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_12(self):
# This is Ward's example #3.
# Ward's algorithm has difficulty estimating the accuracy
# of its results.
A = np.array([
[-131, 19, 18],
[-390, 56, 54],
[-387, 57, 52],
], dtype=float)
assert_allclose(sorted(scipy.linalg.eigvals(A)), (-20, -2, -1))
desired = np.array([
[-1.509644158793135, 0.3678794391096522, 0.1353352811751005],
[-5.632570799891469, 1.471517758499875, 0.4060058435250609],
[-4.934938326088363, 1.103638317328798, 0.5413411267617766],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_13(self):
# This is Ward's example #4.
# This is a version of the Forsythe matrix.
# The eigenvector problem is badly conditioned.
        # Ward's algorithm has difficulty estimating the accuracy
# of its results for this problem.
#
# Check the construction of one instance of this family of matrices.
A4_actual = _burkardt_13_power(4, 1)
A4_desired = [[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[1e-4, 0, 0, 0]]
assert_allclose(A4_actual, A4_desired)
# Check the expm for a few instances.
for n in (2, 3, 4, 10):
# Approximate expm using Taylor series.
# This works well for this matrix family
# because each matrix in the summation,
# even before dividing by the factorial,
# is entrywise positive with max entry 10**(-floor(p/n)*n).
k = max(1, int(np.ceil(16/n)))
desired = np.zeros((n, n), dtype=float)
for p in range(n*k):
Ap = _burkardt_13_power(n, p)
assert_equal(np.min(Ap), 0)
assert_allclose(np.max(Ap), np.power(10, -np.floor(p/n)*n))
desired += Ap / factorial(p)
actual = expm(_burkardt_13_power(n, 1))
assert_allclose(actual, desired)
def test_burkardt_14(self):
# This is Moler's example.
# This badly scaled matrix caused problems for MATLAB's expm().
A = np.array([
[0, 1e-8, 0],
[-(2e10 + 4e8/6.), -3, 2e10],
[200./3., 0, -200./3.],
], dtype=float)
desired = np.array([
[0.446849468283175, 1.54044157383952e-09, 0.462811453558774],
[-5743067.77947947, -0.0152830038686819, -4526542.71278401],
[0.447722977849494, 1.54270484519591e-09, 0.463480648837651],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_pascal(self):
# Test pascal triangle.
# Nilpotent exponential, used to trigger a failure (gh-8029)
for scale in [1.0, 1e-3, 1e-6]:
for n in range(120):
A = np.diag(np.arange(1, n + 1), -1) * scale
B = expm(A)
sc = scale**np.arange(n, -1, -1)
if np.any(sc < 1e-300):
continue
got = B
expected = binom(np.arange(n + 1)[:,None],
np.arange(n + 1)[None,:]) * sc[None,:] / sc[:,None]
err = abs(expected - got).max()
atol = 1e-13 * abs(expected).max()
assert_allclose(got, expected, atol=atol)
class TestOperators(object):
def test_product_operator(self):
random.seed(1234)
n = 5
k = 2
nsamples = 10
for i in range(nsamples):
A = np.random.randn(n, n)
B = np.random.randn(n, n)
C = np.random.randn(n, n)
D = np.random.randn(n, k)
op = ProductOperator(A, B, C)
assert_allclose(op.matmat(D), A.dot(B).dot(C).dot(D))
assert_allclose(op.T.matmat(D), (A.dot(B).dot(C)).T.dot(D))
def test_matrix_power_operator(self):
random.seed(1234)
n = 5
k = 2
p = 3
nsamples = 10
for i in range(nsamples):
A = np.random.randn(n, n)
B = np.random.randn(n, k)
op = MatrixPowerOperator(A, p)
assert_allclose(op.matmat(B), matrix_power(A, p).dot(B))
assert_allclose(op.T.matmat(B), matrix_power(A, p).T.dot(B))
| 36.390681
| 91
| 0.53792
|
3cf632a1343ff2abd90523065a8afd423d2c463d
| 14,114
|
py
|
Python
|
built-in/TensorFlow/Official/nlp/Textcnn_ID0123_For_Tensorflow/run_cnn.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 12
|
2020-12-13T08:34:24.000Z
|
2022-03-20T15:17:17.000Z
|
built-in/TensorFlow/Official/nlp/Textcnn_ID0123_For_Tensorflow/run_cnn.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 1
|
2022-01-20T03:11:05.000Z
|
2022-01-20T06:53:39.000Z
|
built-in/TensorFlow/Official/nlp/Textcnn_ID0123_For_Tensorflow/run_cnn.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 2
|
2021-07-10T12:40:46.000Z
|
2021-12-17T07:55:15.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from npu_bridge.npu_init import *
#from npu_bridge import *
import os
import sys
import time
from datetime import timedelta
import pickle
import numpy as np
import tensorflow as tf
from sklearn import metrics
from cnn_model import TCNNConfig, TextCNN
from data.cnews_loader import read_vocab, read_category, batch_iter, process_file, build_vocab
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--save_dir', dest='save_dir', default='checkpoints/textcnn')
parser.add_argument('--data_path', dest='data_path', default='./data/cnews', help='path of the dataset')
parser.add_argument('--precision_mode', dest='precision_mode', default='allow_fp32_to_fp16', help='precision mode')
parser.add_argument('--over_dump', dest='over_dump', default='False', help='if or not over detection')
parser.add_argument('--over_dump_path', dest='over_dump_path', default='./overdump', help='over dump path')
parser.add_argument('--data_dump_flag', dest='data_dump_flag', default='False', help='data dump flag')
parser.add_argument('--data_dump_step', dest='data_dump_step', default='10', help='data dump step')
parser.add_argument('--data_dump_path', dest='data_dump_path', default='./datadump', help='data dump path')
parser.add_argument('--profiling', dest='profiling', default='False', help='if or not profiling for performance debug')
parser.add_argument('--profiling_dump_path', dest='profiling_dump_path', default='./profiling', help='profiling path')
parser.add_argument('--autotune', dest='autotune', default='False', help='whether to enable autotune, default is False')
parser.add_argument('--npu_loss_scale', dest='npu_loss_scale', type=int, default=1)
parser.add_argument('--mode', dest='mode', default='train', choices=('train', 'test', 'train_and_eval'))
parser.add_argument('--batch_size', dest='batch_size', type=int, default=64)
parser.add_argument('--learning_rate', dest='learning_rate', type=float, default=0.001)
parser.add_argument('--num_epochs', dest='num_epochs', type=int, default=10)
args = parser.parse_args()
base_dir = args.data_path
train_dir = os.path.join(base_dir, 'cnews.train.txt')
test_dir = os.path.join(base_dir, 'cnews.test.txt')
val_dir = os.path.join(base_dir, 'cnews.val.txt')
vocab_dir = os.path.join(base_dir, 'cnews.vocab.txt')
save_dir = args.save_dir
save_path = os.path.join(save_dir, 'best_validation')
def get_time_dif(start_time):
    'Get the elapsed time since start_time.'
end_time = time.time()
time_dif = (end_time - start_time)
return timedelta(seconds=int(round(time_dif))), time_dif
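# Note: get_time_dif returns both a rounded timedelta (for display) and the raw elapsed
# seconds, e.g. roughly (timedelta(seconds=61), 61.2) for a ~61 second run (illustrative values).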
def feed_data(x_batch, y_batch, keep_prob):
feed_dict = {
model.input_x: x_batch,
model.input_y: y_batch,
model.keep_prob: keep_prob
}
return feed_dict
def evaluate(sess, x,y):
"""评估在某一数据上的准确率和损失"""
total_loss = 0.0
total_acc = 0.0
data_len = len(x)
batch_train = batch_iter_(x, y,256)
for x_batch, y_batch in batch_train:
batch_len = len(x_batch)
feed_dict = feed_data(x_batch, y_batch, 1.0)
(loss, acc) = sess.run([model.loss, model.acc], feed_dict=feed_dict)
total_loss += (loss * batch_len)
total_acc += (acc * batch_len)
return ((total_loss / data_len), (total_acc / data_len))
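# Note: per-batch loss/accuracy are weighted by batch length above, so the returned values
# are exact averages over all samples even when the final batch is smaller.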
class data_load(object):
def __init__(self, sess,x,y,is_train=True):
with tf.device('/cpu:0'):
self.x = x
self.y = y
self.x_ = tf.placeholder(self.x.dtype, self.x.shape)
self.y_ = tf.placeholder(self.y.dtype, self.y.shape)
self.sess = sess
dataset = tf.data.Dataset.from_tensor_slices((self.x_, self.y_))
if is_train:
dataset = dataset.shuffle(len(self.x))
dataset = dataset.repeat()
dataset = dataset.batch(len(self.x))
else:
dataset = dataset.batch(len(self.x))
dataset = dataset.prefetch(2)
self.iterator = dataset.make_initializable_iterator()
self.next = self.iterator.get_next()
self.sess.run(self.iterator.initializer, feed_dict={self.x_: self.x,self.y_: self.y})
def replay(self):
self.sess.run(self.iterator.initializer, feed_dict={self.x_: self.x,self.y_: self.y})
def batch_iter_(x, y, batch_size=64):
data_len = len(x)
num_batch = int((data_len - 1) / batch_size) + 1
for i in range(num_batch):
start_id = i * batch_size
end_id = min((i + 1) * batch_size, data_len)
yield x[start_id:end_id], y[start_id:end_id]
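# Illustrative: with len(x) == 10 and batch_size == 4 this yields slices of
# sizes 4, 4 and 2 (the final batch may be smaller than batch_size).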
def train():
print('Configuring TensorBoard and Saver...')
tensorboard_dir = 'tensorboard/textcnn'
if (not os.path.exists(tensorboard_dir)):
os.makedirs(tensorboard_dir)
tf.summary.scalar('loss', model.loss)
tf.summary.scalar('accuracy', model.acc)
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(tensorboard_dir)
saver = tf.train.Saver()
if (not os.path.exists(save_dir)):
os.makedirs(save_dir)
print('Loading training and validation data...')
start_time = time.time()
(x_train, y_train) = process_file(train_dir, word_to_id, cat_to_id, config.seq_length)
(x_val, y_val) = process_file(val_dir, word_to_id, cat_to_id, config.seq_length)
time_dif = get_time_dif(start_time)
print('Time usage:', time_dif)
############################ modify for run on npu ###############################
from npu_bridge.estimator import npu_ops
from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig
sess_config = tf.ConfigProto()
custom_op = sess_config.graph_options.rewrite_options.custom_optimizers.add()
custom_op.name = "NpuOptimizer"
custom_op.parameter_map["use_off_line"].b = True # 必须显示开启,在昇腾AI处理器执行训练
sess_config.graph_options.rewrite_options.remapping = RewriterConfig.OFF # 必须显示关闭remap
custom_op.parameter_map["dynamic_input"].b = True
custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("lazy_recompile")
#custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes(args.precision_mode)
if args.data_dump_flag.strip() == "True":
custom_op.parameter_map["enable_dump"].b = True
custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(args.data_dump_path)
custom_op.parameter_map["dump_step"].s = tf.compat.as_bytes(args.data_dump_step)
custom_op.parameter_map["dump_mode"].s = tf.compat.as_bytes("all")
if args.over_dump.strip() == "True":
        # dump_path: directory for the dump data; it must be created in advance on the training environment (container or host) and be writable by the configured run user
        custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(args.over_dump_path)
        # enable_dump_debug: whether to enable overflow detection
        custom_op.parameter_map["enable_dump_debug"].b = True
        # dump_debug_mode: overflow detection mode, one of all/aicore_overflow/atomic_overflow
custom_op.parameter_map["dump_debug_mode"].s = tf.compat.as_bytes("all")
if args.profiling.strip() == "True":
custom_op.parameter_map["profiling_mode"].b = False
profilingvalue = (
'{"output":"%s","training_trace":"on","task_trace":"on","aicpu":"on","fp_point":"","bp_point":""}' % (
args.profiling_dump_path))
custom_op.parameter_map["profiling_options"].s = tf.compat.as_bytes(profilingvalue)
############################ modify for run on npu ###############################
print("CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC Finish")
session = tf.Session(config=sess_config)
session.run(tf.global_variables_initializer())
writer.add_graph(session.graph)
train_len = len(x_train)
val_len = len(x_val)
train_data = data_load(session,x_train,y_train)
val = data_load(session,x_val,y_val,False)
x_v, y_v = session.run(val.next)
tf.io.write_graph(session.graph_def, 'checkpoints', 'train.pbtxt')
print('Training and evaluating...')
start_time = time.time()
data_time = 0
total_batch = 0
best_acc_val = 0.0
last_improved = 0
require_improvement = 10000
total_feed = 0
total_summary = 0
total_val = 0
total_save = 0
total_train = 0
flag = False
for epoch in range(config.num_epochs):
print('Epoch:', (epoch + 1))
x, y = session.run(train_data.next)
batch_train = batch_iter_(x, y, config.batch_size)
for (x_batch, y_batch) in batch_train:
feed_dict = feed_data(x_batch, y_batch, config.dropout_keep_prob)
#if total_batch % config.save_per_batch == 0:
                # write the training results to TensorBoard scalars every save_per_batch iterations
#s = session.run(merged_summary, feed_dict=feed_dict)
#writer.add_summary(s, total_batch)
if ((total_batch % config.print_per_batch) == 0):
feed_dict[model.keep_prob] = 1.0
(loss_train, acc_train) = session.run([model.loss, model.acc], feed_dict=feed_dict)
(loss_val, acc_val) = evaluate(session, x_v, y_v)
if (acc_val > best_acc_val):
best_acc_val = acc_val
last_improved = total_batch
saver.save(sess=session, save_path=save_path)
improved_str = '*'
else:
improved_str = ''
time_dif, time_sec = get_time_dif(start_time)
msg = ('Iter: {0:>6}, Train Loss: {1:>6.2}, Train Acc: {2:>7.2%},' + ' Val Loss: {3:>6.2}, Val Acc: {4:>7.2%}, Time: {5} {6} ({7})')
print(msg.format(total_batch, loss_train, acc_train, loss_val, acc_val, time_dif, improved_str, time_sec))
feed_dict[model.keep_prob] = config.dropout_keep_prob
session.run(model.optim, feed_dict=feed_dict)
#time_dif = get_time_dif(start_time)
#print("step:%d, time:%s"%(total_batch, time_dif))
total_batch += 1
if ((total_batch - last_improved) > require_improvement):
                # validation accuracy has not improved for a long time; stop training early
print('No optimization for a long time, auto-stopping...')
flag = True
                break # break out of the batch loop
if flag:
break
def test():
print('Loading test data...')
x_test, y_test = process_file(test_dir, word_to_id, cat_to_id, config.seq_length)
    from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig
    sess_config = tf.ConfigProto()
custom_op = sess_config.graph_options.rewrite_options.custom_optimizers.add()
custom_op.name = "NpuOptimizer"
sess_config.graph_options.rewrite_options.remapping = RewriterConfig.OFF # 必须显示关闭remap
custom_op.parameter_map["dynamic_input"].b = True
custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("lazy_recompile")
session = tf.Session(config=sess_config)
session.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(sess=session, save_path=save_path)
start_time = time.time()
print('Testing...')
(loss_test, acc_test) = evaluate(session, x_test, y_test)
msg = 'Test Loss: {0:>6.2}, Test Acc: {1:>7.2%}'
print(msg.format(loss_test, acc_test))
batch_size = 256
data_len = len(x_test)
num_batch = (int(((data_len - 1) / batch_size)) + 1)
y_test_cls = np.argmax(y_test, 1)
y_pred_cls = np.zeros(shape=len(x_test), dtype=np.int32)
for i in range(num_batch):
start_id = (i * batch_size)
end_id = min(((i + 1) * batch_size), data_len)
feed_dict = {model.input_x: x_test[start_id:end_id], model.keep_prob: 1.0}
y_pred_cls[start_id:end_id] = session.run(model.y_pred_cls, feed_dict=feed_dict)
print('Precision, Recall and F1-Score...')
print(metrics.classification_report(y_test_cls, y_pred_cls, target_names=categories))
print('Confusion Matrix...')
cm = metrics.confusion_matrix(y_test_cls, y_pred_cls)
print(cm)
time_dif = get_time_dif(start_time)
print('Time usage:', time_dif)
if (__name__ == '__main__'):
print('Configuring CNN model...')
config = TCNNConfig()
config.learning_rate = args.learning_rate
config.batch_size = args.batch_size
config.num_epochs = args.num_epochs
config.npu_loss_scale = args.npu_loss_scale
if (not os.path.exists(vocab_dir)):
build_vocab(train_dir, vocab_dir, config.vocab_size)
(categories, cat_to_id) = read_category()
(words, word_to_id) = read_vocab(vocab_dir)
config.vocab_size = len(words)
model = TextCNN(config)
if (args.mode == 'train'):
train()
elif (args.mode == 'test'):
test()
else:
train()
test()
| 47.046667
| 148
| 0.669335
|
3f5fed290bca2ecf56896248ebe20b810ccd9aa9
| 5,983
|
py
|
Python
|
test/integration/test_waf_rules_api_v1.py
|
KumarGanesanIBM/networking-python-sdk
|
c00801b8cb908496bb1b8635ee0a53513af57639
|
[
"Apache-2.0"
] | null | null | null |
test/integration/test_waf_rules_api_v1.py
|
KumarGanesanIBM/networking-python-sdk
|
c00801b8cb908496bb1b8635ee0a53513af57639
|
[
"Apache-2.0"
] | null | null | null |
test/integration/test_waf_rules_api_v1.py
|
KumarGanesanIBM/networking-python-sdk
|
c00801b8cb908496bb1b8635ee0a53513af57639
|
[
"Apache-2.0"
] | 1
|
2020-07-30T10:39:28.000Z
|
2020-07-30T10:39:28.000Z
|
# -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2020.
"""
Integration test code to execute waf rules api functions
"""
import os
import unittest
from ibm_cloud_networking_services.waf_rules_api_v1 import WafRulesApiV1
from ibm_cloud_networking_services.waf_rule_packages_api_v1 import WafRulePackagesApiV1
class TestWafRulesApiV1 (unittest.TestCase):
def setUp(self):
""" test case setup """
self.endpoint = os.getenv("API_ENDPOINT")
self.crn = os.getenv("CRN")
self.zone_id = os.getenv("ZONE_ID")
# create waf rules api record class object
self.wafRulesApi = WafRulesApiV1.new_instance(
crn=self.crn, zone_id=self.zone_id, service_name="cis_services")
self.wafRulesApi.set_service_url(self.endpoint)
self.wafRulePackagesApi = WafRulePackagesApiV1.new_instance(
crn=self.crn, zone_id=self.zone_id, service_name="cis_services")
self.wafRulePackagesApi.set_service_url(self.endpoint)
def tearDown(self):
""" tear down """
# Delete the resources
print("Clean up complete")
################## list_waf_rules ###################
def test_1_waf_rules(self):
""" test for success """
response = self.wafRulePackagesApi.list_waf_packages()
assert response is not None and response.result.get("success") is True
results = response.result.get("result")
all_waf_pkg_ids = []
# extract all package ids
for i in results:
all_waf_pkg_ids.append(str(i.get("id")))
check = True
# list waf rule for each package id
for i in all_waf_pkg_ids:
self.package_id = str(i)
response = self.wafRulesApi.list_waf_rules(
package_id=self.package_id).get_result()
assert response is not None and response.get('success') is True
if len(response.get("result")) != 0 and check is True:
# store a single rule id details, to be used in later test cases
rule_res = response.get("result")
self.identifier = rule_res[0].get("id")
self.mode = rule_res[0].get("mode")
self.priority = rule_res[0].get("priority")
self.group_id = rule_res[0].get("group").get("id")
self.description = rule_res[0].get("description")
self.pkg_id = self.package_id
check = False
"""list particular waf rules on the basis of mode, priority and other params"""
self.match = "all"
self.order = "status"
self.direction = "asc"
self.page = 1
self.per_page = 10
response = self.wafRulesApi.list_waf_rules(package_id=self.pkg_id, mode=self.mode, priority=self.priority, match=self.match, order=self.order,
group_id=self.group_id, description=self.description, direction=self.direction, page=self.page, per_page=self.per_page).get_result()
assert response is not None and response.get("success") is True
"""get waf rule"""
response = self.wafRulesApi.get_waf_rule(
package_id=self.pkg_id, identifier=self.identifier).get_result()
assert response is not None and response.get('success') is True
"""update waf rule"""
cis_modes = ['default', 'disable', 'simulate', 'block', 'challenge']
owasp_modes = ['on', 'off']
if self.mode in owasp_modes:
mode_list = owasp_modes
mode_name = "owasp"
else:
mode_list = cis_modes
mode_name = "cis"
for m in mode_list:
if m != self.mode:
new_mode = m
break
if mode_name == "cis":
self.cis = {"mode": new_mode}
self.owasp = None
else:
self.owasp = {"mode": new_mode}
self.cis = None
response = self.wafRulesApi.update_waf_rule(
package_id=self.pkg_id, identifier=self.identifier, cis=self.cis, owasp=self.owasp).get_result()
assert response is not None and response.get('success') is True
################## Negative test cases ###################
def test_2_waf_rules(self):
self.identifier = ""
self.package_id = ""
self.cis = ""
self.owasp = ""
""" list waf rule method without package_id """
with self.assertRaises(ValueError) as val:
self.wafRulesApi.list_waf_rules(package_id=None).get_result()
self.assertEqual(val.exception.msg, 'package_id must be provided')
""" get waf rule method without package_id """
with self.assertRaises(ValueError) as val:
self.wafRulesApi.get_waf_rule(
package_id=None, identifier=self.identifier).get_result()
self.assertEqual(val.exception.msg, 'package_id must be provided')
""" get waf rule method without identifier """
with self.assertRaises(ValueError) as val:
self.wafRulesApi.get_waf_rule(
package_id=self.package_id, identifier=None).get_result()
self.assertEqual(val.exception.msg, 'identifier must be provided')
""" update waf rule method without package_id """
with self.assertRaises(ValueError) as val:
self.wafRulesApi.update_waf_rule(
package_id=None, identifier=self.identifier, cis=self.cis, owasp=self.owasp).get_result()
self.assertEqual(val.exception.msg, 'package_id must be provided')
""" update waf rule method without identifier """
with self.assertRaises(ValueError) as val:
self.wafRulesApi.update_waf_rule(
package_id=self.package_id, identifier=None, cis=self.cis, owasp=self.owasp).get_result()
self.assertEqual(val.exception.msg, 'identifier must be provided')
if __name__ == '__main__':
unittest.main()
| 42.133803
| 183
| 0.614575
|
ffdd01c1f9190954f5f7560fd730a1205892407b
| 4,460
|
py
|
Python
|
Share/models.py
|
PPPokerFace/PokerFace
|
4d28a3bb093200669f2f7b337a907f035b650032
|
[
"MIT"
] | 1
|
2019-01-06T08:33:24.000Z
|
2019-01-06T08:33:24.000Z
|
Share/models.py
|
PPPokerFace/PokerFace
|
4d28a3bb093200669f2f7b337a907f035b650032
|
[
"MIT"
] | null | null | null |
Share/models.py
|
PPPokerFace/PokerFace
|
4d28a3bb093200669f2f7b337a907f035b650032
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Test(models.Model):
"""测试类"""
class Meta:
verbose_name_plural = '测试'
name = models.CharField(max_length=16)
age = models.IntegerField()
class Student(models.Model):
"""学生类"""
class Meta:
verbose_name_plural = '学生管理'
verbose_name = '学生'
stu_number = models.CharField(max_length=10, verbose_name='学号', primary_key=True)
province = models.CharField(max_length=10, verbose_name='省份')
class_stu = models.ForeignKey('Class', on_delete=models.CASCADE, verbose_name='班级')
college = models.ForeignKey('College', on_delete=models.CASCADE, verbose_name='学院')
name = models.CharField(max_length=16, verbose_name='姓名')
GENGER_CHOLICE = (
('男', '男'),
('女', '女'),
)
gender = models.CharField(max_length=2, choices=GENGER_CHOLICE, verbose_name='性别')
id_number = models.CharField(max_length=18, verbose_name='身份证')
dormitory = models.ForeignKey('Dormitory', on_delete=models.CASCADE, verbose_name='宿舍')
def __str__(self):
return str(self.stu_number)
class Dormitory(models.Model):
"""宿舍楼"""
class Meta:
verbose_name_plural = '宿舍管理'
verbose_name = '宿舍'
name = models.CharField(max_length=20, primary_key=True)
def __str__(self):
return str(self.name)
# class DormitoryNumber(models.Model):
# """宿舍号"""
# number = models.IntegerField()
# dormitory = models.ForeignKey('Dormitory', on_delete=models.CASCADE)
# def __str__(self):
# return str(self.dormitory.number)+"-"+str(self.number)
class Class(models.Model):
"""班级"""
class Meta:
verbose_name_plural = '班级管理'
verbose_name = '班级'
name = models.CharField(max_length=16, primary_key=True)
college = models.ForeignKey('College', on_delete=models.CASCADE)
    # master = models.CharField(max_length=16, null=True) # head teacher
    # instructor = models.CharField(max_length=16, null=True) # counselor
def __str__(self):
return self.name
class College(models.Model):
"""学院"""
class Meta:
verbose_name_plural = '学院管理'
verbose_name = '学院'
name = models.CharField(max_length=16, primary_key=True)
# president = models.CharField(max_length=16, null=True)
def __str__(self):
return self.name
class YearCheckInEvent(models.Model):
"""每年的签到事件"""
class Meta:
verbose_name_plural = '签到事件管理'
verbose_name = '签到事件'
year = models.IntegerField(unique=True,verbose_name='年份')
start_time = models.DateTimeField(verbose_name='开始时间')
end_time = models.DateTimeField(verbose_name='结束时间')
student = models.ManyToManyField(Student, through='YearCheckInData', verbose_name='学生')
def __str__(self):
return str(self.year)
class YearCheckInData(models.Model):
"""每年的签到事件数据"""
class Meta:
verbose_name_plural = '签到数据管理'
verbose_name = '签到数据'
year_check_in_event = models.ForeignKey('YearCheckInEvent', on_delete=models.CASCADE, blank=True, null=True,verbose_name='年份')
student = models.ForeignKey('Student', on_delete=models.CASCADE, blank=True, null=True,verbose_name='学生')
time = models.DateTimeField(blank=True, null=True,verbose_name='签到时间')
checked = models.BooleanField(default=False,verbose_name='是否签到')
def __str__(self):
return str(self.student)
class PublicInfo(models.Model):
"""信息公示"""
class Meta:
verbose_name_plural = '信息公示管理'
verbose_name = '公告'
title = models.CharField(max_length=50)
text = models.CharField(max_length=100)
class PostCard(models.Model):
"""帖子"""
class Meta:
verbose_name_plural = '帖子管理'
verbose_name = '帖子'
student = models.ForeignKey('Student', on_delete=models.CASCADE)
title = models.CharField(max_length=20)
text = models.CharField(max_length=100)
time = models.DateTimeField()
class Comment(models.Model):
"""评论"""
class Meta:
verbose_name_plural = '评论管理'
verbose_name = '评论'
student = models.ForeignKey('Student', on_delete=models.CASCADE)
post_card = models.ForeignKey('PostCard', on_delete=models.CASCADE)
text = models.CharField(max_length=100)
time = models.DateTimeField()
| 28.407643
| 131
| 0.646413
|
a10648b3cfb9609f773302d36e8f5fe6ab423b61
| 436
|
py
|
Python
|
HER2/Practice Testing/Epoch Experiment/Run_10_4.py
|
raktim-mondol/DeepLearningCamelyon
|
443fc5d17f577b99fc3b17cc1adc7000ccab642e
|
[
"CC0-1.0"
] | 70
|
2018-12-14T07:16:41.000Z
|
2022-02-03T07:40:23.000Z
|
HER2/Practice Testing/Epoch Experiment/Run_10_4.py
|
raktim-mondol/DeepLearningCamelyon
|
443fc5d17f577b99fc3b17cc1adc7000ccab642e
|
[
"CC0-1.0"
] | null | null | null |
HER2/Practice Testing/Epoch Experiment/Run_10_4.py
|
raktim-mondol/DeepLearningCamelyon
|
443fc5d17f577b99fc3b17cc1adc7000ccab642e
|
[
"CC0-1.0"
] | 24
|
2019-02-18T16:21:25.000Z
|
2022-03-06T15:39:45.000Z
|
execfile('1_import_pkgs.py')
execfile('2_epochs_10.py')
execfile('3_get_model.py')
execfile('4_get_data.py')
execfile('5_divide_data.py')
execfile('6_fit_model.py')
execfile('7_learning_curve.py')
execfile('8_accuracy_10_4.py')
execfile('9_confusion_matrix_10_4.py')
#execfile('10_images_misclassified.py')
#execfile('11_mono_correct.py')
#execfile('12_poly_correct.py')
#print(model.layers)
#print(model.inputs)
#print(model.outputs)
| 25.647059
| 39
| 0.791284
|
9d4f204354c9c89689a04ba927381246e4f76728
| 265
|
py
|
Python
|
2923.py
|
ShawonBarman/URI-Online-judge-Ad-Hoc-level-problem-solution-in-python
|
9a0f0ad5efd4a9e73589c357ab4b34b7c73a11da
|
[
"MIT"
] | 1
|
2022-01-14T08:45:32.000Z
|
2022-01-14T08:45:32.000Z
|
2923.py
|
ShawonBarman/URI-Online-judge-Ad-Hoc-level-problem-solution-in-python
|
9a0f0ad5efd4a9e73589c357ab4b34b7c73a11da
|
[
"MIT"
] | null | null | null |
2923.py
|
ShawonBarman/URI-Online-judge-Ad-Hoc-level-problem-solution-in-python
|
9a0f0ad5efd4a9e73589c357ab4b34b7c73a11da
|
[
"MIT"
] | null | null | null |
while True:
try:
t, a, w, c = map(int, input().split())
if (a*100)/t >= c:
print("critical")
elif (a*100)/t >= w:
print("warning")
else:
print("OK")
except EOFError:
break
| 24.090909
| 47
| 0.396226
|
57871e72ca122d2fb64b261ae1008c66f96d8c27
| 1,087
|
py
|
Python
|
scout/build/hpo.py
|
mhkc/scout
|
a7162f28c0f3490c3f3376268118fa8e6072a9db
|
[
"BSD-3-Clause"
] | 111
|
2015-01-15T11:53:20.000Z
|
2022-03-26T19:55:24.000Z
|
scout/build/hpo.py
|
mhkc/scout
|
a7162f28c0f3490c3f3376268118fa8e6072a9db
|
[
"BSD-3-Clause"
] | 2,995
|
2015-01-15T16:14:20.000Z
|
2022-03-31T13:36:32.000Z
|
scout/build/hpo.py
|
mhkc/scout
|
a7162f28c0f3490c3f3376268118fa8e6072a9db
|
[
"BSD-3-Clause"
] | 55
|
2015-05-31T19:09:49.000Z
|
2021-11-01T10:50:31.000Z
|
import logging
from scout.models.phenotype_term import HpoTerm
LOG = logging.getLogger(__name__)
def build_hpo_term(hpo_info):
"""Build a hpo_term object
Check that the information is correct and add the correct hgnc ids to the
array of genes.
Args:
hpo_info(dict)
Returns:
hpo_obj(scout.models.HpoTerm): A dictionary with hpo information
"""
try:
hpo_id = hpo_info["hpo_id"]
except KeyError:
raise KeyError("Hpo terms has to have a hpo_id")
LOG.debug("Building hpo term %s", hpo_id)
# Add description to HPO term
try:
description = hpo_info["description"]
except KeyError:
raise KeyError("Hpo terms has to have a description")
hpo_obj = HpoTerm(
hpo_id=hpo_id,
description=description,
genes=list(hpo_info.get("genes", set())),
ancestors=list(hpo_info.get("ancestors", set())),
all_ancestors=list(hpo_info.get("all_ancestors", set())),
children=list(hpo_info.get("children", set())),
)
return hpo_obj
| 24.155556
| 77
| 0.639374
|
486ffd9d69e25c95f4f39c30a8fc771ecddaa615
| 13,308
|
py
|
Python
|
market/backtest.py
|
adammorley/ib
|
a4c985d5a57628c253e4d8a422d03b110ebc23ec
|
[
"Apache-2.0"
] | 1
|
2020-10-03T15:03:57.000Z
|
2020-10-03T15:03:57.000Z
|
market/backtest.py
|
adammorley/ib
|
a4c985d5a57628c253e4d8a422d03b110ebc23ec
|
[
"Apache-2.0"
] | null | null | null |
market/backtest.py
|
adammorley/ib
|
a4c985d5a57628c253e4d8a422d03b110ebc23ec
|
[
"Apache-2.0"
] | 1
|
2020-06-10T02:51:07.000Z
|
2020-06-10T02:51:07.000Z
|
import logging
import re
from market import bars
from market import order
def anotateBars(histBars):
newBars = []
for i in range(0, len(histBars)):
newBars.append(makeBar(histBars[i]))
newBars[i].anotate()
logging.info('got %d bars', len(newBars))
return newBars
def makeBar(histBar):
bar = bars.Bar(0)
bar.open = histBar.open
bar.close = histBar.close
bar.high = histBar.high
bar.low = histBar.low
return bar
def getNextBar(dataStream, index):
return dataStream[index]
def setupThreeBar(dataStream, period):
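    # walk back `period` days from the end of the stream, assuming one bar per minute (24*60 bars per day)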
index = len(dataStream)-1 - period *24*60
dataStore = bars.BarSet()
dataStore.first = getNextBar(dataStream, index)
dataStore.second = getNextBar(dataStream, index+1)
dataStore.third = getNextBar(dataStream, index+2)
return index+3, dataStore
def backtest(wc, dataStream, dataStore, conf, period):
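    # running totals: gl=gain/loss, tf=funds currently in play, mf=peak funds in play, op=quantity opened, lo=unrealized P/L on positions left open at the end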
totals = {'gl': 0, 'tf': 0, 'mf': 0, 'op': 0, 'lo': 0}
positions = []
startIndex = None
    # index of the data point in the dataStream/bar set to evaluate this round when deciding whether to enter or not
if conf.detector == 'threeBarPattern':
startIndex, dataStore = setupThreeBar(dataStream, period)
elif conf.detector == 'Crossover':
# FIXME: might be a bug here
# we just stored (at init) the last EMA calculated, eg we are examining curClosePriceIndex
startIndex = dataStore.curEmaIndex + 1
for i in range(startIndex, len(dataStream)-1):
# first, see if any positions changed
logging.info('number of positions open: {}'.format(len(positions)))
positions, totals = checkPositions(wc, positions, conf, dataStore, dataStream, i, totals)
# see if we calculated an entryPrice
entryAction, entryPrice = None, None
if conf.detector == 'threeBarPattern':
entryPrice = dataStore.analyze()
elif conf.detector == 'Crossover':
entryAction, entryPrice = dataStore.checkForEntry(dataStream)
if entryPrice is not None:
od = order.OrderDetails(entryPrice, conf, wc, entryAction)
od.config.qty = order.calculateQty(od)
logging.warn('found an order: %s %s', od, dataStore)
if len(positions) < od.config.openPositions:
# checking whether the position opened and closed in the same bar
amount = None
orders = order.CreateBracketOrder(od)
# need to use real values (not offsets) for position checker
if orders.stopOrder.orderType == 'TRAIL': # have to store for position tracking
if od.config.stopPercent is not None:
if orders.entryOrder.action == 'BUY':
orders.stopOrder.auxPrice = order.Round( orders.entryOrder.lmtPrice *(100.0 - orders.stopOrder.trailingPercent)/100.0, od.wContract.priceIncrement)
else:
orders.stopOrder.auxPrice = order.Round( orders.entryOrder.lmtPrice *(100.0 + orders.stopOrder.trailingPercent)/100.0, od.wContract.priceIncrement)
elif od.config.stopTarget:
if orders.entryOrder.action == 'BUY':
orders.stopOrder.auxPrice = orders.entryOrder.lmtPrice - od.config.stopTarget
else:
orders.stopOrder.auxPrice = orders.entryOrder.lmtPrice + od.config.stopTarget
if conf.detector == 'threeBarPattern':
orders, amount = checkTradeExecution(dataStore.third, orders)
elif conf.detector == 'Crossover':
orders, amount = checkTradeExecution(dataStream[dataStore.curEmaIndex+1], orders)
logging.warn('position config %s', od.config)
# check if the trade executed
if orders is not None:
logging.warn('opened a position: %s', orders)
positions.append(orders)
totals['tf'] += orders.entryOrder.lmtPrice * orders.entryOrder.totalQuantity
totals['op'] += orders.entryOrder.totalQuantity
elif orders is None and amount is not None:
logging.warn('opened and closed a position in third bar')
totals['gl'] += amount
logging.debug('totalFundsInPlay: %.2f', totals['tf'])
if conf.detector == 'threeBarPattern':
dataStore.first = dataStore.second
dataStore.second = dataStore.third
dataStore.third = getNextBar(dataStream, i)
if len(positions) != 0:
positions, totals = checkPositions(wc, positions, conf, dataStore, dataStream, i, totals)
for p in positions:
if p.entryOrder.action == 'BUY':
totals['lo'] = (dataStream[len(dataStream)-1].close - p.entryOrder.lmtPrice) *p.entryOrder.totalQuantity
else:
totals['lo'] = (p.entryOrder.lmtPrice - dataStream[len(dataStream)-1].close) *p.entryOrder.totalQuantity
return totals
# only used to check whether the order bought/sold within the third bar during the "blur",
# e.g. this is unknown because we aren't analyzing by-second data
def checkTradeExecution(bar, orders):
if orders.entryOrder.lmtPrice <= bar.high and orders.stopOrder.auxPrice >= bar.low:
amount = None
if orders.entryOrder.action == 'BUY':
amount = (orders.stopOrder.auxPrice - orders.entryOrder.lmtPrice) *orders.entryOrder.totalQuantity
else:
amount = (orders.entryOrder.lmtPrice - orders.stopOrder.auxPrice) *orders.entryOrder.totalQuantity
return None, amount
else:
return orders, None
# check all the open positions
def checkPositions(wc, positions, conf, dataStore, dataStream, index, totals):
for position in positions:
closed, amount = None, None
if conf.detector == 'threeBarPattern':
closed, amount = checkPosition(dataStore.third, position)
elif conf.detector == 'Crossover':
closed, amount = checkPosition(dataStream[index], position)
if closed:
logging.warn('closed a position: {} {} {} {} {}'.format(amount, closed, position, dataStore, dataStream[index]))
totals['gl'] += amount
if totals['tf'] > totals['mf']:
totals['mf'] = totals['tf']
totals['tf'] -= position.entryOrder.lmtPrice * position.entryOrder.totalQuantity
positions.remove(position)
elif not closed and position.stopOrder.orderType == 'TRAIL':
closePrice = dataStream[index].close
if closePrice > position.entryOrder.lmtPrice:
if conf.stopPercent is not None:
if position.entryOrder.action == 'BUY':
position.stopOrder.auxPrice = order.Round( closePrice * (100.0 - position.stopOrder.trailingPercent)/100.0, wc.priceIncrement)
else:
position.stopOrder.auxPrice = order.Round( closePrice * (100.0 + position.stopOrder.trailingPercent)/100.0, wc.priceIncrement)
elif conf.stopTarget:
if position.entryOrder.action == 'BUY':
position.stopOrder.auxPrice = order.Round( closePrice - conf.stopTarget, wc.priceIncrement)
else:
position.stopOrder.auxPrice = order.Round( closePrice + conf.stopTarget, wc.priceIncrement)
#else position stays, no changes
return positions, totals
# check if a "position" (represented by a fictitious order) changed in the bar
# returns (closed, amount)
def checkPosition(bar, position):
amount, executed = checkStopProfit(position, bar)
if executed == False:
# order became a position, say so
return False, None
elif executed == True or executed == None:
# position closed, return amount
return True, amount
else:
logging.error('problem with position checking %s %s', position, bar)
return None, None
# orderDetails represents a ficitious order which:
# fails to execute
# opens and closes really fast (inside the next bar)
# becomes a "position" representing shares held
# returns amount or None (error condition)
# need another value which is "continue"
# returns
# True|False as to whether the trade executed
# amount neg or pos (loss/gain) or None if unknown
def checkStopProfit(position, bar):
amount = None
executed = None
# executed at stop price
if position.stopOrder.auxPrice >= bar.low and position.exitOrder.lmtPrice > bar.high:
if position.entryOrder.action == 'BUY':
amount = position.stopOrder.auxPrice - position.entryOrder.lmtPrice
else:
amount = position.entryOrder.lmtPrice - position.stopOrder.auxPrice
logging.info('closing position at a loss: {} {} {}'.format(amount, position, bar))
executed = True
# executed at profit price
elif position.stopOrder.auxPrice < bar.low and position.exitOrder.lmtPrice <= bar.high:
if position.entryOrder.action == 'BUY':
amount = position.exitOrder.lmtPrice - position.entryOrder.lmtPrice
else:
amount = position.entryOrder.lmtPrice - position.exitOrder.lmtPrice
logging.info('closing position at a gain: {} {} {}'.format(amount, position, bar))
executed = True
# did not execute, no delta, stays as a position
elif position.stopOrder.auxPrice < bar.low and position.exitOrder.lmtPrice > bar.high:
logging.info('not closing a position {} {}'.format(position, bar))
executed = False
amount = None
# unknown execution, assume loss
elif position.stopOrder.auxPrice >= bar.low and position.exitOrder.lmtPrice <= bar.high:
logging.info('wonky: closing position: {}'.format(position))
executed = None
if position.entryOrder.action == 'BUY':
amount = position.stopOrder.auxPrice - position.entryOrder.lmtPrice
else:
amount = position.entryOrder.lmtPrice - position.stopOrder.auxPrice
else:
logging.fatal('unhandled {} {}'.format(position, bar))
if amount is not None:
amount = amount * position.entryOrder.totalQuantity
return amount, executed
########################DRAGONS!
def processScriptOutput():
ds = {}
with open('../esData', 'r') as f:
while True:
s = f.readline()
if not s:
break
kv = s.split()
vh = kv[1].split(':')
try:
ds[kv[0]][vh[0]] = vh[1]
except KeyError:
ds[kv[0]] = {}
ds[kv[0]][vh[0]] = vh[1]
one={}
five={}
ten={}
fourteen={}
thirty={}
sixty={}
total = {}
for k, v in ds.items():
for d, gl in v.items():
d = int(d)
gl = float(gl)
if d == 1:
one[k] = gl
elif d == 5:
five[k] = gl
elif d == 10:
ten[k] = gl
elif d == 14:
fourteen[k] = gl
elif d == 30:
thirty[k] = gl
elif d == 60:
sixty[k] = gl
return one, five, ten, fourteen, thirty, sixty
# send output of processscriptoutput
# inFromOut is output of script outputthing above
def findUnion(inFromOut):
best = set()
for arr in inFromOut:
i = 0
for k in sorted(arr, key=arr.get, reverse=True):
i += 1
if i > 30:
break
best.add(k)
return best
# regex is like 'lI:40,sI:15,w:15,sT:5,pT:7'
def filterBest(regex, inFromOut):
#r = re.compile('lI:40,sI:15,w:(5|15),sT.*')
r = re.compile(regex)
for arr in inFromOut:
print('mark')
for k in sorted(arr, key=arr.get, reverse=True):
m = r.match(k)
if m:
print(k, arr[k])
def feedFromUnionToPositiveKeyFinder(best, mult, inFromOut):
d = {}
for k in best:
r = getFromIn(k, mult, inFromOut)
if r:
# FIXME: make -1
for i in range(len(inFromOut)-1, 0, -1):
try:
t = None
t = inFromOut[i][k]
if t is not None:
d[k] = t
break
except KeyError:
                    pass
return d
def getBestValue(inFromOut):
best = findUnion(inFromOut)
ds = []
for i in [1.5, 2, 2.5, 3]:
print('at multiplier ', i)
d = feedFromUnionToPositiveKeyFinder(best, i, inFromOut)
for k, v in d.items():
if v > 20000:
print(k, v)
print('')
def getFromIn(key, mult, inFromOut):
v = []
for i in inFromOut:
try:
v.append(i[key])
except KeyError:
            pass
p = v[0]
f = False
for j in v:
if j < 1:
f = False
break
elif j > mult * p:
f = True
else:
f = False
p = j
if f:
return key
return None
| 39.963964
| 175
| 0.586865
|
5ba78c115ad3e810bbe1374a224fe105cf01ca7d
| 1,308
|
py
|
Python
|
setup.py
|
tupian-language-resources/pytular
|
5eaedcd824af06b32d6bb2b8fdebffda7180bc1e
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
tupian-language-resources/pytular
|
5eaedcd824af06b32d6bb2b8fdebffda7180bc1e
|
[
"Apache-2.0"
] | 1
|
2021-03-23T13:02:43.000Z
|
2021-03-25T13:32:53.000Z
|
setup.py
|
tupian-language-resources/pytular
|
5eaedcd824af06b32d6bb2b8fdebffda7180bc1e
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name='pytular',
version='0.1.1.dev0',
license='Apache 2.0',
description='TuLaR curation library',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
author='Robert Forkel',
author_email='forkel@shh.mpg.de',
url='',
keywords='data',
packages=find_packages(where='src'),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
platforms='any',
python_requires='>=3.6',
install_requires=[
'attrs>=19.3',
'clldutils>=3.5',
'csvw',
'gspread',
'oauth2client',
],
extras_require={
'dev': ['flake8', 'wheel', 'twine'],
'test': [
'pytest>=4.3',
'pytest-mock',
'pytest-cov',
'coverage>=4.2',
],
},
entry_points={
#'console_scripts': [
# 'tular=pytular.__main__:main',
#]
},
)
| 25.153846
| 50
| 0.541284
|
66faa747e967ad20958b2f608c76eeb8f9c225f2
| 392
|
py
|
Python
|
cajas/investments/admin.py
|
dmontoya1/cajas
|
5eb3d5835250d5dafae398082200b79c1ca8063b
|
[
"MIT"
] | null | null | null |
cajas/investments/admin.py
|
dmontoya1/cajas
|
5eb3d5835250d5dafae398082200b79c1ca8063b
|
[
"MIT"
] | null | null | null |
cajas/investments/admin.py
|
dmontoya1/cajas
|
5eb3d5835250d5dafae398082200b79c1ca8063b
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models.investment import Investment
from .models.investment_pay import InvestmentPay
class InvestmentPayStacked(admin.StackedInline):
model = InvestmentPay
extra = 0
@admin.register(Investment)
class InvestmentAdmin(admin.ModelAdmin):
list_display = ('partner', 'date', 'element', 'total_value')
inlines = [InvestmentPayStacked]
| 20.631579
| 64
| 0.765306
|
4c9ea2da23d032469327a9f39b809d37f2a71b58
| 11,491
|
py
|
Python
|
forecast/migrations/0002_data_20200522.py
|
uktrade/fadmin2
|
0f774400fb816c9ca30e30b25ae542135966e185
|
[
"MIT"
] | 3
|
2020-01-05T16:46:42.000Z
|
2021-08-02T08:08:39.000Z
|
forecast/migrations/0002_data_20200522.py
|
uktrade/fadmin2
|
0f774400fb816c9ca30e30b25ae542135966e185
|
[
"MIT"
] | 30
|
2019-11-28T15:16:35.000Z
|
2021-08-16T14:49:58.000Z
|
forecast/migrations/0002_data_20200522.py
|
uktrade/fadmin2
|
0f774400fb816c9ca30e30b25ae542135966e185
|
[
"MIT"
] | null | null | null |
from django.db import migrations
fields = [
"financial_period_code",
"period_long_name",
"period_short_name",
"period_calendar_code",
]
periods = [
[1, "April", "Apr", 4],
[2, "May", "May", 5],
[3, "June", "Jun", 6],
[4, "July", "Jul", 7],
[5, "August", "Aug", 8],
[6, "September", "Sep", 9],
[7, "October", "Oct", 10],
[8, "November", "Nov", 11],
[9, "December", "Dec", 12],
[10, "January", "Jan", 1],
[11, "February", "Feb", 2],
[12, "March", "Mar", 3],
[13, "Adjustment 1", "Adj1", 0],
[14, "Adjustment 2", "Adj2", 0],
[15, "Adjustment 3", "Adj3", 0],
]
def populate_period(apps, schema_editor):
PeriodModel = apps.get_model("forecast", "FinancialPeriod")
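    # zip each row of `periods` onto the field names above and create the record if it does not already exist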
for l in periods:
d = dict(zip(fields, l))
obj, created = PeriodModel.objects.get_or_create(**d)
def create_forecast_expenditure_types(apps, schema_editor):
ForecastExpenditureType = apps.get_model("forecast", "ForecastExpenditureType")
BudgetType = apps.get_model("chartofaccountDIT", "BudgetType")
del_type = BudgetType.objects.get(budget_type_key="DEL")
ame_type = BudgetType.objects.get(budget_type_key="AME")
admin_type = BudgetType.objects.get(budget_type_key="ADMIN")
ForecastExpenditureType.objects.create(
forecast_expenditure_type_name="Capital",
forecast_expenditure_type_description="Capital",
forecast_expenditure_type_display_order=3,
nac_economic_budget_code="CAPITAL",
programme_budget_type=del_type,
).save()
ForecastExpenditureType.objects.create(
forecast_expenditure_type_name="Capital",
forecast_expenditure_type_description="Capital",
forecast_expenditure_type_display_order=3,
nac_economic_budget_code="CAPITAL",
programme_budget_type=ame_type,
).save()
ForecastExpenditureType.objects.create(
forecast_expenditure_type_name="Capital",
forecast_expenditure_type_description="Capital",
forecast_expenditure_type_display_order=3,
nac_economic_budget_code="CAPITAL",
programme_budget_type=admin_type,
).save()
ForecastExpenditureType.objects.create(
nac_economic_budget_code="RESOURCE",
programme_budget_type=del_type,
forecast_expenditure_type_name='Programme',
forecast_expenditure_type_description='Programme Resource',
forecast_expenditure_type_display_order=2
).save()
ForecastExpenditureType.objects.create(
nac_economic_budget_code="RESOURCE",
programme_budget_type=ame_type,
forecast_expenditure_type_name='Programme',
forecast_expenditure_type_description='Programme Resource',
forecast_expenditure_type_display_order=2
).save()
ForecastExpenditureType.objects.create(
nac_economic_budget_code="RESOURCE",
programme_budget_type=admin_type,
forecast_expenditure_type_name='Admin',
forecast_expenditure_type_description='Admin Resource',
forecast_expenditure_type_display_order=1
).save()
def create_forecast_lock(apps, schema_editor):
ForecastEditState = apps.get_model('forecast', 'ForecastEditState')
ForecastEditState.objects.create()
class Migration(migrations.Migration):
dependencies = [("forecast", "0001_initial")]
operations = [
migrations.RunPython(populate_period),
migrations.RunPython(create_forecast_expenditure_types),
migrations.RunPython(create_forecast_lock),
# 0050_auto_20200116_1204
migrations.RunSQL("""UPDATE public."chartofaccountDIT_budgettype"
SET budget_type_display_order=1
WHERE budget_type_key = 'DEL';
UPDATE public."chartofaccountDIT_budgettype"
SET budget_type_display_order=1
WHERE budget_type_key = 'ADMIN';
UPDATE public."chartofaccountDIT_budgettype"
SET budget_type_display_order=2
WHERE budget_type_key = 'AME';
"""),
# 0051_create_budget_forecast_view
migrations.RunSQL(
"""
DROP VIEW if exists forecast_forecast_budget_view ;
DROP VIEW if exists yearly_budget;
DROP VIEW if exists annual_forecast;
CREATE VIEW annual_forecast as
SELECT financial_code_id, financial_year_id,
SUM(CASE WHEN financial_period_id = 1 THEN amount ELSE NULL END) AS apr,
SUM(CASE WHEN financial_period_id = 2 THEN amount ELSE NULL END) AS may,
SUM(CASE WHEN financial_period_id = 3 THEN amount ELSE NULL END) AS jun,
SUM(CASE WHEN financial_period_id = 4 THEN amount ELSE NULL END) AS jul,
SUM(CASE WHEN financial_period_id = 5 THEN amount ELSE NULL END) AS aug,
SUM(CASE WHEN financial_period_id = 6 THEN amount ELSE NULL END) AS sep,
SUM(CASE WHEN financial_period_id = 7 THEN amount ELSE NULL END) AS oct,
SUM(CASE WHEN financial_period_id = 8 THEN amount ELSE NULL END) AS nov,
SUM(CASE WHEN financial_period_id = 9 THEN amount ELSE NULL END) AS "dec",
SUM(CASE WHEN financial_period_id = 10 THEN amount ELSE NULL END) AS jan,
SUM(CASE WHEN financial_period_id = 11 THEN amount ELSE NULL END) AS feb,
SUM(CASE WHEN financial_period_id = 12 THEN amount ELSE NULL END) AS mar,
SUM(CASE WHEN financial_period_id = 13 THEN amount ELSE NULL END) AS adj1 ,
SUM(CASE WHEN financial_period_id = 14 THEN amount ELSE NULL END) AS adj2 ,
SUM(CASE WHEN financial_period_id = 15 THEN amount ELSE NULL END) AS adj3
FROM forecast_forecastmonthlyfigure
GROUP BY financial_code_id, financial_year_id;
CREATE VIEW yearly_budget as
SELECT financial_code_id, financial_year_id, SUM(amount) AS budget
FROM forecast_budgetmonthlyfigure
GROUP BY financial_code_id, financial_year_id;
CREATE VIEW public.forecast_forecast_budget_view
as
SELECT coalesce(b.financial_code_id, f.financial_code_id) as financial_code_id,
coalesce(b.financial_year_id, f.financial_year_id) as financial_year,
coalesce(budget, 0) as budget,
coalesce(apr, 0) as apr,
coalesce(may, 0) as may,
coalesce(jun, 0) as jun,
coalesce(jul, 0) as jul,
coalesce(aug, 0) as aug,
coalesce(sep, 0) as sep,
coalesce(oct, 0) as oct,
coalesce(nov, 0) as nov,
coalesce("dec", 0) as "dec",
coalesce(jan, 0) as jan,
coalesce(feb, 0) as feb,
coalesce(mar, 0) as mar,
coalesce(adj1, 0) as adj1,
coalesce(adj2, 0) as adj2,
coalesce(adj3, 0) as adj3
FROM annual_forecast f
FULL OUTER JOIN yearly_budget b
on b.financial_code_id = f.financial_code_id and b.financial_year_id = f.financial_year_id;
""",
"""
DROP VIEW if exists forecast_forecast_budget_view;
DROP VIEW if exists yearly_budget;
DROP VIEW if exists annual_forecast;
""",
),
migrations.RunSQL(
"""
DROP VIEW if exists forecast_forecast_budget_view CASCADE;
DROP VIEW if exists yearly_budget CASCADE;
DROP VIEW if exists annual_forecast CASCADE;
CREATE VIEW annual_forecast as
SELECT financial_code_id, financial_year_id,
SUM(CASE WHEN financial_period_id = 1 THEN amount ELSE NULL END) AS apr,
SUM(CASE WHEN financial_period_id = 2 THEN amount ELSE NULL END) AS may,
SUM(CASE WHEN financial_period_id = 3 THEN amount ELSE NULL END) AS jun,
SUM(CASE WHEN financial_period_id = 4 THEN amount ELSE NULL END) AS jul,
SUM(CASE WHEN financial_period_id = 5 THEN amount ELSE NULL END) AS aug,
SUM(CASE WHEN financial_period_id = 6 THEN amount ELSE NULL END) AS sep,
SUM(CASE WHEN financial_period_id = 7 THEN amount ELSE NULL END) AS oct,
SUM(CASE WHEN financial_period_id = 8 THEN amount ELSE NULL END) AS nov,
SUM(CASE WHEN financial_period_id = 9 THEN amount ELSE NULL END) AS "dec",
SUM(CASE WHEN financial_period_id = 10 THEN amount ELSE NULL END) AS jan,
SUM(CASE WHEN financial_period_id = 11 THEN amount ELSE NULL END) AS feb,
SUM(CASE WHEN financial_period_id = 12 THEN amount ELSE NULL END) AS mar,
SUM(CASE WHEN financial_period_id = 13 THEN amount ELSE NULL END) AS adj1 ,
SUM(CASE WHEN financial_period_id = 14 THEN amount ELSE NULL END) AS adj2 ,
SUM(CASE WHEN financial_period_id = 15 THEN amount ELSE NULL END) AS adj3
FROM forecast_forecastmonthlyfigure
WHERE forecast_forecastmonthlyfigure.archived_status_id is NULL
GROUP BY financial_code_id, financial_year_id;
CREATE VIEW yearly_budget as
SELECT financial_code_id, financial_year_id, archived_status_id, SUM(amount) AS budget
FROM forecast_budgetmonthlyfigure
WHERE forecast_budgetmonthlyfigure.archived_status_id is NULL
GROUP BY financial_code_id, financial_year_id, archived_status_id;
CREATE VIEW public.forecast_forecast_budget_view
as
SELECT coalesce(b.financial_code_id, f.financial_code_id) as financial_code_id,
coalesce(b.financial_year_id, f.financial_year_id) as financial_year,
coalesce(budget, 0) as budget,
coalesce(apr, 0) as apr,
coalesce(may, 0) as may,
coalesce(jun, 0) as jun,
coalesce(jul, 0) as jul,
coalesce(aug, 0) as aug,
coalesce(sep, 0) as sep,
coalesce(oct, 0) as oct,
coalesce(nov, 0) as nov,
coalesce("dec", 0) as "dec",
coalesce(jan, 0) as jan,
coalesce(feb, 0) as feb,
coalesce(mar, 0) as mar,
coalesce(adj1, 0) as adj1,
coalesce(adj2, 0) as adj2,
coalesce(adj3, 0) as adj3
FROM annual_forecast f
FULL OUTER JOIN yearly_budget b
on b.financial_code_id = f.financial_code_id and b.financial_year_id = f.financial_year_id;
""",
),
]
| 48.281513
| 131
| 0.593334
|
bcdc2311f31ad93c801c92cea9f7ed8a0d42e7a3
| 753
|
py
|
Python
|
duckdown/tool/secure.py
|
blueshed/duckdown
|
e6d0e62d378bd2d9ed0cd5ce4bc7ab3476b86020
|
[
"MIT"
] | null | null | null |
duckdown/tool/secure.py
|
blueshed/duckdown
|
e6d0e62d378bd2d9ed0cd5ce4bc7ab3476b86020
|
[
"MIT"
] | null | null | null |
duckdown/tool/secure.py
|
blueshed/duckdown
|
e6d0e62d378bd2d9ed0cd5ce4bc7ab3476b86020
|
[
"MIT"
] | null | null | null |
""" secure a users file """
from invoke import task
from dotenv import load_dotenv
from cryptography.fernet import Fernet
from duckdown.utils.encrypt import encrypt, decrypt
from duckdown.utils import json_utils
@task
def secure(_, path):
""" encrypt the passwords in a users.json file """
load_dotenv(verbose=True)
with open(path) as file:
users = json_utils.load(file)
new_values = {}
for user in users:
new_values[user] = encrypt(user)
print(json_utils.dumps(new_values))
@task
def unsecure(_, data):
""" return to normal """
load_dotenv(verbose=True)
print(decrypt(data))
@task
def gen_key(_):
""" generates a secret key for use as DKDN_KEY envar"""
return Fernet.generate_key()
| 21.514286
| 60
| 0.690571
|
223781115de394c66850498ffc12d01bb9f1ef7d
| 728
|
py
|
Python
|
django_project/urls.py
|
sankaet/IOT-DB
|
a554f49b9c25ae1a9a91b6a2564489b999da03bd
|
[
"MIT"
] | 1
|
2016-10-26T23:10:57.000Z
|
2016-10-26T23:10:57.000Z
|
django_project/urls.py
|
sankaet/IOT-DB
|
a554f49b9c25ae1a9a91b6a2564489b999da03bd
|
[
"MIT"
] | null | null | null |
django_project/urls.py
|
sankaet/IOT-DB
|
a554f49b9c25ae1a9a91b6a2564489b999da03bd
|
[
"MIT"
] | null | null | null |
from django.conf.urls import patterns, include, url
from iot import v1_views
from django.views.generic import TemplateView
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'django_project.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
# url(r'^admin/', include(admin.site.urls)),
url(r'^v1/schemas$', v1_views.schemas, name='schemas'),
url(r'^v1/schemas/(?P<schema_id>[-\w]+)$', v1_views.schema_by_id, name='schema_by_id'),
url(r'^v1/schemas/(?P<schema_id>[-\w]+)/data$', v1_views.data, name='data'),
url(r'^v1/schemas/(?P<schema_id>[-\w]+)/data/(?P<data_id>[-\w]+)$', v1_views.data_by_id, name='data_by_id'),
)
| 38.315789
| 112
| 0.656593
|
3a4649dd21dcbd5f9e9a9821c3e9b2096f16a7a2
| 38,075
|
py
|
Python
|
rplugin/python3/denite/ui/default.py
|
mkinoshi/new-denite
|
5889dc4acfd11c881a3d54bea4023df09561aff5
|
[
"MIT"
] | null | null | null |
rplugin/python3/denite/ui/default.py
|
mkinoshi/new-denite
|
5889dc4acfd11c881a3d54bea4023df09561aff5
|
[
"MIT"
] | null | null | null |
rplugin/python3/denite/ui/default.py
|
mkinoshi/new-denite
|
5889dc4acfd11c881a3d54bea4023df09561aff5
|
[
"MIT"
] | null | null | null |
# ============================================================================
# FILE: default.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
import copy
import re
import weakref
from itertools import groupby, takewhile
from denite.util import (
clear_cmdline, echo, error, regex_convert_py_vim, clearmatch)
from .action import DEFAULT_ACTION_KEYMAP
from .prompt import DenitePrompt
from denite.parent import SyncParent
from ..prompt.prompt import STATUS_ACCEPT, STATUS_INTERRUPT
class Default(object):
@property
def is_async(self):
return self._denite.is_async()
@property
def current_mode(self):
return self._current_mode
def __init__(self, vim):
self._vim = vim
self._denite = None
self._cursor = 0
self._win_cursor = 2
self._selected_candidates = []
self._candidates = []
self._candidates_len = 0
self._result = []
self._context = {}
self._current_mode = ''
self._mode_stack = []
self._current_mappings = {}
self._bufnr = -1
self._winid = -1
self._winrestcmd = ''
self._initialized = False
self._winheight = 0
self._winwidth = 0
self._winminheight = -1
self._scroll = 0
self._is_multi = False
self._matched_pattern = ''
self._displayed_texts = []
self._statusline_sources = ''
self._prompt = DenitePrompt(
self._vim,
self._context,
weakref.proxy(self)
)
self._guicursor = ''
self._titlestring = ''
self._ruler = False
self._prev_action = ''
self._prev_status = {}
self._prev_curpos = []
self._is_suspend = False
self._save_window_options = {}
self._sources_history = []
def start(self, sources, context):
if not self._denite:
self._denite = SyncParent(self._vim)
self._result = []
context['sources_queue'] = [sources]
self._sources_history = []
try:
while context['sources_queue']:
prev_history = copy.copy(self._sources_history)
prev_path = context['path']
self._start(context['sources_queue'][0], context)
if prev_history == self._sources_history:
self._sources_history.append({
'sources': context['sources_queue'][0],
'path': prev_path,
})
context['sources_queue'].pop(0)
context['path'] = self._context['path']
finally:
self.cleanup()
return self._result
def _start(self, sources, context):
self._vim.command('silent! autocmd! denite')
if re.search(r'\[Command Line\]$', self._vim.current.buffer.name):
# Ignore command line window.
return
if self._initialized and context['resume']:
# Skip the initialization
if not self._is_suspend:
if context['mode']:
self._current_mode = context['mode']
update = ('immediately', 'immediately_1',
'cursor_wrap', 'cursor_pos', 'prev_winid',
'quick_move')
for key in update:
self._context[key] = context[key]
if self.check_option():
return
self.init_buffer()
if context['refresh']:
self.redraw()
else:
if not context['mode']:
# Default mode
context['mode'] = 'insert'
self._context.clear()
self._context.update(context)
self._context['sources'] = sources
self._context['is_redraw'] = False
self._current_mode = context['mode']
self._is_multi = len(sources) > 1
if not sources:
# Ignore empty sources.
error(self._vim, 'Empty sources')
return
self.init_denite()
self.gather_candidates()
self.update_candidates()
self.init_cursor()
if self.check_option():
return
self.init_buffer()
self._is_suspend = False
self.update_displayed_texts()
self.change_mode(self._current_mode)
self.update_buffer()
if self._context['quick_move'] and self.quick_move():
return
# Make sure that the caret position is ok
self._prompt.caret.locus = self._prompt.caret.tail
status = self._prompt.start()
if status == STATUS_INTERRUPT:
# STATUS_INTERRUPT is returned when user hit <C-c> and the loop has
# interrupted.
# In this case, denite cancel any operation and close its window.
self.quit()
return
def init_buffer(self):
self._prev_status = dict()
self._displayed_texts = []
if not self._is_suspend:
self._prev_bufnr = self._vim.current.buffer.number
self._prev_curpos = self._vim.call('getcurpos')
self._prev_wininfo = self._get_wininfo()
self._prev_winid = int(self._context['prev_winid'])
self._winrestcmd = self._vim.call('winrestcmd')
self._scroll = int(self._context['scroll'])
if self._scroll == 0:
self._scroll = round(self._winheight / 2)
if self._context['cursor_shape']:
self._guicursor = self._vim.options['guicursor']
self._vim.options['guicursor'] = 'a:None'
self._titlestring = self._vim.options['titlestring']
self._ruler = self._vim.options['ruler']
self._switch_buffer()
self.resize_buffer()
self._winheight = self._vim.current.window.height
self._winwidth = self._vim.current.window.width
self._options = self._vim.current.buffer.options
self._options['buftype'] = 'nofile'
self._options['bufhidden'] = 'delete'
self._options['swapfile'] = False
self._options['buflisted'] = False
self._options['modeline'] = False
self._options['filetype'] = 'denite'
self._options['modifiable'] = True
if self._context['split'] == 'floating':
# Disable ruler
self._vim.options['ruler'] = False
self._window_options = self._vim.current.window.options
window_options = {
'colorcolumn': '',
'conceallevel': 3,
'concealcursor': 'n',
'cursorcolumn': False,
'foldenable': False,
'foldcolumn': 0,
'list': False,
'number': False,
'relativenumber': False,
'spell': False,
'winfixheight': True,
'wrap': False,
}
if self._context['cursorline']:
window_options['cursorline'] = True
self._save_window_options = {}
for k, v in window_options.items():
self._save_window_options[k] = self._window_options[k]
self._window_options[k] = v
self._bufvars = self._vim.current.buffer.vars
self._bufnr = self._vim.current.buffer.number
self._winid = self._vim.call('win_getid')
self._bufvars['denite_statusline'] = {}
self._vim.vars['denite#_previewed_buffers'] = {}
self._vim.command('silent doautocmd WinEnter')
self._vim.command('silent doautocmd BufWinEnter')
self._vim.command('doautocmd FileType denite')
self.init_syntax()
def _switch_buffer(self):
split = self._context['split']
if (split != 'no' and self._winid > 0 and
self._vim.call('win_gotoid', self._winid)):
if (not self._is_suspend and
split != 'vertical' and split != 'floating'):
# Move the window to bottom
self._vim.command('wincmd J')
self._winrestcmd = ''
else:
command = 'edit'
if split == 'tab':
self._vim.command('tabnew')
elif (split == 'floating' and
self._vim.call('exists', '*nvim_open_win')):
# Use floating window
self._vim.call(
'nvim_open_win',
self._vim.call('bufnr', '%'), True, {
'relative': 'editor',
'row': int(self._context['winrow']),
'col': int(self._context['wincol']),
'width': int(self._context['winwidth']),
'height': int(self._context['winheight']),
})
self._vim.current.window.options['winhighlight'] = (
'Normal:' + self._context['highlight_window_background']
)
elif split != 'no':
command = self._get_direction()
command += ' vsplit' if split == 'vertical' else ' split'
self._vim.call(
'denite#util#execute_path',
f'silent keepalt {command}', '[denite]')
def _get_direction(self):
direction = self._context['direction']
if direction == 'dynamictop' or direction == 'dynamicbottom':
self.update_displayed_texts()
winwidth = self._vim.call('winwidth', 0)
is_fit = not [x for x in self._displayed_texts
if self._vim.call('strwidth', x) > winwidth]
if direction == 'dynamictop':
direction = 'aboveleft' if is_fit else 'topleft'
else:
direction = 'belowright' if is_fit else 'botright'
return direction
def _get_wininfo(self):
return [
self._vim.options['columns'], self._vim.options['lines'],
self._vim.call('win_getid'),
]
def _switch_prev_buffer(self):
if (self._prev_bufnr == self._bufnr or
self._vim.buffers[self._prev_bufnr].name == ''):
self._vim.command('enew')
else:
self._vim.command('buffer ' + str(self._prev_bufnr))
def init_syntax(self):
self._vim.command('syntax case ignore')
self._vim.command('highlight default link deniteMode ModeMsg')
self._vim.command('highlight link deniteMatchedRange ' +
self._context['highlight_matched_range'])
self._vim.command('highlight link deniteMatchedChar ' +
self._context['highlight_matched_char'])
self._vim.command('highlight default link ' +
'deniteStatusLinePath Comment')
self._vim.command('highlight default link ' +
'deniteStatusLineNumber LineNR')
self._vim.command('highlight default link ' +
'deniteSelectedLine Statement')
self._vim.command(('syntax match deniteSelectedLine /^[%s].*/' +
' contains=deniteConcealedMark') % (
self._context['selected_icon']))
self._vim.command(('syntax match deniteConcealedMark /^[ %s]/' +
' conceal contained') % (
self._context['selected_icon']))
self._denite.init_syntax(self._context, self._is_multi)
def init_cursor(self):
self._win_cursor = 2
self._cursor = 0
if self._context['reversed']:
self.move_to_last_line()
def update_candidates(self):
(pattern, statuses,
self._candidates) = self._denite.filter_candidates(self._context)
self._candidates = self.get_current_path() + self._candidates
prev_matched_pattern = self._matched_pattern
self._matched_pattern = pattern
self._candidates_len = len(self._candidates)
self._statusline_sources = ' '.join(statuses)
prev_displayed_texts = self._displayed_texts
self.update_displayed_texts()
updated = (self._displayed_texts != prev_displayed_texts or
self._matched_pattern != prev_matched_pattern)
if updated and self._denite.is_async() and self._context['reversed']:
self.init_cursor()
return updated
def update_displayed_texts(self):
if self._context['auto_resize']:
winminheight = int(self._context['winminheight'])
            if (winminheight != -1 and
self._candidates_len < winminheight):
self._winheight = winminheight
elif self._candidates_len > int(self._context['winheight']):
self._winheight = int(self._context['winheight'])
elif self._candidates_len != self._winheight:
self._winheight = self._candidates_len
self._displayed_texts = [
self.get_candidate_display_text(i)
for i in range(self._cursor,
min(self._candidates_len,
self._cursor + self._winheight))
]
def get_current_path(self):
return [self._context['path']]
def update_buffer(self):
if self._bufnr != self._vim.current.buffer.number:
return
self.update_status()
if self._vim.call('hlexists', 'deniteMatchedRange'):
self._vim.command('silent! syntax clear deniteMatchedRange')
if self._vim.call('hlexists', 'deniteMatchedChar'):
self._vim.command('silent! syntax clear deniteMatchedChar')
if self._matched_pattern != '':
self._vim.command(
r'silent! syntax match deniteMatchedRange /\c%s/ contained' %
(regex_convert_py_vim(self._matched_pattern))
)
self._vim.command((
'silent! syntax match deniteMatchedChar /[%s]/ '
'containedin=deniteMatchedRange contained'
) % re.sub(
r'([\[\]\\^-])',
r'\\\1',
self._context['input'].replace(' ', '')
))
self._vim.current.buffer[:] = self._displayed_texts
self.resize_buffer()
self.move_cursor()
def update_status(self):
raw_mode = self._current_mode.upper()
cursor_location = self._cursor + self._win_cursor
max_len = len(str(self._candidates_len))
linenr = ('{:'+str(max_len)+'}/{:'+str(max_len)+'}').format(
cursor_location,
self._candidates_len)
mode = '-- ' + raw_mode + ' -- '
if self._context['error_messages']:
mode = '[ERROR] ' + mode
path = '[' + self._context['path'] + ']'
status = {
'mode': mode,
'sources': self._statusline_sources,
'path': path,
'linenr': linenr,
# Extra
'raw_mode': raw_mode,
'buffer_name': self._context['buffer_name'],
'line_cursor': cursor_location,
'line_total': self._candidates_len,
}
if status != self._prev_status:
self._bufvars['denite_statusline'] = status
self._vim.command('redrawstatus')
self._prev_status = status
if self._context['statusline']:
status = (
"%#deniteMode#%{denite#get_status('mode')}%* " +
"%{denite#get_status('sources')} %=" +
"%#deniteStatusLinePath# %{denite#get_status('path')} %*" +
"%#deniteStatusLineNumber#%{denite#get_status('linenr')}%*")
if self._context['split'] == 'floating':
self._vim.options['titlestring'] = status
else:
self._window_options['statusline'] = status
def update_cursor(self):
self.update_displayed_texts()
self.update_buffer()
def get_display_source_name(self, name):
source_names = self._context['source_names']
if not self._is_multi or source_names == 'hide':
source_name = ''
else:
short_name = (re.sub(r'([a-zA-Z])[a-zA-Z]+', r'\1', name)
if re.search(r'[^a-zA-Z]', name) else name[:2])
source_name = short_name if source_names == 'short' else name
return source_name
def get_current_path_display_text(self, candidate):
return candidate
def get_candidate_display_text(self, index):
if index == 0:
return self.get_current_path_display_text(self._candidates[index])
else:
source_names = self._context['source_names']
candidate = self._candidates[index]
terms = []
if self._is_multi and source_names != 'hide':
terms.append(self.get_display_source_name(
candidate['source_name']))
encoding = self._context['encoding']
abbr = candidate.get('abbr', candidate['word']).encode(
encoding, errors='replace').decode(encoding, errors='replace')
terms.append(abbr[:int(self._context['max_candidate_width'])])
return (self._context['selected_icon']
if index in self._selected_candidates
else ' ') + ' '.join(terms).replace('\n', '')
def resize_buffer(self):
split = self._context['split']
if split == 'no' or split == 'tab':
return
winheight = self._winheight
winwidth = self._winwidth
is_vertical = split == 'vertical'
if not is_vertical and self._vim.current.window.height != winheight:
self._vim.command('resize ' + str(winheight))
if self._context['reversed']:
self._vim.command('normal! zb')
elif is_vertical and self._vim.current.window.width != winwidth:
self._vim.command('vertical resize ' + str(winwidth))
def check_option(self):
if self._context['cursor_pos'].isnumeric():
self.init_cursor()
self.move_to_pos(int(self._context['cursor_pos']))
elif re.match(r'\+\d+', self._context['cursor_pos']):
for _ in range(int(self._context['cursor_pos'][1:])):
self.move_to_next_line()
elif re.match(r'-\d+', self._context['cursor_pos']):
for _ in range(int(self._context['cursor_pos'][1:])):
self.move_to_prev_line()
elif self._context['cursor_pos'] == '$':
self.move_to_last_line()
elif self._context['do'] != '':
self.do_command(self._context['do'])
return True
if (self._candidates and self._context['immediately'] or
len(self._candidates) == 1 and self._context['immediately_1']):
self.do_immediately()
return True
return not (self._context['empty'] or
self._denite.is_async() or self._candidates)
def do_immediately(self):
goto = self._winid > 0 and self._vim.call(
'win_gotoid', self._winid)
if goto:
# Jump to denite window
self.init_buffer()
self.update_cursor()
self.do_action('default')
candidate = self.get_cursor_candidate()
echo(self._vim, 'Normal', '[{}/{}] {}'.format(
self._cursor + self._win_cursor, self._candidates_len,
candidate.get('abbr', candidate['word'])))
if goto:
# Move to the previous window
self.suspend()
self._vim.command('wincmd p')
def do_command(self, command):
self.init_cursor()
self._context['post_action'] = 'suspend'
while self._cursor + self._win_cursor < self._candidates_len:
self.do_action('default', command)
self.move_to_next_line()
self.quit_buffer()
def move_cursor(self):
if self._win_cursor > self._vim.call('line', '$'):
self._win_cursor = self._vim.call('line', '$')
if self._win_cursor != self._vim.call('line', '.'):
self._vim.call('cursor', [self._win_cursor, 1])
if self._context['auto_action']:
self.do_action(self._context['auto_action'])
def change_mode(self, mode):
self._current_mode = mode
custom = self._context['custom']['map']
use_default_mappings = self._context['use_default_mappings']
highlight = 'highlight_mode_' + mode
if highlight in self._context:
self._vim.command('highlight! link CursorLine ' +
self._context[highlight])
# Clear current keymap
self._prompt.keymap.registry.clear()
# Apply mode independent mappings
if use_default_mappings:
self._prompt.keymap.register_from_rules(
self._vim,
DEFAULT_ACTION_KEYMAP.get('_', [])
)
self._prompt.keymap.register_from_rules(
self._vim,
custom.get('_', [])
)
# Apply mode depend mappings
mode = self._current_mode
if use_default_mappings:
self._prompt.keymap.register_from_rules(
self._vim,
DEFAULT_ACTION_KEYMAP.get(mode, [])
)
self._prompt.keymap.register_from_rules(
self._vim,
custom.get(mode, [])
)
# Update mode context
self._context['mode'] = mode
# Update mode indicator
self.update_status()
def cleanup(self):
# Clear previewed buffers
if not self._is_suspend and not self._context['has_preview_window']:
self._vim.command('pclose!')
for bufnr in self._vim.vars['denite#_previewed_buffers'].keys():
if not self._vim.call('win_findbuf', bufnr):
self._vim.command('silent bdelete ' + str(bufnr))
self._vim.vars['denite#_previewed_buffers'] = {}
clearmatch(self._vim)
if not self._context['immediately']:
# Redraw to clear prompt
self._vim.command('redraw | echo ""')
self._vim.command('highlight! link CursorLine CursorLine')
if self._context['cursor_shape']:
self._vim.command('set guicursor&')
self._vim.options['guicursor'] = self._guicursor
if self._context['split'] == 'floating':
self._vim.options['titlestring'] = self._titlestring
self._vim.options['ruler'] = self._ruler
def quit_buffer(self):
self.cleanup()
if self._vim.call('bufwinnr', self._bufnr) < 0:
# Denite buffer is already closed
return
# Restore the window
if self._context['split'] == 'no':
self._window_options['cursorline'] = False
self._switch_prev_buffer()
for k, v in self._save_window_options.items():
self._vim.current.window.options[k] = v
else:
if self._context['split'] == 'tab':
self._vim.command('tabclose!')
if self._context['split'] != 'tab':
self._vim.command('close!')
self._vim.call('win_gotoid', self._prev_winid)
# Restore the position
self._vim.call('setpos', '.', self._prev_curpos)
if self._get_wininfo() and self._get_wininfo() == self._prev_wininfo:
self._vim.command(self._winrestcmd)
def get_cursor_candidate(self):
if self._cursor + self._win_cursor > self._candidates_len:
return {}
return self._candidates[self._cursor + self._win_cursor - 1]
def get_selected_candidates(self):
if not self._selected_candidates:
return [self.get_cursor_candidate()
] if self.get_cursor_candidate() else []
return [self._candidates[x] for x in self._selected_candidates]
def redraw(self, is_force=True):
self._context['is_redraw'] = is_force
if is_force:
self.gather_candidates()
if self.update_candidates():
self.update_buffer()
else:
self.update_status()
self._context['is_redraw'] = False
def quit(self):
self._denite.on_close(self._context)
self.quit_buffer()
self._result = []
return STATUS_ACCEPT
def restart(self):
self.quit_buffer()
self.init_denite()
self.gather_candidates()
self.init_buffer()
self.update_candidates()
self.change_mode(self._current_mode)
self.update_buffer()
def restore_sources(self, context):
if not self._sources_history:
return
history = self._sources_history[-1]
context['sources_queue'].append(history['sources'])
context['path'] = history['path']
self._sources_history.pop()
return STATUS_ACCEPT
def init_denite(self):
self._mode_stack = []
self._prompt.history.reset()
self._denite.start(self._context)
self._denite.on_init(self._context)
self._initialized = True
self._winheight = int(self._context['winheight'])
self._winwidth = int(self._context['winwidth'])
def gather_candidates(self):
self._selected_candidates = []
self._denite.gather_candidates(self._context)
def do_action(self, action_name, command=''):
candidates = self.get_selected_candidates()
if not candidates or not action_name:
return
self._prev_action = action_name
action = self._denite.get_action(
self._context, action_name, candidates)
if not action:
return
post_action = self._context['post_action']
is_quit = action['is_quit'] or post_action == 'quit'
if is_quit:
self.quit()
self._denite.do_action(self._context, action_name, candidates)
self._result = candidates
if command != '':
self._vim.command(command)
if is_quit and (post_action == 'open' or post_action == 'suspend'):
# Re-open denite buffer
self.init_buffer()
self.change_mode(self._current_mode)
self.redraw(False)
# Disable quit flag
is_quit = False
if not is_quit:
self._selected_candidates = []
self.redraw(action['is_redraw'])
if post_action == 'suspend':
self.suspend()
self._vim.command('wincmd p')
return STATUS_ACCEPT
return STATUS_ACCEPT if is_quit else None
def choose_action(self):
candidates = self.get_selected_candidates()
if not candidates:
return
self._vim.vars['denite#_actions'] = self._denite.get_action_names(
self._context, candidates)
clear_cmdline(self._vim)
action = self._vim.call('input', 'Action: ', '',
'customlist,denite#helper#complete_actions')
if action == '':
return
return self.do_action(action)
def move_to_pos(self, pos):
self._cursor = int(pos / self._winheight) * self._winheight
self._win_cursor = (pos % self._winheight) + 1
self.update_cursor()
def move_to_next_line(self):
if self._win_cursor + self._cursor < self._candidates_len:
if self._win_cursor < self._winheight:
self._win_cursor += 1
else:
self._cursor += 1
elif self._context['cursor_wrap']:
self.move_to_first_line()
else:
return
self.update_cursor()
def move_to_prev_line(self):
if self._win_cursor > 1:
self._win_cursor -= 1
elif self._cursor >= 1:
self._cursor -= 1
elif self._context['cursor_wrap']:
self.move_to_last_line()
else:
return
self.update_cursor()
def move_to_first_line(self):
if self._win_cursor > 1 or self._cursor > 0:
self._win_cursor = 1
self._cursor = 0
self.update_cursor()
def move_to_last_line(self):
win_max = min(self._candidates_len, self._winheight)
cur_max = self._candidates_len - win_max
if self._win_cursor < win_max or self._cursor < cur_max:
self._win_cursor = win_max
self._cursor = cur_max
self.update_cursor()
def move_to_top(self):
self._win_cursor = 1
self.update_cursor()
def move_to_middle(self):
self._win_cursor = self._winheight // 2
self.update_cursor()
def move_to_bottom(self):
self._win_cursor = self._winheight
self.update_cursor()
def scroll_window_upwards(self):
self.scroll_up(self._scroll)
def scroll_window_downwards(self):
self.scroll_down(self._scroll)
def scroll_page_backwards(self):
self.scroll_up(self._winheight - 1)
def scroll_page_forwards(self):
self.scroll_down(self._winheight - 1)
def scroll_down(self, scroll):
if self._win_cursor + self._cursor < self._candidates_len:
if self._win_cursor <= 1:
self._win_cursor = 1
self._cursor = min(self._cursor + scroll,
self._candidates_len)
elif self._win_cursor < self._winheight:
self._win_cursor = min(
self._win_cursor + scroll,
self._candidates_len,
self._winheight)
else:
self._cursor = min(
self._cursor + scroll,
self._candidates_len - self._win_cursor)
else:
return
self.update_cursor()
def scroll_up(self, scroll):
if self._win_cursor > 1:
self._win_cursor = max(self._win_cursor - scroll, 1)
elif self._cursor > 0:
self._cursor = max(self._cursor - scroll, 0)
else:
return
self.update_cursor()
def scroll_window_up_one_line(self):
if self._cursor < 1:
return self.scroll_up(1)
self._cursor -= 1
self._win_cursor += 1
self.update_cursor()
def scroll_window_down_one_line(self):
if self._win_cursor <= 1 and self._cursor > 0:
return self.scroll_down(1)
self._cursor += 1
self._win_cursor -= 1
self.update_cursor()
def scroll_cursor_to_top(self):
self._cursor += self._win_cursor - 1
self._win_cursor = 1
self.update_cursor()
def scroll_cursor_to_middle(self):
self.scroll_cursor_to_top()
while self._cursor >= 1 and self._win_cursor < self._winheight // 2:
self.scroll_window_up_one_line()
def scroll_cursor_to_bottom(self):
self.scroll_cursor_to_top()
while self._cursor >= 1 and self._win_cursor < self._winheight:
self.scroll_window_up_one_line()
# def jump_to_next_by(self, key):
# keyfunc = self._keyfunc(key)
# keys = [keyfunc(candidate) for candidate in self._candidates]
# if not keys or len(set(keys)) == 1:
# return
# current_index = self._cursor + self._win_cursor - 1
# forward_candidates = self._candidates[current_index:]
# forward_sources = groupby(forward_candidates, keyfunc)
# forward_times = len(list(next(forward_sources)[1]))
# if not forward_times:
# return
# remaining_candidates = (self._candidates_len - current_index
# - forward_times)
# if next(forward_sources, None) is None:
# # If the cursor is on the last source
# self._cursor = 0
# self._win_cursor = 1
# elif self._candidates_len < self._winheight:
# # If there is a space under the candidates
# self._cursor = 0
# self._win_cursor += forward_times
# elif remaining_candidates < self._winheight:
# self._cursor = self._candidates_len - self._winheight + 1
# self._win_cursor = self._winheight - remaining_candidates
# else:
# self._cursor += forward_times + self._win_cursor - 1
# self._win_cursor = 1
# self.update_cursor()
# def jump_to_prev_by(self, key):
# keyfunc = self._keyfunc(key)
# keys = [keyfunc(candidate) for candidate in self._candidates]
# if not keys or len(set(keys)) == 1:
# return
# current_index = self._cursor + self._win_cursor - 1
# backward_candidates = reversed(self._candidates[:current_index + 1])
# backward_sources = groupby(backward_candidates, keyfunc)
# current_source = list(next(backward_sources)[1])
# try:
# prev_source = list(next(backward_sources)[1])
# except StopIteration: # If the cursor is on the first source
# last_source = takewhile(
# lambda candidate:
# keyfunc(candidate) == keyfunc(self._candidates[-1]),
# reversed(self._candidates)
# )
# len_last_source = len(list(last_source))
# if self._candidates_len < self._winheight:
# self._cursor = 0
# self._win_cursor = self._candidates_len - len_last_source + 1
# elif len_last_source < self._winheight:
# self._cursor = self._candidates_len - self._winheight + 1
# self._win_cursor = self._winheight - len_last_source
# else:
# self._cursor = self._candidates_len - len_last_source
# self._win_cursor = 1
# else:
# back_times = len(current_source) - 1 + len(prev_source)
# remaining_candidates = (self._candidates_len - current_index
# + back_times)
# if self._candidates_len < self._winheight:
# self._cursor = 0
# self._win_cursor -= back_times
# elif remaining_candidates < self._winheight:
# self._cursor = self._candidates_len - self._winheight + 1
# self._win_cursor = self._winheight - remaining_candidates
# else:
# self._cursor -= back_times - self._win_cursor + 1
# self._win_cursor = 1
# self.update_cursor()
def quick_move(self):
def get_quick_move_table():
table = {}
context = self._context
base = self._win_cursor
for [key, number] in context['quick_move_table'].items():
number = int(number)
pos = ((base - number) if context['reversed']
else (number + base))
if pos > 0:
table[key] = pos
return table
def quick_move_redraw(table, is_define):
bufnr = self._vim.current.buffer.number
for [key, number] in table.items():
signid = 2000 + number
name = 'denite_quick_move_' + str(number)
if is_define:
self._vim.command(
f'sign define {name} text={key} texthl=Special')
self._vim.command(
f'sign place {signid} name={name} '
f'line={number} buffer={bufnr}')
else:
self._vim.command(
f'silent! sign unplace {signid} buffer={bufnr}')
self._vim.command('silent! sign undefine ' + name)
quick_move_table = get_quick_move_table()
self._vim.command('echo "Input quick match key: "')
quick_move_redraw(quick_move_table, True)
self._vim.command('redraw')
char = ''
while char == '':
char = self._vim.call('nr2char',
self._vim.call('denite#util#getchar'))
quick_move_redraw(quick_move_table, False)
if (char not in quick_move_table or
quick_move_table[char] > self._winheight):
return
self._win_cursor = quick_move_table[char]
self.update_cursor()
if self._context['quick_move'] == 'immediately':
self.do_action('default')
return True
def _keyfunc(self, key):
def wrapped(candidate):
for k in key, 'action__' + key:
try:
return str(candidate[k])
except Exception:
pass
return ''
return wrapped
def enter_mode(self, mode):
if mode == self._current_mode:
return
self._mode_stack.append(self._current_mode)
self.change_mode(mode)
def leave_mode(self):
if not self._mode_stack:
return self.quit()
self._current_mode = self._mode_stack[-1]
self._mode_stack = self._mode_stack[:-1]
self.change_mode(self._current_mode)
def suspend(self):
if self._bufnr == self._vim.current.buffer.number:
if self._context['auto_resume']:
self._vim.command('autocmd denite WinEnter <buffer> ' +
'Denite -resume -buffer_name=' +
self._context['buffer_name'])
for mapping in ['i', 'a', '<CR>']:
self._vim.command(f'nnoremap <silent><buffer> {mapping} ' +
':<C-u>Denite -resume -buffer_name=' +
f"{self._context['buffer_name']}<CR>")
self._is_suspend = True
self._options['modifiable'] = False
return STATUS_ACCEPT
| 36.365807
| 79
| 0.560762
|
6bd719864180b59c3d21d158c7f7b73ceb5e6694
| 1,040
|
py
|
Python
|
tests/unit/chroma_core/lib/storage_plugin/subscription_plugin.py
|
beevans/integrated-manager-for-lustre
|
6b7e49b8a58058e6139ad815a4388f21a581dfa0
|
[
"MIT"
] | 52
|
2018-09-13T03:26:23.000Z
|
2022-03-25T16:51:37.000Z
|
tests/unit/chroma_core/lib/storage_plugin/subscription_plugin.py
|
beevans/integrated-manager-for-lustre
|
6b7e49b8a58058e6139ad815a4388f21a581dfa0
|
[
"MIT"
] | 1,264
|
2018-06-15T19:50:57.000Z
|
2022-03-28T08:19:04.000Z
|
tests/unit/chroma_core/lib/storage_plugin/subscription_plugin.py
|
beevans/integrated-manager-for-lustre
|
6b7e49b8a58058e6139ad815a4388f21a581dfa0
|
[
"MIT"
] | 27
|
2018-06-18T08:51:59.000Z
|
2022-03-16T15:35:34.000Z
|
from chroma_core.lib.storage_plugin.api import attributes
from chroma_core.lib.storage_plugin.api.identifiers import GlobalId, ScopedId
from chroma_core.lib.storage_plugin.api.plugin import Plugin
from chroma_core.lib.storage_plugin.api import resources
from chroma_core.lib.storage_plugin.api import relations
version = 1
class Controller(resources.ScannableResource):
class Meta:
identifier = GlobalId("address")
address = attributes.String()
class Lun(resources.LogicalDrive):
class Meta:
identifier = ScopedId("lun_id")
lun_id = attributes.String()
class Presentation(resources.Resource):
lun_id = attributes.String()
path = attributes.String()
host_id = attributes.Integer()
class Meta:
identifier = ScopedId("lun_id", "host_id")
relations = [
relations.Provide(provide_to=resources.DeviceNode, attributes=["host_id", "path"]),
relations.Subscribe(subscribe_to=Lun, attributes=["lun_id"]),
]
class TestPlugin(Plugin):
pass
| 26.666667
| 95
| 0.722115
|
b922aa64f421a6b224dbf8ab570f0bec98346ef3
| 1,527
|
py
|
Python
|
lib/spack/spack/build_systems/octave.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
lib/spack/spack/build_systems/octave.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
lib/spack/spack/build_systems/octave.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect
from spack.directives import extends
from spack.package import PackageBase, run_after
class OctavePackage(PackageBase):
"""Specialized class for Octave packages. See
https://www.gnu.org/software/octave/doc/v4.2.0/Installing-and-Removing-Packages.html
for more information.
This class provides the following phases that can be overridden:
1. :py:meth:`~.OctavePackage.install`
"""
# Default phases
phases = ['install']
# To be used in UI queries that require to know which
# build-system class we are using
build_system_class = 'OctavePackage'
extends('octave')
def setup_build_environment(self, env):
# octave does not like those environment variables to be set:
env.unset('CC')
env.unset('CXX')
env.unset('FC')
def install(self, spec, prefix):
"""Install the package from the archive file"""
inspect.getmodule(self).octave(
'--quiet',
'--norc',
'--built-in-docstrings-file=/dev/null',
'--texi-macros-file=/dev/null',
'--eval', 'pkg prefix %s; pkg install %s' %
(prefix, self.stage.archive_file))
# Testing
# Check that self.prefix is there after installation
run_after('install')(PackageBase.sanity_check_prefix)
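    # Illustration (added; the paths below are hypothetical): for a package
    # staged at /tmp/foo.tar.gz installing into /opt/spack/foo, the install
    # phase above effectively runs:
    #   octave --quiet --norc --built-in-docstrings-file=/dev/null \
    #          --texi-macros-file=/dev/null \
    #          --eval 'pkg prefix /opt/spack/foo; pkg install /tmp/foo.tar.gz'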
| 29.941176
| 88
| 0.664047
|
be035b6bb59219f1135476c3b79cd3e2653d2c4b
| 3,776
|
py
|
Python
|
tests/gui/run_analysis_test.py
|
sumau/PredictCode
|
e2a2d5a8fa5d83f011c33e18d4ce6ac7e1429aa8
|
[
"Artistic-2.0"
] | 18
|
2017-04-19T09:17:19.000Z
|
2021-05-24T08:53:28.000Z
|
tests/gui/run_analysis_test.py
|
sumau/PredictCode
|
e2a2d5a8fa5d83f011c33e18d4ce6ac7e1429aa8
|
[
"Artistic-2.0"
] | 8
|
2017-06-11T17:46:35.000Z
|
2021-06-07T10:49:10.000Z
|
tests/gui/run_analysis_test.py
|
sumau/PredictCode
|
e2a2d5a8fa5d83f011c33e18d4ce6ac7e1429aa8
|
[
"Artistic-2.0"
] | 10
|
2017-07-19T18:29:37.000Z
|
2020-11-12T22:06:45.000Z
|
import pytest
import unittest.mock as mock
import open_cp.gui.run_analysis as run_analysis
import open_cp.gui.predictors as predictors
from open_cp.gui.common import CoordType
import open_cp.gui.analysis as analysis
import open_cp.predictors
import datetime
@pytest.fixture
def log_queue():
import queue
return queue.Queue()
@pytest.fixture
def runAnalysis(log_queue):
predictors.set_queue_logging(log_queue)
class Model():
def selected_by_crime_type_data(self):
return self.times, self.xcoords, self.ycoords
def clone(self):
return self
model = Model()
model.analysis_tools_model = analysis.AnalysisToolsModel(model)
model.comparison_model = analysis.ComparisonModel(model)
model.coord_type = CoordType.XY
model.times = [datetime.datetime(2017,5,10,12,30)]
model.xcoords = [3]
model.ycoords = [17]
model.time_range = (datetime.datetime(2017,5,4,0,0), None,
datetime.datetime(2017,5,10,11,30),
datetime.datetime(2017,5,11,13,30))
model.analysis_tools_model.add(predictors.grid.GridProvider)
model.comparison_model.add(predictors.pred_type.PredType)
model.analysis_tools_model.add(predictors.naive.CountingGrid)
controller = mock.MagicMock()
controller.model = model
with mock.patch("open_cp.gui.tk.run_analysis_view.RunAnalysisView") as mock_view:
yield run_analysis.RunAnalysis(None, controller)
@pytest.fixture
def locator_mock():
with mock.patch("open_cp.gui.run_analysis.locator") as locator_mock:
yield locator_mock
def print_log(queue):
if not queue.empty():
import logging
formatter = logging.Formatter("{asctime} {levelname} : {message}", style="{")
while not queue.empty():
record = queue.get()
print(formatter.format(record))
def get_thread(locator_mock):
pool = locator_mock.get("pool")
assert len(pool.method_calls) == 1
name, args, kwargs = pool.method_calls[0]
assert name == "submit"
off_thread = args[0]
return off_thread
def test_controller_runs(runAnalysis, log_queue, locator_mock):
runAnalysis.run()
get_thread(locator_mock)
print_log(log_queue)
def test_model(runAnalysis):
model = run_analysis.RunAnalysisModel(runAnalysis, runAnalysis.main_model)
assert len(model.projectors) == 1
assert len(model.projectors['Coordinates already projected']) == 1
import open_cp.gui.predictors.lonlat
assert isinstance(model.projectors['Coordinates already projected'][0],
open_cp.gui.predictors.lonlat.PassThrough.Task)
assert len(model.grids) == 1
assert len(model.grids['Grid 100x100m @ (0m, 0m)']) == 1
import open_cp.gui.predictors.grid
assert isinstance(model.grids['Grid 100x100m @ (0m, 0m)'][0],
open_cp.gui.predictors.grid.GridProvider.Task)
assert len(model.grid_prediction_tasks) == 1
assert len(model.grid_prediction_tasks['Counting Grid naive predictor']) == 1
import open_cp.gui.predictors.naive
assert isinstance(model.grid_prediction_tasks['Counting Grid naive predictor'][0],
open_cp.gui.predictors.naive.CountingGrid.Task)
@pytest.fixture
def pool():
with mock.patch("open_cp.gui.run_analysis.pool") as pool_mock:
yield pool_mock
def test_controller_tasks(runAnalysis, log_queue, locator_mock):
runAnalysis.run()
off_thread = get_thread(locator_mock)
off_thread()
assert str(off_thread.results[0][0]) == "projection: Coordinates already projected, grid: Grid 100x100m @ (0m, 0m), prediction_type: Counting Grid naive predictor, prediction_date: 2017-05-10 00:00:00, prediction_length: 1 day, 0:00:00"
assert isinstance(off_thread.results[0][1], open_cp.predictors.GridPredictionArray)
| 37.386139
| 240
| 0.724576
|
cf5a1a86e8f8a65b87b8f5ec5394f6261c05b150
| 1,820
|
py
|
Python
|
vendor/munin/gearman.py
|
Paul3MK/NewsBlur
|
f912d100c2867e5366fca92abadc50d4253a41d8
|
[
"MIT"
] | 3,073
|
2015-01-01T07:20:18.000Z
|
2022-03-31T20:33:41.000Z
|
vendor/munin/gearman.py
|
Paul3MK/NewsBlur
|
f912d100c2867e5366fca92abadc50d4253a41d8
|
[
"MIT"
] | 1,054
|
2015-01-02T13:32:35.000Z
|
2022-03-30T04:21:21.000Z
|
vendor/munin/gearman.py
|
Paul3MK/NewsBlur
|
f912d100c2867e5366fca92abadc50d4253a41d8
|
[
"MIT"
] | 676
|
2015-01-03T16:40:29.000Z
|
2022-03-30T14:00:40.000Z
|
#!/usr/bin/env python
import os
import re
import socket
from vendor.munin import MuninPlugin
worker_re = re.compile(r'^(?P<fd>\d+) (?P<ip>[\d\.]+) (?P<client_id>[^\s]+) :\s?(?P<abilities>.*)$')
class MuninGearmanPlugin(MuninPlugin):
category = "Gearman"
def __init__(self):
super(MuninGearmanPlugin, self).__init__()
addr = os.environ.get('GM_SERVER') or "127.0.0.1"
port = int(addr.split(':')[-1]) if ':' in addr else 4730
host = addr.split(':')[0]
self.addr = (host, port)
self._sock = None
def connect(self):
if not self._sock:
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect(self.addr)
return self._sock
def disconnect(self):
if self._sock:
self._sock.close()
def get_workers(self):
sock = self.connect()
sock.send("workers\n")
buf = ""
while ".\n" not in buf:
buf += sock.recv(8192)
info = []
for l in buf.split('\n'):
if l.strip() == '.':
break
m = worker_re.match(l)
i = m.groupdict()
i['abilities'] = [x for x in i['abilities'].split(' ') if x]
info.append(i)
return info
def get_status(self):
sock = self.connect()
sock.send("status\n")
buf = ""
while ".\n" not in buf:
buf += sock.recv(8192)
info = {}
for l in buf.split('\n'):
l = l.strip()
if l == '.':
break
counts = l.split('\t')
info[counts[0]] = dict(
total = int(counts[1]),
running = int(counts[2]),
workers = int(counts[3]),
)
return info
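        # Example added for clarity (values are hypothetical): a status line
        # such as "resize\t10\t2\t3" is parsed by the loop above into
        #   {'resize': {'total': 10, 'running': 2, 'workers': 3}}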
| 27.164179
| 100
| 0.487912
|
4da35b601a682f0b7a6379fe5e1963ca0fe22dd4
| 8,133
|
py
|
Python
|
views.py
|
WPRDC/property-api
|
980f541b07bef3c8842994cfb903b42cc2c25064
|
[
"MIT"
] | 1
|
2021-10-01T18:35:46.000Z
|
2021-10-01T18:35:46.000Z
|
views.py
|
WPRDC/property-api
|
980f541b07bef3c8842994cfb903b42cc2c25064
|
[
"MIT"
] | null | null | null |
views.py
|
WPRDC/property-api
|
980f541b07bef3c8842994cfb903b42cc2c25064
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
import time
import json
import csv
from collections import OrderedDict as OD, defaultdict
from .models import CKANResource
from .utils import get_data, get_batch_data, carto_intersect, to_geojson, to_csv, data_in_shape, get_parcels, \
get_owner_name
from .tasks import async_data_in_shape
DATATYPES = ['json', 'geojson', 'csv', 'carto']
def index(request):
return render(request, 'index.html')
def single(request):
try:
pin = request.GET['parcel_id']
except KeyError:
return JsonResponse({'success': False, 'help': 'parcel_id required'}, status=400)
resources = CKANResource.objects.all()
failed_searches = []
data = {}
geo = {}
for resource in resources:
success, data[resource.slug] = get_data(pin, resource)
if not success:
failed_searches.append(resource.name)
if success and resource.has_geo:
try:
geo = {'latitude': data[resource.slug][0][resource.lat_field],
'longitude': data[resource.slug][0][resource.lon_field]}
except:
geo = {'latitude': '', 'longitude': ''}
response = OD(
[('success', True),
('help', 'Data for parcel {}.'.format(pin)),
('geo', geo),
('owner', get_owner_name(pin)),
('results', data),
('failed_searches', failed_searches), ]
)
return JsonResponse(response)
def single_parcel(request, pin=""):
if not pin:
return JsonResponse({'success': False, 'help': 'parcel_id required'}, status=400)
resources = CKANResource.objects.all()
failed_searches = []
data = {}
geo = {}
for resource in resources:
success, data[resource.slug] = get_data(pin, resource)
if not success:
failed_searches.append(resource.name)
if success and resource.has_geo:
geo = {
'centroid': {
'type': 'Point',
'coordinates': [data[resource.slug][0][resource.lon_field],
data[resource.slug][0][resource.lat_field]]
},
'boundary': {}
}
response = OD(
[('success', True),
('help', 'Data for parcel {}.'.format(pin)),
('geo', geo),
('owner', get_owner_name(pin)),
('data', data),
('failed_searches', failed_searches), ]
)
return JsonResponse(response)
def batch(request):
try:
pins = request.GET['parcel_ids']
except KeyError:
return JsonResponse({'success': False, 'help': 'parcel_ids required'}, status=400)
resources = CKANResource.objects.all()
failed_searches, data, geo = [], {}, {}
pins = pins.split(',')
for resource in resources:
success, data[resource.slug] = get_batch_data(pins, resource)
if not success:
failed_searches.append(resource.name)
response = OD(
[('success', True),
('help', 'Data for parcel {}.'.format(pins)),
('geo', geo),
('results', data),
('failed_searches', failed_searches), ]
)
return JsonResponse(response)
def within(request):
try:
shape = request.GET['shape']
except KeyError:
        return JsonResponse({'success': False, 'help': 'must provide the shape of the region you want to search in'}, status=400)
status, pins = carto_intersect(shape)
response = {'success': True, 'help': '', 'pins': pins}
    if status != 200:
response['success'] = False
response['help'] = 'call to carto failed'
return JsonResponse(response, status=status)
def address_search(request):
try:
num = request.GET['number']
street = request.GET['street']
city = request.GET['city']
zip = request.GET['zip']
except KeyError:
return JsonResponse({'success': False, 'help': 'must submit street number, street name, city and zip code'},
status=400)
@csrf_exempt
def data_within(request):
# Get shape from request, if not present return error
try:
shape = request.POST['shape']
except KeyError:
        return JsonResponse({'success': False, 'help': 'must provide the shape of the region you want to search in'}, status=400)
# Get fields from request and convert to dict keyed by resource
fields = {}
if 'fields' in request.POST:
fs = json.loads(request.POST['fields'])
for f in fs:
if f['r'] in fields:
fields[f['r']].append(f['f'])
else:
fields[f['r']] = [f['f']]
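    # Example added for clarity (resource/field names are hypothetical): a
    # posted field list of [{"r": "assessments", "f": "year"},
    # {"r": "assessments", "f": "value"}] becomes
    #   {"assessments": ["year", "value"]}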
# data, fields_set = async_data_in_shape(shape, fields)
getter = async_data_in_shape.delay(shape, fields)
# data, fields_set = getter.get()
return JsonResponse({'job_id': getter.id})
def get_collected_data(request):
if 'job' in request.GET:
job_id = request.GET['job']
else:
return HttpResponse('No job id given.', status=400)
# Get data type
if 'type' not in request.GET:
datatype = 'json'
else:
datatype = request.GET['type']
if datatype not in DATATYPES:
return JsonResponse({'success': False, 'help': datatype + ' is not a valid datatype'}, status=400)
job = async_data_in_shape.AsyncResult(job_id)
if job.ready():
data, fields_set = job.get()
else:
return HttpResponse('Job not ready.', status=400)
if datatype == 'json':
response = {'success': True, 'help': '', 'data': data}
return JsonResponse(response, status=200)
elif datatype == 'geojson':
data = to_geojson(data, fields_set)
response = HttpResponse(content_type='text/json')
response['Content-Disposition'] = 'attachment; filename="parcel_data.geojson"'
json.dump(data, response)
return response
elif datatype == 'csv':
print('making csv', time.clock())
data, new_fields = to_csv(data, fields_set)
print('made csv', time.clock())
fields_set = ['PIN', 'geom'] + new_fields
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="parcel_data.csv"'
dwriter = csv.DictWriter(response, fieldnames=fields_set)
dwriter.writeheader()
dwriter.writerows(data)
print('done', time.clock())
return response
def get_progress(request):
""" A view to report the progress to the user """
if 'job' in request.GET:
job_id = request.GET['job']
else:
return HttpResponse('No job id given.')
job = async_data_in_shape.AsyncResult(job_id)
if job.state == "PROGRESS":
data = job.result
elif job.state == "SUCCESS":
data = {'task': 'Complete', 'percent': 100}
else:
data = {'task': 'Starting', 'percent': 0}
return JsonResponse(data)
#################
## BETA ##
###############################################################################
def beta_parcels(request, parcel_ids=None):
resources = CKANResource.objects.all()
failed_searches, data, geo = [], {}, {}
print("IDs: " + parcel_ids)
response = OD(
[('success', False),
('help', 'Data for parcels'),
('results', []),
('failed_searches', failed_searches), ]
)
if parcel_ids:
pins = parcel_ids.split(',')
print("PINs: " + str(pins))
results, failed_searches = get_parcels(pins, resources)
response['success'] = True
response['results'] = results
response['failed_searches'] = failed_searches
return JsonResponse(response)
    else:
        response['help'] = 'No parcel IDs provided'
        return JsonResponse(response, status=400)
| 31.041985
| 117
| 0.567441
|
7c75a03a1d2f6777905ce47f27715bd3855a8d75
| 18,758
|
py
|
Python
|
scipy/interpolate/tests/test_polyint.py
|
frewsxcv/scipy
|
b8b612c54c60e22e2a186dde6264de046ab1fe2b
|
[
"BSD-3-Clause"
] | 1
|
2019-04-27T16:04:14.000Z
|
2019-04-27T16:04:14.000Z
|
scipy/interpolate/tests/test_polyint.py
|
joferkington/scipy
|
6a7327e8bb8248b2ea165180bc602edf1ab33dda
|
[
"BSD-3-Clause"
] | 5
|
2021-03-19T08:36:48.000Z
|
2022-01-13T01:52:34.000Z
|
scipy/interpolate/tests/test_polyint.py
|
joferkington/scipy
|
6a7327e8bb8248b2ea165180bc602edf1ab33dda
|
[
"BSD-3-Clause"
] | 1
|
2019-08-13T21:23:57.000Z
|
2019-08-13T21:23:57.000Z
|
from __future__ import division, print_function, absolute_import
import warnings
from numpy.testing import (assert_almost_equal, assert_array_equal,
TestCase, run_module_suite, assert_allclose, assert_equal, assert_)
from scipy.interpolate import (KroghInterpolator, krogh_interpolate,
BarycentricInterpolator, barycentric_interpolate,
PiecewisePolynomial, piecewise_polynomial_interpolate,
approximate_taylor_polynomial, pchip, PchipInterpolator)
from scipy.lib.six import xrange
import scipy
import numpy as np
from scipy.interpolate import splrep, splev
def check_shape(interpolator_cls, x_shape, y_shape, deriv_shape=None, axis=0):
np.random.seed(1234)
x = [-1, 0, 1]
s = list(range(1, len(y_shape)+1))
s.insert(axis % (len(y_shape)+1), 0)
y = np.random.rand(*((3,) + y_shape)).transpose(s)
# Cython code chokes on y.shape = (0, 3) etc, skip them
if y.size == 0:
return
xi = np.zeros(x_shape)
yi = interpolator_cls(x, y, axis=axis)(xi)
target_shape = ((deriv_shape or ()) + y.shape[:axis]
+ x_shape + y.shape[axis:][1:])
assert_equal(yi.shape, target_shape)
# check it works also with lists
if x_shape and y.size > 0:
interpolator_cls(list(x), list(y), axis=axis)(list(xi))
# check also values
if xi.size > 0 and deriv_shape is None:
bs_shape = (y.shape[:axis] + ((1,)*len(x_shape)) + y.shape[axis:][1:])
yv = y[((slice(None,None,None),)*(axis % y.ndim))+(1,)].reshape(bs_shape)
yi, y = np.broadcast_arrays(yi, yv)
assert_allclose(yi, y)
SHAPES = [(), (0,), (1,), (3,2,5)]
def test_shapes():
for ip in [KroghInterpolator, BarycentricInterpolator, pchip]:
for s1 in SHAPES:
for s2 in SHAPES:
for axis in range(-len(s2), len(s2)):
yield check_shape, ip, s1, s2, None, axis
def test_derivs_shapes():
def krogh_derivs(x, y, axis=0):
return KroghInterpolator(x, y, axis).derivatives
for s1 in SHAPES:
for s2 in SHAPES:
for axis in range(-len(s2), len(s2)):
yield check_shape, krogh_derivs, s1, s2, (3,), axis
def test_deriv_shapes():
def krogh_deriv(x, y, axis=0):
return KroghInterpolator(x, y, axis).derivative
def pchip_deriv(x, y, axis=0):
return pchip(x, y, axis).derivative()
def pchip_deriv2(x, y, axis=0):
return pchip(x, y, axis).derivative(2)
def pchip_deriv_inplace(x, y, axis=0):
class P(PchipInterpolator):
def __call__(self, x):
return PchipInterpolator.__call__(self, x, 1)
pass
return P(x, y, axis)
for ip in [krogh_deriv, pchip_deriv, pchip_deriv2, pchip_deriv_inplace]:
for s1 in SHAPES:
for s2 in SHAPES:
for axis in range(-len(s2), len(s2)):
yield check_shape, ip, s1, s2, (), axis
def _check_complex(ip):
x = [1, 2, 3, 4]
y = [1, 2, 1j, 3]
p = ip(x, y)
assert_allclose(y, p(x))
def test_complex():
for ip in [KroghInterpolator, BarycentricInterpolator, pchip]:
yield _check_complex, ip
class CheckKrogh(TestCase):
def setUp(self):
self.true_poly = scipy.poly1d([-2,3,1,5,-4])
self.test_xs = np.linspace(-1,1,100)
self.xs = np.linspace(-1,1,5)
self.ys = self.true_poly(self.xs)
def test_lagrange(self):
P = KroghInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_scalar(self):
P = KroghInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(7),P(7))
assert_almost_equal(self.true_poly(np.array(7)), P(np.array(7)))
def test_derivatives(self):
P = KroghInterpolator(self.xs,self.ys)
D = P.derivatives(self.test_xs)
for i in xrange(D.shape[0]):
assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
D[i])
def test_low_derivatives(self):
P = KroghInterpolator(self.xs,self.ys)
D = P.derivatives(self.test_xs,len(self.xs)+2)
for i in xrange(D.shape[0]):
assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
D[i])
def test_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
m = 10
r = P.derivatives(self.test_xs,m)
for i in xrange(m):
assert_almost_equal(P.derivative(self.test_xs,i),r[i])
def test_high_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
for i in xrange(len(self.xs),2*len(self.xs)):
assert_almost_equal(P.derivative(self.test_xs,i),
np.zeros(len(self.test_xs)))
def test_hermite(self):
xs = [0,0,0,1,1,1,2]
ys = [self.true_poly(0),
self.true_poly.deriv(1)(0),
self.true_poly.deriv(2)(0),
self.true_poly(1),
self.true_poly.deriv(1)(1),
self.true_poly.deriv(2)(1),
self.true_poly(2)]
        P = KroghInterpolator(xs,ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_vector(self):
xs = [0, 1, 2]
ys = np.array([[0,1],[1,0],[2,1]])
P = KroghInterpolator(xs,ys)
Pi = [KroghInterpolator(xs,ys[:,i]) for i in xrange(ys.shape[1])]
test_xs = np.linspace(-1,3,100)
assert_almost_equal(P(test_xs),
np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))
assert_almost_equal(P.derivatives(test_xs),
np.transpose(np.asarray([p.derivatives(test_xs) for p in Pi]),
(1,2,0)))
def test_empty(self):
P = KroghInterpolator(self.xs,self.ys)
assert_array_equal(P([]), [])
def test_shapes_scalarvalue(self):
P = KroghInterpolator(self.xs,self.ys)
assert_array_equal(np.shape(P(0)), ())
assert_array_equal(np.shape(P(np.array(0))), ())
assert_array_equal(np.shape(P([0])), (1,))
assert_array_equal(np.shape(P([0,1])), (2,))
def test_shapes_scalarvalue_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
n = P.n
assert_array_equal(np.shape(P.derivatives(0)), (n,))
assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,))
assert_array_equal(np.shape(P.derivatives([0])), (n,1))
assert_array_equal(np.shape(P.derivatives([0,1])), (n,2))
def test_shapes_vectorvalue(self):
P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
assert_array_equal(np.shape(P(0)), (3,))
assert_array_equal(np.shape(P([0])), (1,3))
assert_array_equal(np.shape(P([0,1])), (2,3))
def test_shapes_1d_vectorvalue(self):
P = KroghInterpolator(self.xs,np.outer(self.ys,[1]))
assert_array_equal(np.shape(P(0)), (1,))
assert_array_equal(np.shape(P([0])), (1,1))
assert_array_equal(np.shape(P([0,1])), (2,1))
def test_shapes_vectorvalue_derivative(self):
P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
n = P.n
assert_array_equal(np.shape(P.derivatives(0)), (n,3))
assert_array_equal(np.shape(P.derivatives([0])), (n,1,3))
assert_array_equal(np.shape(P.derivatives([0,1])), (n,2,3))
def test_wrapper(self):
P = KroghInterpolator(self.xs,self.ys)
assert_almost_equal(P(self.test_xs),krogh_interpolate(self.xs,self.ys,self.test_xs))
assert_almost_equal(P.derivative(self.test_xs,2),krogh_interpolate(self.xs,self.ys,self.test_xs,der=2))
assert_almost_equal(P.derivatives(self.test_xs,2),krogh_interpolate(self.xs,self.ys,self.test_xs,der=[0,1]))
def test_int_inputs(self):
# Check input args are cast correctly to floats, gh-3669
x = [0, 234,468,702,936,1170,1404,2340,3744,6084,8424,13104,60000]
offset_cdf = np.array([-0.95, -0.86114777, -0.8147762, -0.64072425, -0.48002351,
-0.34925329, -0.26503107, -0.13148093, -0.12988833, -0.12979296,
-0.12973574, -0.08582937, 0.05])
f = KroghInterpolator(x, offset_cdf)
assert_allclose(abs((f(x) - offset_cdf) / f.derivative(x, 1)), 0, atol=1e-10)
class CheckTaylor(TestCase):
def test_exponential(self):
degree = 5
p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
for i in xrange(degree+1):
assert_almost_equal(p(0),1)
p = p.deriv()
assert_almost_equal(p(0),0)
class CheckBarycentric(TestCase):
def setUp(self):
self.true_poly = scipy.poly1d([-2,3,1,5,-4])
self.test_xs = np.linspace(-1,1,100)
self.xs = np.linspace(-1,1,5)
self.ys = self.true_poly(self.xs)
def test_lagrange(self):
P = BarycentricInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_scalar(self):
P = BarycentricInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(7),P(7))
assert_almost_equal(self.true_poly(np.array(7)),P(np.array(7)))
def test_delayed(self):
P = BarycentricInterpolator(self.xs)
P.set_yi(self.ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_append(self):
P = BarycentricInterpolator(self.xs[:3],self.ys[:3])
P.add_xi(self.xs[3:],self.ys[3:])
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_vector(self):
xs = [0, 1, 2]
ys = np.array([[0,1],[1,0],[2,1]])
P = BarycentricInterpolator(xs,ys)
Pi = [BarycentricInterpolator(xs,ys[:,i]) for i in xrange(ys.shape[1])]
test_xs = np.linspace(-1,3,100)
assert_almost_equal(P(test_xs),
np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))
def test_shapes_scalarvalue(self):
P = BarycentricInterpolator(self.xs,self.ys)
assert_array_equal(np.shape(P(0)), ())
assert_array_equal(np.shape(P(np.array(0))), ())
assert_array_equal(np.shape(P([0])), (1,))
assert_array_equal(np.shape(P([0,1])), (2,))
def test_shapes_vectorvalue(self):
P = BarycentricInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
assert_array_equal(np.shape(P(0)), (3,))
assert_array_equal(np.shape(P([0])), (1,3))
assert_array_equal(np.shape(P([0,1])), (2,3))
def test_shapes_1d_vectorvalue(self):
P = BarycentricInterpolator(self.xs,np.outer(self.ys,[1]))
assert_array_equal(np.shape(P(0)), (1,))
assert_array_equal(np.shape(P([0])), (1,1))
assert_array_equal(np.shape(P([0,1])), (2,1))
def test_wrapper(self):
P = BarycentricInterpolator(self.xs,self.ys)
assert_almost_equal(P(self.test_xs),barycentric_interpolate(self.xs,self.ys,self.test_xs))
class CheckPiecewise(TestCase):
def setUp(self):
self.tck = splrep([0,1,2,3,4,5], [0,10,-1,3,7,2], s=0)
self.test_xs = np.linspace(-1,6,100)
self.spline_ys = splev(self.test_xs, self.tck)
self.spline_yps = splev(self.test_xs, self.tck, der=1)
self.xi = np.unique(self.tck[0])
self.yi = [[splev(x, self.tck, der=j) for j in xrange(3)] for x in self.xi]
def test_construction(self):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
P = PiecewisePolynomial(self.xi, self.yi, 3)
assert_almost_equal(P(self.test_xs), self.spline_ys)
def test_scalar(self):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
P = PiecewisePolynomial(self.xi,self.yi,3)
assert_almost_equal(P(self.test_xs[0]),self.spline_ys[0])
assert_almost_equal(P.derivative(self.test_xs[0],1),self.spline_yps[0])
assert_almost_equal(P(np.array(self.test_xs[0])),self.spline_ys[0])
assert_almost_equal(P.derivative(np.array(self.test_xs[0]),1),
self.spline_yps[0])
def test_derivative(self):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
P = PiecewisePolynomial(self.xi,self.yi,3)
assert_almost_equal(P.derivative(self.test_xs,1),self.spline_yps)
def test_derivatives(self):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
P = PiecewisePolynomial(self.xi,self.yi,3)
m = 4
r = P.derivatives(self.test_xs,m)
#print r.shape, r
for i in xrange(m):
assert_almost_equal(P.derivative(self.test_xs,i),r[i])
def test_vector(self):
xs = [0, 1, 2]
ys = [[[0,1]],[[1,0],[-1,-1]],[[2,1]]]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
P = PiecewisePolynomial(xs,ys)
Pi = [PiecewisePolynomial(xs,[[yd[i] for yd in y] for y in ys])
for i in xrange(len(ys[0][0]))]
test_xs = np.linspace(-1,3,100)
assert_almost_equal(P(test_xs),
np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))
assert_almost_equal(P.derivative(test_xs,1),
np.transpose(np.asarray([p.derivative(test_xs,1) for p in Pi]),
(1,0)))
def test_incremental(self):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
P = PiecewisePolynomial([self.xi[0]], [self.yi[0]], 3)
for i in xrange(1,len(self.xi)):
P.append(self.xi[i],self.yi[i],3)
assert_almost_equal(P(self.test_xs),self.spline_ys)
def test_shapes_scalarvalue(self):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
P = PiecewisePolynomial(self.xi,self.yi,4)
assert_array_equal(np.shape(P(0)), ())
assert_array_equal(np.shape(P(np.array(0))), ())
assert_array_equal(np.shape(P([0])), (1,))
assert_array_equal(np.shape(P([0,1])), (2,))
def test_shapes_scalarvalue_derivative(self):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
P = PiecewisePolynomial(self.xi,self.yi,4)
n = 4
assert_array_equal(np.shape(P.derivative(0,1)), ())
assert_array_equal(np.shape(P.derivative(np.array(0),1)), ())
assert_array_equal(np.shape(P.derivative([0],1)), (1,))
assert_array_equal(np.shape(P.derivative([0,1],1)), (2,))
def test_shapes_vectorvalue(self):
yi = np.multiply.outer(np.asarray(self.yi),np.arange(3))
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
P = PiecewisePolynomial(self.xi,yi,4)
assert_array_equal(np.shape(P(0)), (3,))
assert_array_equal(np.shape(P([0])), (1,3))
assert_array_equal(np.shape(P([0,1])), (2,3))
def test_shapes_vectorvalue_1d(self):
yi = np.multiply.outer(np.asarray(self.yi),np.arange(1))
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
P = PiecewisePolynomial(self.xi,yi,4)
assert_array_equal(np.shape(P(0)), (1,))
assert_array_equal(np.shape(P([0])), (1,1))
assert_array_equal(np.shape(P([0,1])), (2,1))
def test_shapes_vectorvalue_derivative(self):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
P = PiecewisePolynomial(self.xi, np.multiply.outer(self.yi,
np.arange(3)),4)
n = 4
assert_array_equal(np.shape(P.derivative(0,1)), (3,))
assert_array_equal(np.shape(P.derivative([0],1)), (1,3))
assert_array_equal(np.shape(P.derivative([0,1],1)), (2,3))
def test_wrapper(self):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
P = PiecewisePolynomial(self.xi,self.yi)
assert_almost_equal(P(self.test_xs),
piecewise_polynomial_interpolate(self.xi, self.yi,
self.test_xs))
assert_almost_equal(P.derivative(self.test_xs,2),
piecewise_polynomial_interpolate(self.xi,
self.yi,
self.test_xs,
der=2))
assert_almost_equal(P.derivatives(self.test_xs,2),
piecewise_polynomial_interpolate(self.xi,
self.yi,
self.test_xs,
der=[0,1]))
class TestPCHIP(TestCase):
def _make_random(self, npts=20):
np.random.seed(1234)
xi = np.sort(np.random.random(npts))
yi = np.random.random(npts)
return pchip(xi, yi), xi, yi
def test_overshoot(self):
# PCHIP should not overshoot
p, xi, yi = self._make_random()
for i in range(len(xi)-1):
x1, x2 = xi[i], xi[i+1]
y1, y2 = yi[i], yi[i+1]
if y1 > y2:
y1, y2 = y2, y1
xp = np.linspace(x1, x2, 10)
yp = p(xp)
assert_(((y1 <= yp) & (yp <= y2)).all())
def test_monotone(self):
        # PCHIP should preserve monotonicity
p, xi, yi = self._make_random()
for i in range(len(xi)-1):
x1, x2 = xi[i], xi[i+1]
y1, y2 = yi[i], yi[i+1]
xp = np.linspace(x1, x2, 10)
yp = p(xp)
assert_(((y2-y1) * (yp[1:] - yp[:1]) > 0).all())
def test_cast(self):
# regression test for integer input data, see gh-3453
data = np.array([[0, 4, 12, 27, 47, 60, 79, 87, 99, 100],
[-33, -33, -19, -2, 12, 26, 38, 45, 53, 55]])
xx = np.arange(100)
curve = pchip(data[0], data[1])(xx)
data1 = data * 1.0
curve1 = pchip(data1[0], data1[1])(xx)
assert_allclose(curve, curve1, atol=1e-14, rtol=1e-14)
if __name__ == '__main__':
run_module_suite()
| 38.997921
| 116
| 0.590521
|
4bed5f6a1adaacbf3883edf68cc1d1aede9766b7
| 5,713
|
py
|
Python
|
blesuite/replay/btsnoop/btsnoop/btsnoop.py
|
dbisu/BLESuite
|
37dccb23e14661bef148790a4e7c44e664fc528f
|
[
"MIT"
] | null | null | null |
blesuite/replay/btsnoop/btsnoop/btsnoop.py
|
dbisu/BLESuite
|
37dccb23e14661bef148790a4e7c44e664fc528f
|
[
"MIT"
] | null | null | null |
blesuite/replay/btsnoop/btsnoop/btsnoop.py
|
dbisu/BLESuite
|
37dccb23e14661bef148790a4e7c44e664fc528f
|
[
"MIT"
] | null | null | null |
"""
Parse btsnoop_hci.log binary data (similar to wireshark)
usage:
./parse.py <filename>
"""
import datetime
import sys
import struct
"""
Record flags conform to:
- bit 0 0 = sent, 1 = received
- bit 1 0 = data, 1 = command/event
- bit 2-31 reserved
Direction is relative to host / DTE. i.e. for Bluetooth controllers,
Send is Host->Controller, Receive is Controller->Host
"""
BTSNOOP_FLAGS = {
0 : ("host", "controller", "data"),
1 : ("controller", "host", "data"),
2 : ("host", "controller", "command"),
3 : ("controller", "host", "event")
}
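# Illustrative check (added; not part of the original module): only the two
# low-order flag bits described above are meaningful, so a raw flags value of
# 3 decodes as "received" + "command/event", i.e. controller -> host, event.
assert BTSNOOP_FLAGS[3 & 0x3] == ("controller", "host", "event")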
def parse(filename):
"""
Parse a Btsnoop packet capture file.
Btsnoop packet capture file is structured as:
-----------------------
| header |
-----------------------
| packet record nbr 1 |
-----------------------
| packet record nbr 2 |
-----------------------
| ... |
-----------------------
| packet record nbr n |
-----------------------
References can be found here:
* http://tools.ietf.org/html/rfc1761
* http://www.fte.com/webhelp/NFC/Content/Technical_Information/BT_Snoop_File_Format.htm
Return a list of records, each holding a tuple of:
* sequence nbr
* record length (in bytes)
* flags
* timestamp
* data
"""
with open(filename, "rb") as f:
# Validate file header
(identification, version, type) = _read_file_header(f)
_validate_file_header(identification, version, type)
# Not using the following data:
# record[1] - original length
# record[4] - cumulative drops
return map(lambda record:
(record[0], record[2], record[3], _parse_time(record[5]), record[6]),
_read_packet_records(f))
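# Hedged usage sketch (added; not part of the original module, and the capture
# path below is hypothetical): consume the tuples documented in parse() and
# decode each flags value with flags_to_str(), defined further down this file.
def _example_dump(filename="btsnoop_hci.log"):
    for seq_nbr, length, flags, timestamp, data in parse(filename):
        src, dst, kind = flags_to_str(flags)
        print ("{0} {1} {2}->{3} {4} ({5} bytes)".format(
            seq_nbr, timestamp, src, dst, kind, len(data)))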
def _read_file_header(f):
"""
Header should conform to the following format
----------------------------------------
| identification pattern|
| 8 bytes |
----------------------------------------
| version number |
| 4 bytes |
----------------------------------------
| data link type = HCI UART (H4) |
| 4 bytes |
----------------------------------------
All integer values are stored in "big-endian" order, with the high-order bits first.
"""
ident = f.read(8)
version, data_link_type = struct.unpack( ">II", f.read(4 + 4) )
return (ident, version, data_link_type)
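# Note added for clarity: the fixed file header is 8 + 4 + 4 = 16 bytes in
# total; ">II" unpacks the two big-endian 32-bit fields that follow the 8-byte
# identification pattern.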
def _validate_file_header(identification, version, data_link_type):
"""
The identification pattern should be:
'btsnoop\0'
The version number should be:
1
The data link type can be:
- Reserved 0 - 1000
- Un-encapsulated HCI (H1) 1001
- HCI UART (H4) 1002
- HCI BSCP 1003
- HCI Serial (H5) 1004
- Unassigned 1005 - 4294967295
For SWAP, data link type should be:
HCI UART (H4) 1002
"""
assert identification == "btsnoop\0"
assert version == 1
assert data_link_type == 1002
print ("Btsnoop capture file version {0}, type {1}".format(version, data_link_type))
def _read_packet_records(f):
"""
    A record should conform to the following format
--------------------------
| original length |
| 4 bytes
--------------------------
| included length |
| 4 bytes
--------------------------
| packet flags |
| 4 bytes
--------------------------
| cumulative drops |
| 4 bytes
--------------------------
| timestamp microseconds |
| 8 bytes
--------------------------
| packet data |
--------------------------
All integer values are stored in "big-endian" order, with the high-order bits first.
"""
seq_nbr = 1
while True:
pkt_hdr = f.read(4 + 4 + 4 + 4 + 8)
if not pkt_hdr or len(pkt_hdr) != 24:
# EOF
break
orig_len, inc_len, flags, drops, time64 = struct.unpack( ">IIIIq", pkt_hdr)
assert orig_len == inc_len
data = f.read(inc_len)
assert len(data) == inc_len
yield ( seq_nbr, orig_len, inc_len, flags, drops, time64, data )
seq_nbr += 1
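# Note added for clarity: the 24-byte record header read above maps onto the
# struct format ">IIIIq" as 4 + 4 + 4 + 4 + 8 bytes -- original length,
# included length, flags, cumulative drops, and the signed 64-bit timestamp.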
def _parse_time(time):
"""
Record time is a 64-bit signed integer representing the time of packet arrival,
in microseconds since midnight, January 1st, 0 AD nominal Gregorian.
In order to avoid leap-day ambiguity in calculations, note that an equivalent
epoch may be used of midnight, January 1st 2000 AD, which is represented in
this field as 0x00E03AB44A676000.
"""
time_betw_0_and_2000_ad = int("0x00E03AB44A676000", 16)
time_since_2000_epoch = datetime.timedelta(microseconds=time) - datetime.timedelta(microseconds=time_betw_0_and_2000_ad)
return datetime.datetime(2000, 1, 1) + time_since_2000_epoch
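# Sanity-check sketch (added; not part of the original module): by the
# arithmetic above, the documented year-2000 epoch constant maps back to
# exactly midnight, January 1st 2000.
assert _parse_time(0x00E03AB44A676000) == datetime.datetime(2000, 1, 1)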
def flags_to_str(flags):
"""
Returns a tuple of (src, dst, type)
"""
assert flags in [0,1,2,3]
return BTSNOOP_FLAGS[flags]
def print_hdr():
"""
Print the script header
"""
print ("")
print ("##############################")
print ("# #")
print ("# btsnoop parser v0.1 #")
print ("# #")
print ("##############################")
print ("")
def main(filename):
records = parse(filename)
print (records)
return 0
if __name__ == "__main__":
if len(sys.argv) < 2:
print (__doc__)
sys.exit(1)
print_hdr()
sys.exit(main(sys.argv[1]))
| 27.466346
| 124
| 0.519692
|
7e2b189c5a5c71e6b671e735ed4c65b38bebd870
| 2,034
|
py
|
Python
|
alf/examples/ddpg_pendulum_conf.py
|
breakds/alf
|
b3d60048daee2c9625ba44f778e49570d0d029a7
|
[
"Apache-2.0"
] | 1
|
2021-11-17T17:08:04.000Z
|
2021-11-17T17:08:04.000Z
|
alf/examples/ddpg_pendulum_conf.py
|
ipsec/alf
|
15fd71896eac5ad0987dbe14a9f630b32e0e131f
|
[
"Apache-2.0"
] | null | null | null |
alf/examples/ddpg_pendulum_conf.py
|
ipsec/alf
|
15fd71896eac5ad0987dbe14a9f630b32e0e131f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2022 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import alf
from alf.algorithms.ddpg_algorithm import DdpgAlgorithm
from alf.algorithms.one_step_loss import OneStepTDLoss
from alf.networks import ActorNetwork, CriticNetwork
from alf.optimizers import Adam
from alf.utils.losses import element_wise_huber_loss
# include default ddpg config
from alf.examples import ddpg_conf
# environment config
alf.config(
'create_environment', env_name='Pendulum-v0', num_parallel_environments=1)
hidden_layers = (100, 100)
actor_network_cls = functools.partial(
ActorNetwork, fc_layer_params=hidden_layers)
critic_network_cls = functools.partial(
CriticNetwork, joint_fc_layer_params=hidden_layers)
critic_optimizer = Adam(lr=1e-3)
actor_optimizer = Adam(lr=1e-4)
alf.config(
'DdpgAlgorithm',
actor_network_ctor=actor_network_cls,
critic_network_ctor=critic_network_cls,
actor_optimizer=actor_optimizer,
critic_optimizer=critic_optimizer,
target_update_period=5)
alf.config('OneStepTDLoss', td_error_loss_fn=element_wise_huber_loss)
# training config
alf.config(
'TrainerConfig',
initial_collect_steps=1000,
mini_batch_length=2,
mini_batch_size=64,
unroll_length=1,
num_updates_per_train_iter=1,
num_iterations=10000,
num_checkpoints=5,
evaluate=False,
debug_summaries=True,
summarize_grads_and_vars=1,
summary_interval=100,
replay_buffer_length=100000)
| 30.818182
| 80
| 0.786136
|
4af750c9deaf440e8d6cc03561588f2a23891e59
| 826
|
py
|
Python
|
tests/test_epithet.py
|
phildini/epithet
|
82bb3f1e1d81329ac9e35b4c4efc38947340ea19
|
[
"MIT"
] | 6
|
2017-05-25T18:30:44.000Z
|
2019-08-13T20:39:34.000Z
|
tests/test_epithet.py
|
phildini/epithet
|
82bb3f1e1d81329ac9e35b4c4efc38947340ea19
|
[
"MIT"
] | 2
|
2021-03-25T21:39:52.000Z
|
2021-11-15T17:46:46.000Z
|
tests/test_epithet.py
|
phildini/epithet
|
82bb3f1e1d81329ac9e35b4c4efc38947340ea19
|
[
"MIT"
] | 1
|
2019-08-13T20:39:39.000Z
|
2019-08-13T20:39:39.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_epithet
----------------------------------
Tests for `epithet` module.
"""
import sys
import unittest
from contextlib import contextmanager
from click.testing import CliRunner
from epithet import epithet
from epithet import cli
class TestEpithet(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_000_something(self):
pass
def test_command_line_interface(self):
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert 'epithet.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
| 20.65
| 74
| 0.639225
|
4e0cf37083c4473f77542d37b92d7f36b6faa8d2
| 20,240
|
py
|
Python
|
python/ccxt/async_support/coinone.py
|
pkaske/ccxt
|
19821cfe0b6899d42b714757137dce9f00c406a0
|
[
"MIT"
] | 3
|
2020-06-02T10:48:48.000Z
|
2022-03-12T20:46:01.000Z
|
python/ccxt/async_support/coinone.py
|
pkaske/ccxt
|
19821cfe0b6899d42b714757137dce9f00c406a0
|
[
"MIT"
] | 3
|
2020-09-08T00:13:39.000Z
|
2021-05-08T20:05:48.000Z
|
python/ccxt/async_support/coinone.py
|
pkaske/ccxt
|
19821cfe0b6899d42b714757137dce9f00c406a0
|
[
"MIT"
] | 1
|
2020-03-16T03:22:17.000Z
|
2020-03-16T03:22:17.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import base64
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import ExchangeNotAvailable
class coinone(Exchange):
def describe(self):
return self.deep_extend(super(coinone, self).describe(), {
'id': 'coinone',
'name': 'CoinOne',
'countries': ['KR'], # Korea
'rateLimit': 667,
'version': 'v2',
'has': {
'CORS': False,
'createMarketOrder': False,
'fetchTickers': True,
'fetchOrder': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/38003300-adc12fba-323f-11e8-8525-725f53c4a659.jpg',
'api': 'https://api.coinone.co.kr',
'www': 'https://coinone.co.kr',
'doc': 'https://doc.coinone.co.kr',
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'api': {
'public': {
'get': [
'orderbook/',
'trades/',
'ticker/',
],
},
'private': {
'post': [
'account/btc_deposit_address/',
'account/balance/',
'account/daily_balance/',
'account/user_info/',
'account/virtual_account/',
'order/cancel_all/',
'order/cancel/',
'order/limit_buy/',
'order/limit_sell/',
'order/complete_orders/',
'order/limit_orders/',
'order/order_info/',
'transaction/auth_number/',
'transaction/history/',
'transaction/krw/history/',
'transaction/btc/',
'transaction/coin/',
],
},
},
'markets': {
'BCH/KRW': {'id': 'bch', 'symbol': 'BCH/KRW', 'base': 'BCH', 'quote': 'KRW', 'baseId': 'bch', 'quoteId': 'krw'},
'BTC/KRW': {'id': 'btc', 'symbol': 'BTC/KRW', 'base': 'BTC', 'quote': 'KRW', 'baseId': 'btc', 'quoteId': 'krw'},
'BTG/KRW': {'id': 'btg', 'symbol': 'BTG/KRW', 'base': 'BTG', 'quote': 'KRW', 'baseId': 'btg', 'quoteId': 'krw'},
'ETC/KRW': {'id': 'etc', 'symbol': 'ETC/KRW', 'base': 'ETC', 'quote': 'KRW', 'baseId': 'etc', 'quoteId': 'krw'},
'ETH/KRW': {'id': 'eth', 'symbol': 'ETH/KRW', 'base': 'ETH', 'quote': 'KRW', 'baseId': 'eth', 'quoteId': 'krw'},
'IOTA/KRW': {'id': 'iota', 'symbol': 'IOTA/KRW', 'base': 'IOTA', 'quote': 'KRW', 'baseId': 'iota', 'quoteId': 'krw'},
'LTC/KRW': {'id': 'ltc', 'symbol': 'LTC/KRW', 'base': 'LTC', 'quote': 'KRW', 'baseId': 'ltc', 'quoteId': 'krw'},
'OMG/KRW': {'id': 'omg', 'symbol': 'OMG/KRW', 'base': 'OMG', 'quote': 'KRW', 'baseId': 'omg', 'quoteId': 'krw'},
'QTUM/KRW': {'id': 'qtum', 'symbol': 'QTUM/KRW', 'base': 'QTUM', 'quote': 'KRW', 'baseId': 'qtum', 'quoteId': 'krw'},
'XRP/KRW': {'id': 'xrp', 'symbol': 'XRP/KRW', 'base': 'XRP', 'quote': 'KRW', 'baseId': 'xrp', 'quoteId': 'krw'},
'EOS/KRW': {'id': 'eos', 'symbol': 'EOS/KRW', 'base': 'EOS', 'quote': 'KRW', 'baseId': 'eos', 'quoteId': 'krw'},
'DATA/KRW': {'id': 'data', 'symbol': 'DATA/KRW', 'base': 'DATA', 'quote': 'KRW', 'baseId': 'data', 'quoteId': 'krw'},
'ZIL/KRW': {'id': 'zil', 'symbol': 'ZIL/KRW', 'base': 'ZIL', 'quote': 'KRW', 'baseId': 'zil', 'quoteId': 'krw'},
'KNC/KRW': {'id': 'knc', 'symbol': 'KNC/KRW', 'base': 'KNC', 'quote': 'KRW', 'baseId': 'knc', 'quoteId': 'krw'},
'ZRX/KRW': {'id': 'zrx', 'symbol': 'ZRX/KRW', 'base': 'ZRX', 'quote': 'KRW', 'baseId': 'zrx', 'quoteId': 'krw'},
'LUNA/KRW': {'id': 'luna', 'symbol': 'LUNA/KRW', 'base': 'LUNA', 'quote': 'KRW', 'baseId': 'luna', 'quoteId': 'krw'},
'ATOM/KRW': {'id': 'atom', 'symbol': 'ATOM/KRW', 'base': 'ATOM', 'quote': 'KRW', 'baseId': 'atom', 'quoteId': 'krw'},
'VNT/KRW': {'id': 'vnt', 'symbol': 'VNT/KRW', 'base': 'VNT', 'quote': 'KRW', 'baseId': 'vnt', 'quoteId': 'krw'},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.001,
'maker': 0.001,
'tiers': {
'taker': [
[0, 0.001],
[100000000, 0.0009],
[1000000000, 0.0008],
[5000000000, 0.0007],
[10000000000, 0.0006],
[20000000000, 0.0005],
[30000000000, 0.0004],
[40000000000, 0.0003],
[50000000000, 0.0002],
],
'maker': [
[0, 0.001],
[100000000, 0.0008],
[1000000000, 0.0006],
[5000000000, 0.0004],
[10000000000, 0.0002],
[20000000000, 0],
[30000000000, 0],
[40000000000, 0],
[50000000000, 0],
],
},
},
},
'exceptions': {
'405': ExchangeNotAvailable,
'104': OrderNotFound,
},
})
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privatePostAccountBalance(params)
result = {'info': response}
balances = self.omit(response, [
'errorCode',
'result',
'normalWallets',
])
currencyIds = list(balances.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
balance = balances[currencyId]
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float(balance, 'avail')
account['total'] = self.safe_float(balance, 'balance')
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'currency': market['id'],
'format': 'json',
}
response = await self.publicGetOrderbook(self.extend(request, params))
return self.parse_order_book(response, None, 'bid', 'ask', 'price', 'qty')
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
request = {
'currency': 'all',
'format': 'json',
}
response = await self.publicGetTicker(self.extend(request, params))
result = {}
ids = list(response.keys())
for i in range(0, len(ids)):
id = ids[i]
symbol = id
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = response[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'currency': market['id'],
'format': 'json',
}
response = await self.publicGetTicker(self.extend(request, params))
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
last = self.safe_float(ticker, 'last')
previousClose = self.safe_float(ticker, 'yesterday_last')
change = None
if last is not None and previousClose is not None:
change = previousClose - last
symbol = market['symbol'] if (market is not None) else None
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': self.safe_float(ticker, 'first'),
'close': last,
'last': last,
'previousClose': previousClose,
'change': change,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': None,
'info': ticker,
}
def parse_trade(self, trade, market=None):
timestamp = self.safe_timestamp(trade, 'timestamp')
symbol = market['symbol'] if (market is not None) else None
is_ask = self.safe_string(trade, 'is_ask')
side = None
if is_ask == '1':
side = 'sell'
elif is_ask == '0':
side = 'buy'
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'qty')
cost = None
if price is not None:
if amount is not None:
cost = price * amount
return {
'id': None,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': None,
'symbol': symbol,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'currency': market['id'],
'period': 'hour',
'format': 'json',
}
response = await self.publicGetTrades(self.extend(request, params))
return self.parse_trades(response['completeOrders'], market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
if type != 'limit':
raise ExchangeError(self.id + ' allows limit orders only')
await self.load_markets()
request = {
'price': price,
'currency': self.market_id(symbol),
'qty': amount,
}
method = 'privatePostOrder' + self.capitalize(type) + self.capitalize(side)
response = await getattr(self, method)(self.extend(request, params))
id = self.safe_string(response, 'orderId')
if id is not None:
id = id.upper()
timestamp = self.milliseconds()
cost = price * amount
order = {
'info': response,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': cost,
'average': None,
'amount': amount,
'filled': None,
'remaining': amount,
'status': 'open',
'fee': None,
}
self.orders[id] = order
return order
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
result = None
market = None
if symbol is None:
if id in self.orders:
market = self.market(self.orders[id]['symbol'])
else:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument for order ids missing in the .orders cache(the order was created with a different instance of self class or within a different run of self code).')
else:
market = self.market(symbol)
try:
request = {
'order_id': id,
'currency': market['id'],
}
response = await self.privatePostOrderOrderInfo(self.extend(request, params))
result = self.parse_order(response)
self.orders[id] = result
except Exception as e:
if isinstance(e, OrderNotFound):
if id in self.orders:
self.orders[id]['status'] = 'canceled'
result = self.orders[id]
else:
raise e
else:
raise e
return result
def parse_order_status(self, status):
statuses = {
'live': 'open',
'partially_filled': 'open',
'filled': 'closed',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
info = self.safe_value(order, 'info')
id = self.safe_string_upper(info, 'orderId')
timestamp = self.safe_timestamp(info, 'timestamp')
status = self.parse_order_status(self.safe_string(order, 'status'))
cost = None
side = self.safe_string(info, 'type')
if side.find('ask') >= 0:
side = 'sell'
else:
side = 'buy'
price = self.safe_float(info, 'price')
amount = self.safe_float(info, 'qty')
remaining = self.safe_float(info, 'remainQty')
filled = None
if amount is not None:
if remaining is not None:
filled = amount - remaining
if price is not None:
cost = price * amount
currency = self.safe_string(info, 'currency')
fee = {
'currency': currency,
'cost': self.safe_float(info, 'fee'),
'rate': self.safe_float(info, 'feeRate'),
}
symbol = None
if market is None:
marketId = currency.lower()
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
return {
'info': order,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': 'limit',
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
}
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
order = self.safe_value(self.orders, id)
amount = None
price = None
side = None
if order is None:
if symbol is None:
# eslint-disable-next-line quotes
raise InvalidOrder(self.id + " cancelOrder could not find the order id " + id + " in orders cache. The order was probably created with a different instance of self class earlier. The `symbol` argument is missing. To cancel the order, pass a symbol argument and {'price': 12345, 'qty': 1.2345, 'is_ask': 0} in the params argument of cancelOrder.")
price = self.safe_float(params, 'price')
if price is None:
# eslint-disable-next-line quotes
raise InvalidOrder(self.id + " cancelOrder could not find the order id " + id + " in orders cache. The order was probably created with a different instance of self class earlier. The `price` parameter is missing. To cancel the order, pass a symbol argument and {'price': 12345, 'qty': 1.2345, 'is_ask': 0} in the params argument of cancelOrder.")
amount = self.safe_float(params, 'qty')
if amount is None:
# eslint-disable-next-line quotes
raise InvalidOrder(self.id + " cancelOrder could not find the order id " + id + " in orders cache. The order was probably created with a different instance of self class earlier. The `qty`(amount) parameter is missing. To cancel the order, pass a symbol argument and {'price': 12345, 'qty': 1.2345, 'is_ask': 0} in the params argument of cancelOrder.")
side = self.safe_float(params, 'is_ask')
if side is None:
# eslint-disable-next-line quotes
raise InvalidOrder(self.id + " cancelOrder could not find the order id " + id + " in orders cache. The order was probably created with a different instance of self class earlier. The `is_ask`(side) parameter is missing. To cancel the order, pass a symbol argument and {'price': 12345, 'qty': 1.2345, 'is_ask': 0} in the params argument of cancelOrder.")
else:
price = order['price']
amount = order['amount']
side = 0 if (order['side'] == 'buy') else 1
symbol = order['symbol']
request = {
'order_id': id,
'price': price,
'qty': amount,
'is_ask': side,
'currency': self.market_id(symbol),
}
self.orders[id]['status'] = 'canceled'
return await self.privatePostOrderCancel(self.extend(request, params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
url = self.urls['api'] + '/'
if api == 'public':
url += request
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
url += self.version + '/' + request
nonce = str(self.nonce())
json = self.json(self.extend({
'access_token': self.apiKey,
'nonce': nonce,
}, params))
payload = base64.b64encode(self.encode(json))
body = self.decode(payload)
secret = self.secret.upper()
signature = self.hmac(payload, self.encode(secret), hashlib.sha512)
headers = {
'content-type': 'application/json',
'X-COINONE-PAYLOAD': payload,
'X-COINONE-SIGNATURE': signature,
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if 'result' in response:
result = response['result']
if result != 'success':
#
# { "errorCode": "405", "status": "maintenance", "result": "error"}
#
code = self.safe_string(response, 'errorCode')
feedback = self.id + ' ' + self.json(response)
exceptions = self.exceptions
if code in exceptions:
raise exceptions[code](feedback)
else:
raise ExchangeError(feedback)
else:
raise ExchangeError(self.id + ' ' + body)
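# ---------------------------------------------------------------------------
# Hedged illustration (not part of the generated ccxt file): sign() above
# authenticates private endpoints by base64-encoding the JSON request payload
# and signing that payload with HMAC-SHA512, keyed by the upper-cased secret.
# The helper below reproduces the scheme in isolation with made-up credentials.
def _coinone_signing_sketch(api_key='API_KEY', secret='API_SECRET', nonce='1'):
    import hmac
    import json
    payload = base64.b64encode(json.dumps(
        {'access_token': api_key, 'nonce': nonce}).encode())
    signature = hmac.new(secret.upper().encode(), payload, hashlib.sha512).hexdigest()
    return payload, signature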
| 42.972399
| 369
| 0.495059
|
977f9fb0a9f09b38acb87a7cae1d7be8d27c5ba2
| 2,666
|
py
|
Python
|
test/integration/ggrc/services/test_assessments.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/integration/ggrc/services/test_assessments.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/integration/ggrc/services/test_assessments.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for assessment service handle."""
import random
from ddt import data, ddt
from ggrc.models import all_models
from integration.ggrc import TestCase
from integration.ggrc.query_helper import WithQueryApi
from integration.ggrc.models import factories
from integration.ggrc.api_helper import Api
from integration.ggrc.generator import ObjectGenerator
@ddt
class TestCollection(TestCase, WithQueryApi):
"""Test for collection assessment objects."""
def setUp(self):
super(TestCollection, self).setUp()
self.client.get("/login")
self.clear_data()
self.expected_ids = []
self.api = Api()
self.generator = ObjectGenerator()
assessments = [factories.AssessmentFactory() for _ in range(10)]
random.shuffle(assessments)
for idx, assessment in enumerate(assessments):
comment = factories.CommentFactory(description=str(idx))
factories.RelationshipFactory(source=assessment, destination=comment)
self.expected_ids.append(assessment.id)
@data(True, False)
def test_order_by_test(self, desc):
"""Order by fultext attr"""
query = self._make_query_dict(
"Assessment", order_by=[{"name": "comment", "desc": desc}]
)
expected_ids = self.expected_ids
if desc:
expected_ids = expected_ids[::-1]
results = self._get_first_result_set(query, "Assessment", "values")
self.assertEqual(expected_ids, [i['id'] for i in results])
@data("Assessor", "Creator", "Verifier")
def test_delete_assessment_by_role(self, role_name):
"""Delete assessment not allowed for based on Assignee Type."""
with factories.single_commit():
assessment = factories.AssessmentFactory()
context = factories.ContextFactory(related_object=assessment)
assessment.context = context
person = factories.PersonFactory()
object_person_rel = factories.RelationshipFactory(
source=assessment, destination=person)
factories.RelationshipAttrFactory(
relationship_id=object_person_rel.id,
attr_name="AssigneeType",
attr_value=role_name,
)
assessment_id = assessment.id
role = all_models.Role.query.filter(
all_models.Role.name == "Creator"
).first()
self.generator.generate_user_role(person, role, context)
self.api.set_user(person)
assessment = all_models.Assessment.query.get(assessment_id)
resp = self.api.delete(assessment)
self.assert403(resp)
self.assertTrue(all_models.Assessment.query.filter(
all_models.Assessment.id == assessment_id).one())
| 35.546667
| 78
| 0.721305
|
8591dbe0080d4af0c0401f868169f9966e6d2c80
| 10,169
|
py
|
Python
|
src/python/openbarcode/linear.py
|
Floms/Open-Barcode
|
f721bfeecd682a683e80d7b9c6987ffda3dc9213
|
[
"Apache-2.0"
] | 9
|
2015-09-23T18:38:58.000Z
|
2018-10-16T06:33:17.000Z
|
src/python/openbarcode/linear.py
|
Floms/Open-Barcode
|
f721bfeecd682a683e80d7b9c6987ffda3dc9213
|
[
"Apache-2.0"
] | null | null | null |
src/python/openbarcode/linear.py
|
Floms/Open-Barcode
|
f721bfeecd682a683e80d7b9c6987ffda3dc9213
|
[
"Apache-2.0"
] | 11
|
2015-05-05T10:26:46.000Z
|
2017-12-07T15:02:57.000Z
|
#
# Copyright 2013 Floms, LLC (Yoel Nunez <y.nunez@developers.floms.com>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
__author__ = 'Yoel Nunez <y.nunez@developers.floms.com>'
from abc import ABCMeta, abstractmethod
class LinearBarcode:
__metaclass__ = ABCMeta
code = None
bars = []
@abstractmethod
def build_sequence(self):
pass
@abstractmethod
def barcode(self):
pass
@abstractmethod
def calculate_check_digit(self):
pass
@abstractmethod
def map_sequence(self, char, pos):
pass
class UPC(LinearBarcode):
    def __init__(self, code):
        if len(code) != 11 and len(code) != 12:
            raise Exception("Invalid UPC code length")
        self.code = code[:11]
        self.bars = []  # per-instance list; avoids reusing the shared class attribute
        self.build_sequence()
def build_sequence(self):
self.code += str(self.calculate_check_digit())
code = "*" + self.code[0:6] + "#" + self.code[6:12] + "*"
for p in range(len(code)):
self.bars[len(self.bars):] = self.map_sequence(code[p:p + 1], p)
def barcode(self):
return self.bars
def map_sequence(self, char, pos):
sequence = {
"0": [0, 0, 0, 1, 1, 0, 1], "1": [0, 0, 1, 1, 0, 0, 1], "2": [0, 0, 1, 0, 0, 1, 1],
"3": [0, 1, 1, 1, 1, 0, 1], "4": [0, 1, 0, 0, 0, 1, 1], "5": [0, 1, 1, 0, 0, 0, 1],
"6": [0, 1, 0, 1, 1, 1, 1], "7": [0, 1, 1, 1, 0, 1, 1], "8": [0, 1, 1, 0, 1, 1, 1],
"9": [0, 0, 0, 1, 0, 1, 1], "#": [0, 1, 0, 1, 0], "*": [1, 0, 1]
}
if pos >= 7:
sequence["0"] = [1, 1, 1, 0, 0, 1, 0]
sequence["1"] = [1, 1, 0, 0, 1, 1, 0]
sequence["2"] = [1, 1, 0, 1, 1, 0, 0]
sequence["3"] = [1, 0, 0, 0, 0, 1, 0]
sequence["4"] = [1, 0, 1, 1, 1, 0, 0]
sequence["5"] = [1, 0, 0, 1, 1, 1, 0]
sequence["6"] = [1, 0, 1, 0, 0, 0, 0]
sequence["7"] = [1, 0, 0, 0, 1, 0, 0]
sequence["8"] = [1, 0, 0, 1, 0, 0, 0]
sequence["9"] = [1, 1, 1, 0, 1, 0, 0]
return sequence[char]
def calculate_check_digit(self):
check_sum = 0
for i in range(11):
digit = int(self.code[i:i + 1])
if i % 2 == 0:
check_sum += digit * 3
else:
check_sum += digit
check_sum %= 10
if check_sum != 0:
return 10 - check_sum
else:
return check_sum
class EAN(LinearBarcode):
base = 0
    def __init__(self, code):
        if len(code) != 12 and len(code) != 13:
            raise Exception("Invalid EAN code length")
        self.code = code[:12]
        self.base = int(code[:1])
        self.bars = []  # per-instance list; avoids reusing the shared class attribute
        self.build_sequence()
def build_sequence(self):
self.code += str(self.calculate_check_digit())
code = "*" + self.code[1:7] + "#" + self.code[7:13] + "*"
for p in range(len(code)):
self.bars[len(self.bars):] = self.map_sequence(code[p:p + 1], p)
def barcode(self):
return self.bars
def code_l(self):
return {
"0": [0, 0, 0, 1, 1, 0, 1],
"1": [0, 0, 1, 1, 0, 0, 1],
"2": [0, 0, 1, 0, 0, 1, 1],
"3": [0, 1, 1, 1, 1, 0, 1],
"4": [0, 1, 0, 0, 0, 1, 1],
"5": [0, 1, 1, 0, 0, 0, 1],
"6": [0, 1, 0, 1, 1, 1, 1],
"7": [0, 1, 1, 1, 0, 1, 1],
"8": [0, 1, 1, 0, 1, 1, 1],
"9": [0, 0, 0, 1, 0, 1, 1],
"#": [0, 1, 0, 1, 0],
"*": [1, 0, 1]
}
def code_g(self):
base = self.code_l()
base["0"] = [0, 1, 0, 0, 1, 1, 1]
base["1"] = [0, 1, 1, 0, 0, 1, 1]
base["2"] = [0, 0, 1, 1, 0, 1, 1]
base["3"] = [0, 1, 0, 0, 0, 0, 1]
base["4"] = [0, 0, 1, 1, 1, 0, 1]
base["5"] = [0, 1, 1, 1, 0, 0, 1]
base["6"] = [0, 0, 0, 0, 1, 0, 1]
base["7"] = [0, 0, 1, 0, 0, 0, 1]
base["8"] = [0, 0, 0, 1, 0, 0, 1]
base["9"] = [0, 0, 1, 0, 1, 1, 1]
return base
def code_r(self):
base = self.code_l()
base["0"] = [1, 1, 1, 0, 0, 1, 0]
base["1"] = [1, 1, 0, 0, 1, 1, 0]
base["2"] = [1, 1, 0, 1, 1, 0, 0]
base["3"] = [1, 0, 0, 0, 0, 1, 0]
base["4"] = [1, 0, 1, 1, 1, 0, 0]
base["5"] = [1, 0, 0, 1, 1, 1, 0]
base["6"] = [1, 0, 1, 0, 0, 0, 0]
base["7"] = [1, 0, 0, 0, 1, 0, 0]
base["8"] = [1, 0, 0, 1, 0, 0, 0]
base["9"] = [1, 1, 1, 0, 1, 0, 0]
return base
def map_sequence(self, char, pos):
sequence = {}
b = self.base
if pos > 6:
sequence = self.code_r()
elif b == 0:
sequence = self.code_l()
elif b == 1:
if pos == 1 or pos == 2 or pos == 4:
sequence = self.code_l()
else:
sequence = self.code_g()
elif b == 2:
if pos == 1 or pos == 2 or pos == 5:
sequence = self.code_l()
else:
sequence = self.code_g()
elif b == 3:
if pos == 1 or pos == 2 or pos == 6:
sequence = self.code_l()
else:
sequence = self.code_g()
elif b == 4:
if pos == 1 or pos == 3 or pos == 4:
sequence = self.code_l()
else:
sequence = self.code_g()
elif b == 5:
if pos == 1 or pos == 4 or pos == 5:
sequence = self.code_l()
else:
sequence = self.code_g()
elif b == 6:
if pos == 1 or pos == 5 or pos == 6:
sequence = self.code_l()
else:
sequence = self.code_g()
elif b == 7:
if pos == 1 or pos == 3 or pos == 5:
sequence = self.code_l()
else:
sequence = self.code_g()
elif b == 8:
if pos == 1 or pos == 3 or pos == 6:
sequence = self.code_l()
else:
sequence = self.code_g()
else:
if pos == 1 or pos == 4 or pos == 6:
sequence = self.code_l()
else:
sequence = self.code_g()
return sequence[char]
def calculate_check_digit(self):
check_sum = 0
for i in range(12):
digit = int(self.code[i:i + 1])
if i % 2 == 1:
check_sum += digit * 3
else:
check_sum += digit
check_sum %= 10
if check_sum != 0:
return 10 - check_sum
else:
return check_sum
class Code39(LinearBarcode):
    def __init__(self, code):
        self.code = str(code).upper()
        self.bars = []  # per-instance list; avoids reusing the shared class attribute
        self.build_sequence()
def build_sequence(self):
code = "*" + self.code + "*"
for p in range(len(code)):
self.bars[len(self.bars):] = self.map_sequence(code[p:p + 1], p)
self.bars.append(0)
def barcode(self):
return self.bars
def map_sequence(self, char, pos):
sequence = {
"0": [1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1],
"1": [1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1],
"2": [1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1],
"3": [1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1],
"4": [1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1],
"5": [1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1],
"6": [1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1],
"7": [1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1],
"8": [1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1],
"9": [1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1],
"A": [1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1],
"B": [1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1],
"C": [1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1],
"D": [1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1],
"E": [1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1],
"F": [1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1],
"G": [1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1],
"H": [1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1],
"I": [1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1],
"J": [1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1],
"K": [1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1],
"L": [1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1],
"M": [1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1],
"N": [1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1],
"O": [1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1],
"P": [1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1],
"Q": [1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1],
"R": [1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1],
"S": [1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1],
"T": [1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1],
"U": [1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1],
"V": [1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1],
"W": [1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1],
"X": [1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1],
"Y": [1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1],
"Z": [1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1],
"-": [1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1],
".": [1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1],
" ": [1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1],
"$": [1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1],
"/": [1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1],
"+": [1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1],
"%": [1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1],
"*": [1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1],
}
return sequence[char]
def calculate_check_digit(self):
return 0
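# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): exercises the UPC
# encoder above on the classic UPC-A example 03600029145. Following
# UPC.calculate_check_digit(): digits at even i (0,6,0,2,1,5) sum to 14 and are
# weighted x3 -> 42; digits at odd i (3,0,0,9,4) sum to 16; (42+16) % 10 = 8,
# so the check digit is 10 - 8 = 2.
if __name__ == "__main__":
    upc = UPC("03600029145")
    print(upc.code)            # 036000291452 (check digit 2 appended)
    print(len(upc.barcode()))  # 95 modules: 3 + 6*7 + 5 + 6*7 + 3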
| 31.482972
| 95
| 0.392566
|
f177b2515d6a1013c043cf6f6e4a2f7db8bac784
| 1,823
|
py
|
Python
|
machineLearning/206/extract-colors.py
|
WebClub-NITK/Hacktoberfest-2k19
|
69fafb354f0da58220a7ba68696b4d7fde0a3d5c
|
[
"MIT"
] | 28
|
2019-10-01T09:13:50.000Z
|
2021-04-18T18:15:34.000Z
|
machineLearning/206/extract-colors.py
|
arpita221b/Hacktoberfest-2k19-1
|
6f682ea2226a8ce6f5a913da9ecdafff7a9fa5bd
|
[
"MIT"
] | 236
|
2019-09-30T16:06:09.000Z
|
2022-02-26T18:37:03.000Z
|
machineLearning/206/extract-colors.py
|
arpita221b/Hacktoberfest-2k19-1
|
6f682ea2226a8ce6f5a913da9ecdafff7a9fa5bd
|
[
"MIT"
] | 184
|
2019-09-30T16:08:04.000Z
|
2022-03-09T05:00:29.000Z
|
import numpy as np
import cv2
import argparse
parser =argparse.ArgumentParser()
parser.add_argument("--color",type=str,default='R',help="insert uppercase first letter in color you want to see its output")
parser.add_argument("--image",type=str,default="colors.png",help="Specify the image location")
arg = parser.parse_args()
colors = ['R','G','B','Y','M','C']
image = arg.image
color = arg.color
img = cv2.imread(image)
R_channel = img[:,:,2]
G_channel = img[:,:,1]
B_channel = img[:,:,0]
#Red colors
imageR = img.copy()
imageR[R_channel < 128 ] = 0
imageR[G_channel > 51] = 0
imageR[B_channel > 51] = 0
imageR = cv2.resize(imageR,(int(img.shape[1]*0.5),int(img.shape[0]*0.5)))
#Green Colors
imageG = img.copy()
imageG[G_channel <100] = 0
imageG[R_channel > 173 ]=0
imageG[B_channel > 102]=0
imageG = cv2.resize(imageG,(int(img.shape[1]*0.5),int(img.shape[0]*0.5)))
#Blue color
imageB = img.copy()
imageB[B_channel < 205] = 0
imageB[(G_channel > 153)]=0
imageB[R_channel > 51]=0
imageB = cv2.resize(imageB,(int(img.shape[1]*0.5),int(img.shape[0]*0.5)))
#Yellow color
imageY = img.copy()
imageY[(B_channel < 51) & (B_channel > 153)] = 0
imageY[(G_channel < 200)]=0
imageY[R_channel < 200]=0
imageY = cv2.resize(imageY,(int(img.shape[1]*0.5),int(img.shape[0]*0.5)))
#Magenta color
imageM = img.copy()
imageM[B_channel < 153]=0
imageM[(R_channel < 76)] = 0
imageM[(G_channel >51)]=0
imageM = cv2.resize(imageM,(int(img.shape[1]*0.5),int(img.shape[0]*0.5)))
#Cyan color
imageC = img.copy()
imageC[B_channel < 153]=0
imageC[(G_channel < 153)] = 0
imageC[(R_channel >51)]=0
imageC = cv2.resize(imageC,(int(img.shape[1]*0.5),int(img.shape[0]*0.5)))
images = [imageR,imageG,imageB,imageY,imageM,imageC]
image_sel = images[colors.index(color)]
cv2.imshow("image",image_sel)
cv2.waitKey(0)
cv2.destroyAllWindows()
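# Hedged example invocation (image name is illustrative; any BGR image works):
#   python extract-colors.py --image colors.png --color M
# shows only the magenta-ish regions of the input, resized to half resolution.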
| 25.319444
| 124
| 0.687877
|
fbada46f71a0743bfd2e8e71cf918e8535daf59c
| 216
|
wsgi
|
Python
|
flaskapp.wsgi
|
pedroalvesfilho/catalog_bikes
|
dacabec1c0f4efea3a14d4fc9086ff1a8550396b
|
[
"MIT"
] | null | null | null |
flaskapp.wsgi
|
pedroalvesfilho/catalog_bikes
|
dacabec1c0f4efea3a14d4fc9086ff1a8550396b
|
[
"MIT"
] | null | null | null |
flaskapp.wsgi
|
pedroalvesfilho/catalog_bikes
|
dacabec1c0f4efea3a14d4fc9086ff1a8550396b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys
import logging
logging.basicConfig(stream=sys.stderr)
sys.path.insert(0,"/var/www/FlaskApp")
from FlaskApp import app as application
application.secret_key = 'Add your secret key'
| 21.6
| 46
| 0.777778
|
61ffc43c4662e1ac3e1e206a7a5120adad3bda35
| 6,844
|
py
|
Python
|
datasets/preprocess/SyRIP.py
|
ostadabbas/HW-HuP
|
0a7b4263f72e7ff7f9bc4c81366569822c3ee248
|
[
"MIT"
] | null | null | null |
datasets/preprocess/SyRIP.py
|
ostadabbas/HW-HuP
|
0a7b4263f72e7ff7f9bc4c81366569822c3ee248
|
[
"MIT"
] | null | null | null |
datasets/preprocess/SyRIP.py
|
ostadabbas/HW-HuP
|
0a7b4263f72e7ff7f9bc4c81366569822c3ee248
|
[
"MIT"
] | null | null | null |
import os
from os.path import join
import sys
import json
import numpy as np
from tqdm import tqdm
# from .read_openpose import read_openpose
def coco_extract(dataset_path, out_path):
'''
no open pose data, SyRIP version
:param dataset_path:
:param out_path:
:return:
'''
# convert joints to global order
joints_idx = [19, 20, 21, 22, 23, 9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0] # convert to 17 joints
# joints_idx = [19, 20, 21, 22, 23, 9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0] # convert to 17 joints
# right left hip 2, 3 , visible to 0 for
# bbox expansion factor
scaleFactor = 1.2
# structs we need
imgnames_, scales_, centers_, parts_, openposes_ = [], [], [], [], []
# json annotation file
json_path = os.path.join(dataset_path,
'annotations/200R_1000S/',
'person_keypoints_train_infant.json')
json_data = json.load(open(json_path, 'r'))
imgs = {}
for img in json_data['images']:
imgs[img['id']] = img # {0: {'file_name':xx, 'RGB':....}
n_chk = -1
for i, annot in tqdm(enumerate(json_data['annotations']), desc='gen SyRIP db for SPIN...'):
# keypoints processing
if n_chk>0 and i>=n_chk:
break
keypoints = annot['keypoints']
keypoints = np.reshape(keypoints, (17,3))
keypoints[keypoints[:,2]>0,2] = 1
# check if all major body joints are annotated
if sum(keypoints[5:,2]>0) < 12:
continue
# image name
image_id = annot['image_id']
img_name = str(imgs[image_id]['file_name'])
img_name_full = join('images/1200/', img_name) # relative from ds folder to images
# keypoints
part = np.zeros([24,3])
part[joints_idx] = keypoints # 24 joints, put the gt 17 in, 2, 3 vis to 0 , add openpose jt
# scale and center
bbox = annot['bbox']
center = [bbox[0] + bbox[2]/2, bbox[1] + bbox[3]/2]
scale = scaleFactor*max(bbox[2], bbox[3])/200
# read openpose detections, no read openpose
# json_file = os.path.join(openpose_path, 'coco',
# img_name.replace('.jpg', '_keypoints.json'))
# openpose = read_openpose(json_file, part, 'coco')
# update only the hip to openpose , then clean the part vis
openpose = np.zeros([25, 3])
# r,l hip op 9, 12 , part 2, 3
openpose[9] = part[2]
openpose[12] = part[3]
part[[2,3], 2] = 0 # clean up the vis for hip
# debug show
if not n_chk<0:
print('id {} op and part'.format(i))
print(openpose)
print(part)
# store data
imgnames_.append(img_name_full)
centers_.append(center)
scales_.append(scale)
parts_.append(part) # gt 17
openposes_.append(openpose) # openpose 25 correct detection
# store the data struct
print('valid data length', len(imgnames_))
if n_chk<0:
if not os.path.isdir(out_path):
os.makedirs(out_path)
out_file = os.path.join(out_path, 'SyRIP_train.npz')
print("file saved to {}".format(out_file))
np.savez(out_file, imgname=imgnames_,
center=centers_,
scale=scales_,
part=parts_,
openpose=openposes_)
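# Hedged sketch (not part of the original script): the fancy-index assignment
# used above, part[joints_idx] = keypoints, scatters COCO keypoint i into row
# joints_idx[i] of the 24-row target, so e.g. COCO joint 0 lands in row 19.
def _joint_scatter_sketch():
    joints_idx = [19, 20, 21, 22, 23, 9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0]
    keypoints = np.arange(17 * 3).reshape(17, 3)
    part = np.zeros([24, 3])
    part[joints_idx] = keypoints
    assert (part[19] == keypoints[0]).all()
    return part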
def SyRIPv2_extract(dataset_path, out_path, is_train=False):
'''
coco format, SyRIP version
:param dataset_path:
:param out_path:
:return:
'''
# convert joints to global order
joints_idx = [19, 20, 21, 22, 23, 9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0] # convert to 17 joints
# joints_idx = [19, 20, 21, 22, 23, 9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0] # convert to 17 joints
# right left hip 2, 3 , visible to 0 for
# bbox expansion factor
scaleFactor = 1.2
# json annotation file
if is_train:
split = 'train'
else:
split = 'valid'
# structs we need
imgnames_, scales_, centers_, parts_, openposes_ = [], [], [], [], []
# json annotation file
json_path = os.path.join(dataset_path,
'anno_{}.json'.format(split))
json_data = json.load(open(json_path, 'r'))
imgs = {}
for img in json_data['images']:
imgs[img['id']] = img # {0: {'file_name':xx, 'RGB':....}
n_chk = -1
N= len(imgs)
for i, annot in tqdm(enumerate(json_data['annotations']), desc='gen SyRIP db for SPIN...', total=N):
# keypoints processing
if n_chk > 0 and i >= n_chk:
break
keypoints = annot['keypoints']
keypoints = np.reshape(keypoints, (17, 3))
keypoints[keypoints[:, 2] > 0, 2] = 1
# check if all major body joints are annotated
if sum(keypoints[5:, 2] > 0) < 12: # if not all joints visible.
continue
# image name
# image_id = annot['image_id']
image_id = annot['id']
img_name = str(imgs[image_id]['file_name'])
img_name_full = join('RGB', img_name) # relative from ds folder to images
# keypoints
part = np.zeros([24, 3])
part[joints_idx] = keypoints # 24 joints, put the gt 17 in, 2, 3 vis to 0 , add openpose jt
# scale and center
bbox = annot['bbox']
center = [bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2]
scale = scaleFactor * max(bbox[2], bbox[3]) / 200
openpose = np.zeros([25, 3])
# r,l hip op 9, 12 , part 2, 3
openpose[9] = part[2]
openpose[12] = part[3]
part[[2, 3], 2] = 0 # clean up the vis for hip
# debug show
if not n_chk < 0:
print('id {} op and part'.format(i))
print(openpose)
print(part)
# store data
imgnames_.append(img_name_full)
centers_.append(center)
scales_.append(scale)
parts_.append(part) # gt 17
openposes_.append(openpose) # openpose 25 correct detection
# store the data struct
print('valid data length', len(imgnames_))
if n_chk < 0:
if not os.path.isdir(out_path):
os.makedirs(out_path)
out_file = os.path.join(out_path, 'SyRIP_{}.npz'.format(split))
print("file saved to {}".format(out_file))
np.savez(out_file, imgname=imgnames_,
center=centers_,
scale=scales_,
part=parts_,
openpose=openposes_)
if __name__ == '__main__':
# coco_extract('/scratch/liu.shu/datasets/SyRIP', '/scratch/liu.shu/codesPool/SPIN/data/dataset_extras')
SyRIPv2_extract('/scratch/liu.shu/datasets/SyRIPv2', '/scratch/liu.shu/codesPool/SPIN/data/dataset_extras', is_train=True)
| 37.604396
| 126
| 0.561222
|
2b6e4baaba8ee6948dedbf331378aa600cd96fa7
| 10,604
|
py
|
Python
|
tensorflow_probability/python/sts/fitting_test.py
|
nxdao2000/probability
|
33d2bc1cb0e7b6284579ea7f3692b9d056e0d700
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/sts/fitting_test.py
|
nxdao2000/probability
|
33d2bc1cb0e7b6284579ea7f3692b9d056e0d700
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/sts/fitting_test.py
|
nxdao2000/probability
|
33d2bc1cb0e7b6284579ea7f3692b9d056e0d700
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for STS fitting methods."""
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
tfl = tf.linalg
class VariationalInferenceTests(tf.test.TestCase):
def _build_model(self, observed_time_series):
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
return tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
def test_multiple_inits_example(self):
batch_shape = [2, 3]
num_timesteps = 5
num_inits = 10
observed_time_series = np.random.randn(
*(batch_shape + [num_timesteps])).astype(np.float32)
model = self._build_model(observed_time_series)
def build_variational_loss():
(variational_loss, _) = tfp.sts.build_factored_variational_loss(
model=model,
observed_time_series=observed_time_series,
init_batch_shape=num_inits)
return variational_loss
# We provide graph- and eager-mode optimization for TF 2.0 compatibility.
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.1)
if tf.executing_eagerly():
for _ in range(5): # don't actually run to completion
optimizer.minimize(build_variational_loss)
# Draw multiple samples to reduce Monte Carlo error in the optimized
# variational bounds.
avg_loss = np.mean(
[self.evaluate(build_variational_loss()) for _ in range(25)], axis=0)
else:
variational_loss = build_variational_loss()
train_op = optimizer.minimize(variational_loss)
self.evaluate(tf.compat.v1.global_variables_initializer())
for _ in range(5): # don't actually run to completion
_ = self.evaluate(train_op)
# Draw multiple samples to reduce Monte Carlo error in the optimized
# variational bounds.
avg_loss = np.mean(
[self.evaluate(variational_loss) for _ in range(25)], axis=0)
self.assertAllEqual(avg_loss.shape, [num_inits] + batch_shape)
def test_init_is_valid_for_large_observations(self):
num_timesteps = 20
observed_time_series = (
-1e8 + 1e6 * np.random.randn(num_timesteps)).astype(np.float32)
model = self._build_model(observed_time_series)
variational_loss, _ = tfp.sts.build_factored_variational_loss(
model=model, observed_time_series=observed_time_series)
self.evaluate(tf.compat.v1.global_variables_initializer())
loss_ = self.evaluate(variational_loss)
self.assertTrue(np.isfinite(loss_))
# When this test was written, the variational loss with default
# initialization and seed was 431.5 nats. Larger finite initial losses are
# not 'incorrect' as such, but if your change makes the next line fail,
# you have probably done something questionable.
self.assertLessEqual(loss_, 10000)
class _HMCTests(object):
def _build_model(self, observed_time_series):
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
return tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
def test_basic_hmc_example(self):
batch_shape = [2, 3]
num_timesteps = 5
observed_time_series = self._build_tensor(np.random.randn(
*(batch_shape + [num_timesteps])))
model = self._build_model(observed_time_series)
samples, kernel_results = tfp.sts.fit_with_hmc(
model,
observed_time_series,
num_results=4,
num_warmup_steps=2,
num_variational_steps=2)
self.evaluate(tf.compat.v1.global_variables_initializer())
samples_, kernel_results_ = self.evaluate((samples, kernel_results))
acceptance_rate = np.mean(
kernel_results_.inner_results.inner_results.is_accepted, axis=0)
posterior_means = {
param.name: np.mean(param_draws, axis=0)
for (param, param_draws) in zip(model.parameters, samples_)}
# Perfunctory checks to ensure the code executed and we got results
# of the expected shape.
self.assertAllEqual(acceptance_rate.shape, batch_shape)
for parameter in model.parameters:
self.assertAllEqual(posterior_means[parameter.name].shape,
self._batch_shape_as_list(parameter.prior) +
self._event_shape_as_list(parameter.prior))
def test_multiple_chains_example(self):
batch_shape = [2, 3]
num_timesteps = 5
num_results = 6
num_chains = 4
# Use an observation mask to additionally test that masks are
# threaded through the HMC (and VI) APIs.
observed_time_series_ = np.random.randn(
*(batch_shape + [num_timesteps]))
observed_time_series = tfp.sts.MaskedTimeSeries(
self._build_tensor(observed_time_series_),
is_missing=self._build_tensor([False, True, False, False, True],
dtype=np.bool))
model = self._build_model(observed_time_series)
samples, kernel_results = tfp.sts.fit_with_hmc(
model,
observed_time_series,
num_results=num_results,
chain_batch_shape=num_chains,
num_warmup_steps=2,
num_variational_steps=2)
self.evaluate(tf.compat.v1.global_variables_initializer())
samples_, kernel_results_ = self.evaluate((samples, kernel_results))
acceptance_rate = np.mean(
kernel_results_.inner_results.inner_results.is_accepted, axis=0)
# Combining the samples from multiple chains into a single dimension allows
# us to easily pass sampled parameters to downstream forecasting methods.
combined_samples_ = [np.reshape(param_draws,
[-1] + list(param_draws.shape[2:]))
for param_draws in samples_]
self.assertAllEqual(acceptance_rate.shape, [num_chains] + batch_shape)
for parameter, samples_ in zip(model.parameters, combined_samples_):
self.assertAllEqual(samples_.shape,
[num_results * num_chains] +
self._batch_shape_as_list(parameter.prior) +
self._event_shape_as_list(parameter.prior))
def _shape_as_list(self, tensor):
if self.use_static_shape:
return tensor.shape.as_list()
else:
return list(self.evaluate(tf.shape(input=tensor)))
def _batch_shape_as_list(self, distribution):
if self.use_static_shape:
return distribution.batch_shape.as_list()
else:
return list(self.evaluate(distribution.batch_shape_tensor()))
def _event_shape_as_list(self, distribution):
if self.use_static_shape:
return distribution.event_shape.as_list()
else:
return list(self.evaluate(distribution.event_shape_tensor()))
def _build_tensor(self, ndarray, dtype=None):
"""Convert a numpy array to a TF placeholder.
Args:
ndarray: any object convertible to a numpy array via `np.asarray()`.
dtype: optional `dtype`.
Returns:
placeholder: a TensorFlow `placeholder` with default value given by the
provided `ndarray`, dtype given by `self.dtype` (if not specified), and
shape specified statically only if `self.use_static_shape` is `True`.
"""
ndarray = np.asarray(ndarray).astype(self.dtype if dtype is None else dtype)
return tf.compat.v1.placeholder_with_default(
input=ndarray, shape=ndarray.shape if self.use_static_shape else None)
@test_util.run_all_in_graph_and_eager_modes
class HMCTestsStatic32(tf.test.TestCase, parameterized.TestCase, _HMCTests):
dtype = np.float32
use_static_shape = True
# Parameterized tests appear to require that their direct containing class
# inherits from `parameterized.TestCase`, so we have to put this test here
# rather than the base class. As a bonus, running this test only in the
# Static32 case reduces overall test weight.
@parameterized.parameters(([], []),
(3, [3]),
([3], [3]),
([5, 2], [5, 2]))
def test_chain_batch_shape(self, shape_in, expected_batch_shape_out):
batch_shape = [2, 3]
num_results = 1
num_timesteps = 5
observed_time_series = self._build_tensor(np.random.randn(
*(batch_shape + [num_timesteps])))
model = self._build_model(observed_time_series)
samples, _ = tfp.sts.fit_with_hmc(
model,
observed_time_series,
num_results=num_results,
chain_batch_shape=shape_in,
num_warmup_steps=1,
num_variational_steps=1)
self.evaluate(tf.compat.v1.global_variables_initializer())
for parameter, parameter_samples in zip(model.parameters, samples):
self.assertAllEqual(self._shape_as_list(parameter_samples),
[num_results] +
expected_batch_shape_out +
self._batch_shape_as_list(parameter.prior) +
self._event_shape_as_list(parameter.prior))
# This test runs in graph mode only to reduce test weight.
class HMCTestsDynamic32(tf.test.TestCase, _HMCTests):
dtype = np.float32
use_static_shape = False
# This test runs in graph mode only to reduce test weight.
class HMCTestsStatic64(tf.test.TestCase, _HMCTests):
dtype = np.float64
use_static_shape = True
if __name__ == '__main__':
tf.test.main()
| 39.274074
| 115
| 0.694738
|
22fc4a0f6aa9341cbd2260d964f0896bef96f783
| 823
|
py
|
Python
|
bin/dump_all_tables.py
|
johned0/EdwardsLab
|
ae0d8b51a579cd009b414d11224b4110ba13af66
|
[
"MIT"
] | 30
|
2015-01-25T16:22:51.000Z
|
2022-01-20T15:56:47.000Z
|
bin/dump_all_tables.py
|
johned0/EdwardsLab
|
ae0d8b51a579cd009b414d11224b4110ba13af66
|
[
"MIT"
] | 2
|
2020-04-13T15:00:37.000Z
|
2020-09-23T12:35:59.000Z
|
bin/dump_all_tables.py
|
johned0/EdwardsLab
|
ae0d8b51a579cd009b414d11224b4110ba13af66
|
[
"MIT"
] | 24
|
2015-04-17T00:52:05.000Z
|
2021-11-26T17:50:01.000Z
|
import argparse
import sqlite3
import pandas as pd
def to_csv(filename):
db = sqlite3.connect(filename)
cursor = db.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = cursor.fetchall()
for table_name in tables:
table_name = table_name[0]
table = pd.read_sql_query("SELECT * from %s" % table_name, db)
table.to_csv(table_name + '.csv', index_label='index', encoding='utf-8')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Dump the contents of an SQL file to CSV. This was taken from http://stackoverflow.com/questions/305378/get-list-of-tables-db-schema-dump-etc-in-sqlite-databases')
parser.add_argument('-d', help='SQLlite database file', required=True)
args = parser.parse_args()
to_csv(args.d)
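# Hedged example (database name is illustrative): dump every table of an SQLite
# file into per-table CSVs in the current working directory:
#   python dump_all_tables.py -d example.db
# writes one <table_name>.csv for each table listed in sqlite_master.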
| 37.409091
| 212
| 0.705954
|
68a1a8915af6634664f6c1d0c315180aa253a17a
| 229
|
py
|
Python
|
examples/test/__main__.py
|
Cjreynol/pygame-boilerplate
|
a43ac4fa2eec6ba5fde422a57dd4f4e0aa1f23ff
|
[
"MIT"
] | null | null | null |
examples/test/__main__.py
|
Cjreynol/pygame-boilerplate
|
a43ac4fa2eec6ba5fde422a57dd4f4e0aa1f23ff
|
[
"MIT"
] | null | null | null |
examples/test/__main__.py
|
Cjreynol/pygame-boilerplate
|
a43ac4fa2eec6ba5fde422a57dd4f4e0aa1f23ff
|
[
"MIT"
] | null | null | null |
from boilerplate.game import Game
from examples.test.game_state import GameState
def main():
game = Game("Example")
state = GameState()
game.dirty_rect_run(state)
if __name__ == "__main__":
main()
| 17.615385
| 48
| 0.659389
|
b39698c66f68dd4618a58dc6ece6f420e4e5b8f7
| 53
|
py
|
Python
|
components/__init__.py
|
YetAnotherTimeTracker/yatt
|
5cd75da60aecd51ee6dc67bb8dc3662b50a3b4a3
|
[
"MIT"
] | 1
|
2017-12-04T22:45:54.000Z
|
2017-12-04T22:45:54.000Z
|
components/__init__.py
|
avbelyaev/yatt
|
5cd75da60aecd51ee6dc67bb8dc3662b50a3b4a3
|
[
"MIT"
] | 17
|
2017-10-15T01:38:39.000Z
|
2017-12-18T22:00:32.000Z
|
components/__init__.py
|
avbelyaev/yatt
|
5cd75da60aecd51ee6dc67bb8dc3662b50a3b4a3
|
[
"MIT"
] | 12
|
2017-11-02T19:51:54.000Z
|
2020-11-29T17:35:53.000Z
|
"""
Created by anthony on 21.11.2017
__init__.py
"""
| 10.6
| 32
| 0.679245
|
4dfddf8506ed5480e100f275770b354b1e5327cf
| 746
|
py
|
Python
|
backend/webserver/sockets/projects.py
|
jsbroks/lista
|
4bf06f3a243c7c59632ce2348d1a9f6917878906
|
[
"MIT"
] | null | null | null |
backend/webserver/sockets/projects.py
|
jsbroks/lista
|
4bf06f3a243c7c59632ce2348d1a9f6917878906
|
[
"MIT"
] | null | null | null |
backend/webserver/sockets/projects.py
|
jsbroks/lista
|
4bf06f3a243c7c59632ce2348d1a9f6917878906
|
[
"MIT"
] | null | null | null |
from webserver.extensions import db, socketio
from flask_login import current_user
from flask_socketio import emit
from webserver.config import logger
from .utils import authenticated_only, commit_or_null
@socketio.on('join project')
@authenticated_only
def join_project(id):
"""
    Join a project (if the user has access). Joining a project's room allows the
    user to receive realtime updates.
"""
project_id = int(id)
project = current_user.projects.filter_by(id=project_id).first()
if project:
logger.debug(
f'{current_user.username} has joined room {project.name} ({project.id})')
project.join_room()
return project is not None
@socketio.on('leave project')
def leave_project():
pass
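# Hedged client-side sketch (not part of the original module; assumes a
# python-socketio client and an already authenticated session, with the URL and
# project id purely illustrative):
#   import socketio
#   sio = socketio.Client()
#   sio.connect('http://localhost:5000')
#   joined = sio.call('join project', 3)   # True when the user has access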
| 23.3125
| 85
| 0.717158
|
936ffa01dc67032419dae484f9ac633b8a37c20e
| 12,001
|
py
|
Python
|
bungieapi/generated/components/schemas/destiny/components/profiles.py
|
itemmanager/bungieapi
|
0c4326f88ea0f28a1dcab683dc08c8d21c940fc1
|
[
"MIT"
] | 5
|
2022-01-06T21:05:53.000Z
|
2022-02-12T19:58:11.000Z
|
bungieapi/generated/components/schemas/destiny/components/profiles.py
|
itemmanager/bungieapi
|
0c4326f88ea0f28a1dcab683dc08c8d21c940fc1
|
[
"MIT"
] | 8
|
2021-12-25T02:40:56.000Z
|
2022-03-28T03:31:41.000Z
|
bungieapi/generated/components/schemas/destiny/components/profiles.py
|
itemmanager/bungieapi
|
0c4326f88ea0f28a1dcab683dc08c8d21c940fc1
|
[
"MIT"
] | 1
|
2022-01-30T23:53:25.000Z
|
2022-01-30T23:53:25.000Z
|
# generated by update to not change manually
import dataclasses as dt
import typing as t
from bungieapi.json import to_json
from bungieapi.types import ManifestReference
@dt.dataclass(frozen=True)
class DestinyProfileProgressionComponent:
"""The set of progression-related information that applies at a Profile-
wide level for your Destiny experience.
This differs from the Jimi Hendrix Experience because there's less
guitars on fire. Yet. #spoileralert? This will include information
such as Checklist info.
"""
checklists: t.Mapping[str, t.Mapping[str, bool]] = dt.field(
metadata={
"description": """The set of checklists that can be examined on a profile-wide basis, keyed by the hash identifier of the Checklist (DestinyChecklistDefinition)
For each checklist returned, its value is itself a Dictionary keyed by the checklist's hash identifier with the value being a boolean indicating if it's been discovered yet."""
}
)
seasonal_artifact: "DestinyArtifactProfileScoped" = dt.field(
metadata={
"description": "Data related to your progress on the current season's artifact that is the same across characters."
}
)
def to_json(self) -> t.Mapping[str, t.Any]:
return {
"checklists": to_json(self.checklists),
"seasonalArtifact": to_json(self.seasonal_artifact),
}
@dt.dataclass(frozen=True)
class DestinyProfileTransitoryComponent:
"""This is an experimental set of data that Bungie considers to be "transitory" - information that may be useful for API users, but that is coming from a non-authoritative data source about information that could potentially change at a more frequent pace than Bungie.net will receive updates about it.
This information is provided exclusively for convenience should any of it be useful to users: we provide no guarantees to the accuracy or timeliness of data that comes from this source. Know that this data can potentially be out-of-date or even wrong entirely if the user disconnected from the game or suddenly changed their status before we can receive refreshed data."""
current_activity: "DestinyProfileTransitoryCurrentActivity" = dt.field(
metadata={
"description": "If you are in an activity, this is some transitory info about the activity currently being played."
}
)
joinability: "DestinyProfileTransitoryJoinability" = dt.field(
metadata={
"description": "Information about whether and what might prevent you from joining this person on a fireteam."
}
)
party_members: t.Sequence["DestinyProfileTransitoryPartyMember"] = dt.field(
metadata={
"description": "If you have any members currently in your party, this is some (very) bare-bones information about those members."
}
)
tracking: t.Sequence["DestinyProfileTransitoryTrackingEntry"] = dt.field(
metadata={"description": "Information about tracked entities."}
)
last_orbited_destination_hash: t.Optional[
ManifestReference["DestinyDestinationDefinition"]
] = dt.field(
default=None,
metadata={
"description": "The hash identifier for the DestinyDestinationDefinition of the last location you were orbiting when in orbit."
},
)
def to_json(self) -> t.Mapping[str, t.Any]:
return {
"partyMembers": to_json(self.party_members),
"currentActivity": to_json(self.current_activity),
"joinability": to_json(self.joinability),
"tracking": to_json(self.tracking),
"lastOrbitedDestinationHash": to_json(self.last_orbited_destination_hash),
}
@dt.dataclass(frozen=True)
class DestinyProfileTransitoryPartyMember:
"""This is some bare minimum information about a party member in a
Fireteam.
Unfortunately, without great computational expense on our side we
can only get at the data contained here. I'd like to give you a
character ID for example, but we don't have it. But we do have these
three pieces of information. May they help you on your quest to show
meaningful data about current Fireteams. Notably, we don't and can't
feasibly return info on characters. If you can, try to use just the
data below for your UI and purposes. Only hit us with further
queries if you absolutely must know the character ID of the
currently playing character. Pretty please with sugar on top.
"""
display_name: str = dt.field(
metadata={"description": "The player's last known display name."}
)
emblem_hash: ManifestReference["DestinyInventoryItemDefinition"] = dt.field(
metadata={
"description": "The identifier for the DestinyInventoryItemDefinition of the player's emblem."
}
)
membership_id: int = dt.field(
metadata={"description": "The Membership ID that matches the party member."}
)
status: "DestinyPartyMemberStates" = dt.field(
metadata={
"description": "A Flags Enumeration value indicating the states that the player is in relevant to being on a fireteam."
}
)
def to_json(self) -> t.Mapping[str, t.Any]:
return {
"membershipId": to_json(self.membership_id),
"emblemHash": to_json(self.emblem_hash),
"displayName": to_json(self.display_name),
"status": to_json(self.status),
}
@dt.dataclass(frozen=True)
class DestinyProfileTransitoryCurrentActivity:
"""If you are playing in an activity, this is some information about it.
Note that we cannot guarantee any of this resembles what ends up in
the PGCR in any way. They are sourced by two entirely separate
systems with their own logic, and the one we source this data from
should be considered non-authoritative in comparison.
"""
highest_opposing_faction_score: float = dt.field(
metadata={
"description": "If you have human opponents, this is the highest opposing team's score."
}
)
number_of_opponents: int = dt.field(
metadata={
"description": "This is how many human or poorly crafted aimbot opponents you have."
}
)
number_of_players: int = dt.field(
metadata={
"description": "This is how many human or poorly crafted aimbots are on your team."
}
)
score: float = dt.field(
metadata={
"description": "This is what our non-authoritative source thought the score was."
}
)
end_time: t.Optional[str] = dt.field(
default=None,
metadata={
"description": 'If you\'re still in it but it "ended" (like when folks are dancing around the loot after they beat a boss), this is when the activity ended.'
},
)
start_time: t.Optional[str] = dt.field(
default=None, metadata={"description": "When the activity started."}
)
def to_json(self) -> t.Mapping[str, t.Any]:
return {
"startTime": to_json(self.start_time),
"endTime": to_json(self.end_time),
"score": to_json(self.score),
"highestOpposingFactionScore": to_json(self.highest_opposing_faction_score),
"numberOfOpponents": to_json(self.number_of_opponents),
"numberOfPlayers": to_json(self.number_of_players),
}
@dt.dataclass(frozen=True)
class DestinyProfileTransitoryJoinability:
"""Some basic information about whether you can be joined, how many slots
are left etc.
Note that this can change quickly, so it may not actually be useful.
But perhaps it will be in some use cases?
"""
closed_reasons: "DestinyJoinClosedReasons" = dt.field(
metadata={
"description": "Reasons why a person can't join this person's fireteam."
}
)
open_slots: int = dt.field(
metadata={
"description": "The number of slots still available on this person's fireteam."
}
)
privacy_setting: "DestinyGamePrivacySetting" = dt.field(
metadata={"description": "Who the person is currently allowing invites from."}
)
def to_json(self) -> t.Mapping[str, t.Any]:
return {
"openSlots": to_json(self.open_slots),
"privacySetting": to_json(self.privacy_setting),
"closedReasons": to_json(self.closed_reasons),
}
@dt.dataclass(frozen=True)
class DestinyProfileTransitoryTrackingEntry:
"""This represents a single "thing" being tracked by the player.
This can point to many types of entities, but only a subset of them
will actually have a valid hash identifier for whatever it is being
pointed to. It's up to you to interpret what it means when various
combinations of these entries have values being tracked.
"""
activity_hash: t.Optional[
ManifestReference["DestinyActivityDefinition"]
] = dt.field(
default=None,
metadata={
"description": "OPTIONAL - If this is tracking the status of a DestinyActivityDefinition, this is the identifier for that activity."
},
)
item_hash: t.Optional[
ManifestReference["DestinyInventoryItemDefinition"]
] = dt.field(
default=None,
metadata={
"description": "OPTIONAL - If this is tracking the status of a DestinyInventoryItemDefinition, this is the identifier for that item."
},
)
location_hash: t.Optional[
ManifestReference["DestinyLocationDefinition"]
] = dt.field(
default=None,
metadata={
"description": "OPTIONAL - If this is tracking a DestinyLocationDefinition, this is the identifier for that location."
},
)
objective_hash: t.Optional[
ManifestReference["DestinyObjectiveDefinition"]
] = dt.field(
default=None,
metadata={
"description": "OPTIONAL - If this is tracking the status of a DestinyObjectiveDefinition, this is the identifier for that objective."
},
)
questline_item_hash: t.Optional[
ManifestReference["DestinyInventoryItemDefinition"]
] = dt.field(
default=None,
metadata={
"description": "OPTIONAL - If this is tracking the status of a quest, this is the identifier for the DestinyInventoryItemDefinition that containst that questline data."
},
)
tracked_date: t.Optional[str] = dt.field(
default=None,
metadata={
"description": """OPTIONAL - I've got to level with you, I don't really know what this is. Is it when you started tracking it? Is it only populated for tracked items that have time limits?
I don't know, but we can get at it - when I get time to actually test what it is, I'll update this. In the meantime, bask in the mysterious data."""
},
)
def to_json(self) -> t.Mapping[str, t.Any]:
return {
"locationHash": to_json(self.location_hash),
"itemHash": to_json(self.item_hash),
"objectiveHash": to_json(self.objective_hash),
"activityHash": to_json(self.activity_hash),
"questlineItemHash": to_json(self.questline_item_hash),
"trackedDate": to_json(self.tracked_date),
}
from bungieapi.generated.components.schemas.destiny import ( # noqa: E402
DestinyGamePrivacySetting,
DestinyJoinClosedReasons,
DestinyPartyMemberStates,
)
# imported at the end so as not to cause circular imports for type annotations
from bungieapi.generated.components.schemas.destiny.artifacts import ( # noqa: E402
DestinyArtifactProfileScoped,
)
from bungieapi.generated.components.schemas.destiny.definitions import ( # noqa: E402
DestinyActivityDefinition,
DestinyDestinationDefinition,
DestinyInventoryItemDefinition,
DestinyLocationDefinition,
DestinyObjectiveDefinition,
)
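# --- Illustrative sketch (not part of the generated module) -----------------
# Every class above delegates field serialization to a module-level `to_json`
# helper imported elsewhere in the package. As a rough, hypothetical
# illustration of the recursion such a helper needs (None, nested objects with
# their own `to_json`, enum/flag members, and sequences), something along
# these lines would behave similarly; the real bungieapi helper may differ.
import enum as _enum
import typing as _t


def _illustrative_to_json(value: _t.Any) -> _t.Any:
    """Hypothetical recursive serializer, for illustration only."""
    if value is None:
        return None
    if hasattr(value, "to_json"):         # nested generated dataclasses
        return value.to_json()
    if isinstance(value, _enum.Enum):     # flags / enum members
        return value.value
    if isinstance(value, (list, tuple)):  # sequences of any of the above
        return [_illustrative_to_json(item) for item in value]
    return value                          # str / int / float pass through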
| 41.382759
| 376
| 0.679193
|
c289ae806ce6d05882b663db1d969ae8579a3d89
| 455
|
py
|
Python
|
week07/lecture/examples/src7/favorites/favorites2.py
|
uldash/CS50x
|
c3ee0f42ad514b57a13c3ffbb96238b3ca3730e1
|
[
"MIT"
] | null | null | null |
week07/lecture/examples/src7/favorites/favorites2.py
|
uldash/CS50x
|
c3ee0f42ad514b57a13c3ffbb96238b3ca3730e1
|
[
"MIT"
] | null | null | null |
week07/lecture/examples/src7/favorites/favorites2.py
|
uldash/CS50x
|
c3ee0f42ad514b57a13c3ffbb96238b3ca3730e1
|
[
"MIT"
] | null | null | null |
# Prints unique titles in CSV, case sensitively
import csv
# For accumulating (and later sorting) titles
titles = set()
# Open CSV file
with open("Favorite TV Shows - Form Responses 1.csv", "r") as file:
# Create DictReader
reader = csv.DictReader(file)
# Iterate over CSV file, adding each title to set
for row in reader:
titles.add(row["title"])
# Print titles in sorted order
for title in sorted(titles):
print(title)
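# Hedged follow-up sketch (not part of the original favorites2.py): the same
# DictReader pattern extends naturally to counting how often each title was
# submitted, assuming the same CSV file and "title" column (csv is already
# imported above).
from collections import Counter

counts = Counter()
with open("Favorite TV Shows - Form Responses 1.csv", "r") as file:
    for row in csv.DictReader(file):
        counts[row["title"]] += 1

# Print each title with its count, most popular first
for title, count in counts.most_common():
    print(title, count)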
| 21.666667
| 67
| 0.69011
|
91696720a13382b530938fbb55848318bdb4006d
| 448
|
py
|
Python
|
023_lambda.py
|
MikePolyakov/python_book
|
497681e8a167918a19ae737960c9c86ebffa9e91
|
[
"MIT"
] | null | null | null |
023_lambda.py
|
MikePolyakov/python_book
|
497681e8a167918a19ae737960c9c86ebffa9e91
|
[
"MIT"
] | null | null | null |
023_lambda.py
|
MikePolyakov/python_book
|
497681e8a167918a19ae737960c9c86ebffa9e91
|
[
"MIT"
] | null | null | null |
lambda x: x**2
def sqr(x):
return x**2
print(' '.join(map(lambda x: str(x**2), range(1, 101))))
n = int(input())
points = []
for i in range(n):
point = tuple(map(int, input().split()))
points.append(point)
points.sort(key=lambda point: point[0]**2 + point[1]**2)
for point in points:
print(' '.join(map(str, point)))
def traditionalSqr(x):
return x**2
lambdaSqr = lambda x: x**2
print(traditionalSqr(3))
print(lambdaSqr(3))
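# A small additional sketch (not in the original listing): a lambda also works
# as a throwaway predicate, for example with filter().
print(list(filter(lambda x: x % 2 == 0, range(10))))  # [0, 2, 4, 6, 8]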
| 20.363636
| 56
| 0.625
|
e4102bfc2adbe15366829d477debcbad01e76f75
| 39,011
|
py
|
Python
|
unittests/test_peer_channels.py
|
electrumsv/electrumsv-reference-server
|
b8fb0773f304e7930f0be867ef7b4bf9e5b95cab
|
[
"OML"
] | null | null | null |
unittests/test_peer_channels.py
|
electrumsv/electrumsv-reference-server
|
b8fb0773f304e7930f0be867ef7b4bf9e5b95cab
|
[
"OML"
] | 2
|
2021-11-24T01:22:55.000Z
|
2021-12-02T13:47:12.000Z
|
unittests/test_peer_channels.py
|
electrumsv/electrumsv-reference-server
|
b8fb0773f304e7930f0be867ef7b4bf9e5b95cab
|
[
"OML"
] | 1
|
2021-11-23T01:00:38.000Z
|
2021-11-23T01:00:38.000Z
|
from __future__ import annotations
import asyncio
import base64
import datetime
from http import HTTPStatus
import json
import logging
import os
from pathlib import Path
try:
# Linux expects the latest package version of 3.35.4 (as of pysqlite-binary 0.4.6)
import pysqlite3 as sqlite3
except ModuleNotFoundError:
# MacOS has latest brew version of 3.35.5 (as of 2021-06-20).
# Windows builds use the official Python 3.10.0 builds and bundled version of 3.35.5.
import sqlite3 # type: ignore
import aiohttp
from aiohttp import web, WSServerHandshakeError
from bitcoinx import PrivateKey, PublicKey
from electrumsv_database.sqlite import replace_db_context_with_connection
import pytest
import requests
from esv_reference_server.application_state import ApplicationState
from esv_reference_server.errors import WebsocketUnauthorizedException
from esv_reference_server import sqlite_db
from .conftest import _wrong_auth_type, _bad_token, _successful_call, _no_auth, \
_subscribe_to_general_notifications_peer_channels, TEST_EXTERNAL_HOST, TEST_EXTERNAL_PORT, \
WS_URL_GENERAL
WS_URL_TEMPLATE_MSG_BOX = "ws://"+ TEST_EXTERNAL_HOST +":"+ str(TEST_EXTERNAL_PORT) + \
"/api/v1/channel/{channelid}/notify"
PRIVATE_KEY_1 = PrivateKey.from_hex(
"720f1987db69efa562b3dabd78e51f19bd8da76c70ad839b72b939f4071b144b")
PUBLIC_KEY_1: PublicKey = PRIVATE_KEY_1.public_key
REF_TYPE_OUTPUT = 0
REF_TYPE_INPUT = 1
STREAM_TERMINATION_BYTE = b"\x00"
MODULE_DIR = Path(os.path.dirname(os.path.abspath(__file__)))
CHANNEL_ID: str = ""
CHANNEL_BEARER_TOKEN: str = ""
CHANNEL_BEARER_TOKEN_ID: int = 0
CHANNEL_READ_ONLY_TOKEN: str = ""
CHANNEL_READ_ONLY_TOKEN_ID: int = 0
class TestAiohttpRESTAPI:
logger = logging.getLogger("test-aiohttp-rest-api")
_account_id: int
_api_key: str
@classmethod
def setup_class(cls) -> None:
assert ApplicationState.singleton_reference is not None
application_state = ApplicationState.singleton_reference()
assert application_state is not None
cls._account_id, cls._api_key = application_state.database_context.run_in_thread(
sqlite_db.create_account, PUBLIC_KEY_1.to_bytes(compressed=True))
def setup_method(self) -> None:
pass
def teardown_method(self) -> None:
pass
@classmethod
def teardown_class(cls) -> None:
pass
async def _create_new_channel(self) -> tuple[str, str, str]:
URL = "http://{host}:{port}/api/v1/channel/manage".format(host=TEST_EXTERNAL_HOST,
port=TEST_EXTERNAL_PORT)
request_body = {
"public_read": True,
"public_write": True,
"sequenced": True,
"retention": {
"min_age_days": 0,
"max_age_days": 0,
"auto_prune": True
}
}
self.logger.debug("test_create_new_channel url: %s", URL)
async with aiohttp.ClientSession() as session:
headers = {"Authorization": f"Bearer {self._api_key}"}
async with session.post(URL, headers=headers, json=request_body) as resp:
self.logger.debug("resp.content = %s", resp.content)
assert resp.status == 200, resp.reason
single_channel_data = await resp.json()
CHANNEL_ID = single_channel_data['id']
CHANNEL_BEARER_TOKEN = single_channel_data['access_tokens'][0]['token']
CHANNEL_BEARER_TOKEN_ID = single_channel_data['access_tokens'][0]['id']
return CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID
async def _create_read_only_token(self, CHANNEL_ID: str) -> tuple[str, str]:
URL = "http://"+ TEST_EXTERNAL_HOST +":"+ str(TEST_EXTERNAL_PORT) + \
"/api/v1/channel/manage/{channelid}/api-token"
request_body = {
"description": "websocket read only token",
"can_read": True,
"can_write": False
}
url = URL.format(channelid=CHANNEL_ID)
self.logger.debug("test_create_new_token_for_channel url: %s", url)
async with aiohttp.ClientSession() as session:
headers = {"Authorization": f"Bearer {self._api_key}"}
async with session.post(url, headers=headers, json=request_body) as resp:
self.logger.debug("resp.content = %s", resp.content)
assert resp.status == 200, resp.reason
response_body = await resp.json()
CHANNEL_READ_ONLY_TOKEN_ID = response_body['id']
CHANNEL_READ_ONLY_TOKEN = response_body['token']
return CHANNEL_READ_ONLY_TOKEN_ID, CHANNEL_READ_ONLY_TOKEN
@pytest.mark.asyncio
def test_ping(self) -> None:
URL = "http://{host}:{port}/".format(host=TEST_EXTERNAL_HOST, port=TEST_EXTERNAL_PORT)
result = requests.get(URL)
assert result.text is not None
@pytest.mark.asyncio
def test_create_new_channel(self) -> None:
URL = 'http://{host}:{port}/api/v1/channel/manage'.format(host=TEST_EXTERNAL_HOST,
port=TEST_EXTERNAL_PORT)
HTTP_METHOD = 'post'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
request_body = {
"public_read": True,
"public_write": True,
"sequenced": True,
"retention": {
"min_age_days": 0,
"max_age_days": 0,
"auto_prune": True
}
}
self.logger.debug("test_create_new_channel url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, None,
request_body, self._api_key)
assert result.status_code == 200, result.reason
response_body = result.json()
# self.logger.debug(json.dumps(response_body, indent=4))
single_channel_data = response_body
CHANNEL_ID = single_channel_data['id']
assert single_channel_data['href'] == \
f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}/api/v1/channel/{CHANNEL_ID}"
assert single_channel_data['public_read'] is True
assert single_channel_data['public_write'] is True
assert single_channel_data['sequenced'] is True
assert single_channel_data['retention'] == {"min_age_days": 0, "max_age_days": 0, \
"auto_prune": True}
assert isinstance(single_channel_data['access_tokens'], list)
assert single_channel_data['access_tokens'][0]['id'] == 1
issued_token_bytes = \
base64.urlsafe_b64decode(single_channel_data['access_tokens'][0]['token'])
assert len(issued_token_bytes) == 64
assert single_channel_data['access_tokens'][0]['description'] == "Owner"
assert single_channel_data['access_tokens'][0]['can_read'] is True
assert single_channel_data['access_tokens'][0]['can_write'] is True
@pytest.mark.asyncio
async def test_create_new_token_for_channel(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
# handler: create_new_token_for_channel
URL = "http://"+ TEST_EXTERNAL_HOST +":"+ str(TEST_EXTERNAL_PORT) + \
"/api/v1/channel/manage/{channelid}/api-token"
HTTP_METHOD = 'post'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
request_body = {
"description": "some description",
"can_read": True,
"can_write": False
}
url = URL.format(channelid=CHANNEL_ID)
self.logger.debug("test_create_new_token_for_channel url: %s", url)
result = _successful_call(url, HTTP_METHOD, None,
request_body, self._api_key)
assert result.status_code == 200, result.reason
response_body = result.json()
assert len(base64.urlsafe_b64decode(response_body['token'])) == 64
expected_response_body = {
"id": 3,
"token": response_body['token'],
"description": "some description",
"can_read": True,
"can_write": False
}
assert response_body == expected_response_body
@pytest.mark.asyncio
def test_list_channels(self) -> None:
# handler: list_channels
URL = "http://"+ TEST_EXTERNAL_HOST +":"+ str(TEST_EXTERNAL_PORT) + \
"/api/v1/channel/manage/list"
HTTP_METHOD = 'get'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
request_body = None
self.logger.debug("test_list_channels url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, None,
request_body, self._api_key)
assert result.status_code == 200, result.reason
response_body = result.json()
# self.logger.debug(json.dumps(response_body, indent=4))
assert isinstance(response_body, list)
assert len(response_body) == 2
for single_channel_data in response_body:
# assert single_channel_data['href'] == \
# f"http://{TEST_HOST}:{TEST_PORT}/api/v1/channel/{CHANNEL_ID}"
assert single_channel_data['public_read'] is True
assert single_channel_data['public_write'] is True
assert single_channel_data['sequenced'] is True
assert single_channel_data['retention'] == {"min_age_days": 0, "max_age_days": 0,
"auto_prune": True}
assert isinstance(single_channel_data['access_tokens'], list)
assert isinstance(single_channel_data['access_tokens'][0]['id'], int)
issued_token_bytes = base64.urlsafe_b64decode(
single_channel_data['access_tokens'][0]['token'])
assert len(issued_token_bytes) == 64
# assert single_channel_data['access_tokens'][0]['token'] == CHANNEL_BEARER_TOKEN
assert single_channel_data['access_tokens'][0]['description'] == "Owner"
assert single_channel_data['access_tokens'][0]['can_read'] is True
assert single_channel_data['access_tokens'][0]['can_write'] is True
@pytest.mark.asyncio
async def test_get_single_channel_details(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
# handler: get_single_channel_details
URL = "http://"+ TEST_EXTERNAL_HOST +":"+ str(TEST_EXTERNAL_PORT) + \
"/api/v1/channel/manage/{channelid}"
HTTP_METHOD = 'get'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
request_body = None
url = URL.format(channelid=CHANNEL_ID)
self.logger.debug("test_get_single_channel_details url: %s", url)
result = _successful_call(url, HTTP_METHOD, None,
request_body, self._api_key)
assert result.status_code == 200, result.reason
response_body = result.json()
# self.logger.debug(json.dumps(response_body, indent=4))
single_channel_data = response_body
assert single_channel_data['href'] == \
f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}/api/v1/channel/{CHANNEL_ID}"
assert single_channel_data['public_read'] is True
assert single_channel_data['public_write'] is True
assert single_channel_data['sequenced'] is True
assert single_channel_data['retention'] == {"min_age_days": 0, "max_age_days": 0,
"auto_prune": True}
assert isinstance(single_channel_data['access_tokens'], list)
assert isinstance(single_channel_data['access_tokens'][0]['id'], int)
issued_token_bytes = \
base64.urlsafe_b64decode(single_channel_data['access_tokens'][0]['token'])
assert len(issued_token_bytes) == 64
assert single_channel_data['access_tokens'][0]['description'] == "Owner"
assert single_channel_data['access_tokens'][0]['can_read'] is True
assert single_channel_data['access_tokens'][0]['can_write'] is True
@pytest.mark.asyncio
async def test_update_single_channel_properties(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
# handler: update_single_channel_properties
URL = "http://"+ TEST_EXTERNAL_HOST +":"+ str(TEST_EXTERNAL_PORT) + \
"/api/v1/channel/manage/{channelid}"
HTTP_METHOD = 'post'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
request_body = {
"public_read": True,
"public_write": True,
"locked": False
}
url = URL.format(channelid=CHANNEL_ID)
self.logger.debug("test_update_single_channel_properties url: %s", url)
result = _successful_call(url, HTTP_METHOD, None,
request_body, self._api_key)
assert result.status_code == 200, result.reason
response_body = result.json()
# self.logger.debug(json.dumps(response_body, indent=4))
assert response_body == request_body
@pytest.mark.asyncio
async def test_get_token_details(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
CHANNEL_READ_ONLY_TOKEN_ID, CHANNEL_READ_ONLY_TOKEN = \
await self._create_read_only_token(CHANNEL_ID)
expected_response_body = {
"id": CHANNEL_READ_ONLY_TOKEN_ID,
"token": CHANNEL_READ_ONLY_TOKEN,
"description": "websocket read only token",
"can_read": True,
"can_write": False
}
# handler: get_token_details
URL = 'http://{host}:{port}/api/v1/channel/manage/{channelid}/api-token/{tokenid}'\
.format(host=TEST_EXTERNAL_HOST, port=TEST_EXTERNAL_PORT, channelid=CHANNEL_ID,
tokenid=CHANNEL_READ_ONLY_TOKEN_ID)
HTTP_METHOD = 'get'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
request_body = None
self.logger.debug("test_get_token_details url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, None,
request_body, self._api_key)
assert result.status_code == 200, result.reason
response_body = result.json()
self.logger.debug(json.dumps(response_body, indent=4))
assert response_body == expected_response_body
@pytest.mark.asyncio
async def test_get_list_of_tokens(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
CHANNEL_READ_ONLY_TOKEN_ID, CHANNEL_READ_ONLY_TOKEN = \
await self._create_read_only_token(CHANNEL_ID)
expected_response_body = [
{
"id": CHANNEL_BEARER_TOKEN_ID,
"token": CHANNEL_BEARER_TOKEN,
"description": "Owner",
"can_read": True,
"can_write": True
},
{
"id": CHANNEL_READ_ONLY_TOKEN_ID,
"token": CHANNEL_READ_ONLY_TOKEN,
"description": "websocket read only token",
"can_read": True,
"can_write": False
}
]
# handler: get_list_of_tokens
URL = 'http://{host}:{port}/api/v1/channel/manage/{channelid}/api-token'\
.format(host=TEST_EXTERNAL_HOST, port=TEST_EXTERNAL_PORT, channelid=CHANNEL_ID)
HTTP_METHOD = 'get'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
request_body = None
self.logger.debug("test_get_list_of_tokens url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, None,
request_body, self._api_key)
assert result.status_code == 200, result.reason
response_body = result.json()
self.logger.debug(json.dumps(response_body, indent=4))
assert response_body == expected_response_body
# MESSAGE MANAGEMENT APIS - USE CHANNEL-SPECIFIC BEARER TOKEN NOW
@pytest.mark.asyncio
async def test_write_message_no_content_type_should_raise_400(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
# handler: write_message
URL = 'http://{host}:{port}/api/v1/channel/{channelid}'.format(host=TEST_EXTERNAL_HOST,
port=TEST_EXTERNAL_PORT, channelid=CHANNEL_ID)
HTTP_METHOD = 'post'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD, headers={'Content-Type': 'application/json'})
request_body = {"key": "value"}
self.logger.debug("test_write_message_no_content_type_should_raise_400 url: %s", URL)
headers = {
"Content-Type": "",
}
result = _successful_call(URL, HTTP_METHOD, headers, request_body, CHANNEL_BEARER_TOKEN)
assert result.status_code == HTTPStatus.BAD_REQUEST, result.reason
assert result.reason is not None
@pytest.mark.asyncio
async def test_write_message_read_only_token_should_fail(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
CHANNEL_READ_ONLY_TOKEN_ID, CHANNEL_READ_ONLY_TOKEN = \
await self._create_read_only_token(CHANNEL_ID)
headers = {}
headers["Content-Type"] = "application/json"
request_body = {
"key": "value"
}
# handler: write_message
URL = f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}/api/v1/channel/{CHANNEL_ID}"
HTTP_METHOD = 'post'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD, headers={'Content-Type': 'application/json'})
self.logger.debug("test_write_message_read_only_token_should_fail url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, headers,
request_body, CHANNEL_READ_ONLY_TOKEN)
assert result.status_code == 401, result.reason
def _write_message(self, CHANNEL_ID: str, CHANNEL_BEARER_TOKEN: str) -> requests.Response:
headers = {}
headers["Content-Type"] = "application/json"
request_body = {
"key": "value"
}
# handler: write_message
URL = f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}/api/v1/channel/{CHANNEL_ID}"
HTTP_METHOD = 'post'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD, headers={'Content-Type': 'application/json'})
self.logger.debug("test_write_message url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, headers,
request_body, CHANNEL_BEARER_TOKEN)
assert result.status_code == 200, result.reason
return result
@pytest.mark.asyncio
async def test_write_message(self) -> None:
"""Uses CHANNEL_BEARER_TOKEN to write messages for the CHANNEL_READ_ONLY_TOKEN to read."""
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
headers = {}
headers["Content-Type"] = "application/json"
request_body = {
"key": "value"
}
# handler: write_message
URL = f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}/api/v1/channel/{CHANNEL_ID}"
HTTP_METHOD = 'post'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD, headers={'Content-Type': 'application/json'})
self.logger.debug("test_write_message url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, headers,
request_body, CHANNEL_BEARER_TOKEN)
assert result.status_code == 200, result.reason
response_body = result.json()
assert isinstance(response_body['sequence'], int)
assert isinstance(datetime.datetime.fromisoformat(response_body['received']),
datetime.datetime)
assert response_body['content_type'] == 'application/json'
assert response_body['payload'] == {'key': 'value'}
@pytest.mark.asyncio
async def test_get_messages_head(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
CHANNEL_READ_ONLY_TOKEN_ID, CHANNEL_READ_ONLY_TOKEN = \
await self._create_read_only_token(CHANNEL_ID)
self._write_message(CHANNEL_ID, CHANNEL_BEARER_TOKEN)
# handler: get_messages
URL = f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}/api/v1/channel/{CHANNEL_ID}"
HTTP_METHOD = 'head'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
self.logger.debug("test_get_messages_head url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, None, None,
CHANNEL_READ_ONLY_TOKEN)
assert result.headers['ETag'] == "1"
assert result.content == b''
@pytest.mark.asyncio
async def test_get_messages_unread_should_get_one(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
CHANNEL_READ_ONLY_TOKEN_ID, CHANNEL_READ_ONLY_TOKEN = \
await self._create_read_only_token(CHANNEL_ID)
_response = self._write_message(CHANNEL_ID, CHANNEL_BEARER_TOKEN)
# handler: get_messages
query_params = "?unread=true"
URL = f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}/api/v1/channel/{CHANNEL_ID}" + \
query_params
HTTP_METHOD = 'get'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
self.logger.debug("test_get_messages_head url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, None, None,
CHANNEL_READ_ONLY_TOKEN)
assert result.headers['ETag'] == "1"
response_body = result.json()
assert isinstance(response_body, list)
assert response_body[0]['sequence'] == 1
assert isinstance(datetime.datetime.fromisoformat(response_body[0]['received']),
datetime.datetime)
assert response_body[0]['content_type'] == 'application/json'
assert response_body[0]['payload'] == {'key': 'value'}
@pytest.mark.asyncio
async def test_mark_message_read_or_unread(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
CHANNEL_READ_ONLY_TOKEN_ID, CHANNEL_READ_ONLY_TOKEN = \
await self._create_read_only_token(CHANNEL_ID)
_response = self._write_message(CHANNEL_ID, CHANNEL_BEARER_TOKEN)
# handler: mark_message_read_or_unread
sequence = 1
query_params = "?older=true"
URL = f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}"+ \
f"/api/v1/channel/{CHANNEL_ID}/{sequence}" + query_params
HTTP_METHOD = 'post'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
body = {"read": True}
result = _successful_call(URL, HTTP_METHOD, None, body,
CHANNEL_READ_ONLY_TOKEN)
assert result.status_code == 200, result.reason
sequence = 2
query_params = "?older=true"
URL = f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}"+ \
f"/api/v1/channel/{CHANNEL_ID}/{sequence}" + query_params
result = _successful_call(URL, HTTP_METHOD, None, body,
CHANNEL_READ_ONLY_TOKEN)
assert result.status_code == 404, result.reason
assert result.reason is not None
@pytest.mark.asyncio
async def test_delete_message_read_only_token_should_fail(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
CHANNEL_READ_ONLY_TOKEN_ID, CHANNEL_READ_ONLY_TOKEN = \
await self._create_read_only_token(CHANNEL_ID)
_response = self._write_message(CHANNEL_ID, CHANNEL_BEARER_TOKEN)
# handler: delete_message
sequence = 1
URL = f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}"+ \
f"/api/v1/channel/{CHANNEL_ID}/{sequence}"
HTTP_METHOD = 'delete'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
sequence = 1
        url = f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}" + \
            f"/api/v1/channel/{CHANNEL_ID}/{sequence}"
result = _successful_call(url, HTTP_METHOD, None, None,
CHANNEL_READ_ONLY_TOKEN)
assert result.status_code == 401, result.reason
sequence = 2
        url = f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}" + \
            f"/api/v1/channel/{CHANNEL_ID}/{sequence}"
result = _successful_call(url, HTTP_METHOD, None, None,
CHANNEL_READ_ONLY_TOKEN)
assert result.status_code == 401, result.reason
assert result.reason is not None
@pytest.mark.asyncio
async def test_delete_message_should_succeed(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
_response = self._write_message(CHANNEL_ID, CHANNEL_BEARER_TOKEN)
sequence = 1
URL = f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}"+ \
f"/api/v1/channel/{CHANNEL_ID}/{sequence}"
HTTP_METHOD = 'delete'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
result = _successful_call(URL, HTTP_METHOD, None, None,
CHANNEL_BEARER_TOKEN)
assert result.status_code == 200, result.reason
sequence = 2
        url = f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}" + \
            f"/api/v1/channel/{CHANNEL_ID}/{sequence}"
result = _successful_call(url, HTTP_METHOD, None, None,
CHANNEL_BEARER_TOKEN)
assert result.status_code == 404, result.reason
assert result.reason is not None
async def _subscribe_to_msg_box_notifications(self, url: str, msg_box_api_token: str,
expected_count: int, completion_event: asyncio.Event) -> None:
count = 0
async with aiohttp.ClientSession() as session:
try:
async with session.ws_connect(url + f"?token={msg_box_api_token}", timeout=5.0) \
as ws:
self.logger.info('Connected to %s', url)
async for msg in ws:
if msg.type == aiohttp.WSMsgType.TEXT:
content = json.loads(msg.data)
self.logger.info('New message from msg box: %s', content)
count += 1
if expected_count == count:
self.logger.debug("Received all %s messages", expected_count)
await session.close()
completion_event.set()
return
if msg.type in (aiohttp.WSMsgType.CLOSE, aiohttp.WSMsgType.ERROR,
aiohttp.WSMsgType.CLOSED, aiohttp.WSMsgType.CLOSING):
self.logger.info("CLOSED")
break
except WSServerHandshakeError as e:
if e.status == 401:
raise WebsocketUnauthorizedException()
@pytest.mark.asyncio
def test_channels_websocket_bad_auth_should_fail(self) -> None:
async def wait_on_sub(url: str, msg_box_api_token: str, expected_count: int,
completion_event: asyncio.Event) -> None:
try:
await self._subscribe_to_msg_box_notifications(url, msg_box_api_token,
expected_count, completion_event)
except WebsocketUnauthorizedException:
self.logger.debug("Websocket unauthorized - bad token")
                assert True  # Auth should have failed
completion_event = asyncio.Event()
url = WS_URL_TEMPLATE_MSG_BOX.format(channelid=CHANNEL_ID)
asyncio.run(wait_on_sub(url, "BAD_BEARER_TOKEN", 0, completion_event))
@pytest.mark.asyncio
def test_channels_websocket(self) -> None:
logger = logging.getLogger("websocket-test")
async def wait_on_sub(url: str, msg_box_api_token: str, expected_count: int,
completion_event: asyncio.Event) -> None:
try:
await self._subscribe_to_msg_box_notifications(url, msg_box_api_token,
expected_count, completion_event)
except WebsocketUnauthorizedException:
self.logger.debug("Auth failed")
assert False # Auth should have passed
async def push_messages(CHANNEL_ID: str, CHANNEL_BEARER_TOKEN: str,
expected_msg_count: int) -> None:
for i in range(expected_msg_count):
headers = {}
headers["Content-Type"] = "application/json"
headers["Authorization"] = f"Bearer {CHANNEL_BEARER_TOKEN}"
request_body = {"key": "value"}
url = f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}" + \
f"/api/v1/channel/{CHANNEL_ID}"
async with aiohttp.ClientSession() as session:
headers = {"Authorization": f"Bearer {CHANNEL_BEARER_TOKEN}"}
async with session.post(url, headers=headers, json=request_body) as resp:
self.logger.debug("push_messages = %s", await resp.json())
assert resp.status == 200, resp.reason
async def main() -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = \
await self._create_new_channel()
CHANNEL_READ_ONLY_TOKEN_ID, CHANNEL_READ_ONLY_TOKEN = \
await self._create_read_only_token(CHANNEL_ID)
EXPECTED_MSG_COUNT = 10
logger.debug("CHANNEL_ID: %s", CHANNEL_ID)
logger.debug("CHANNEL_BEARER_TOKEN: %s", CHANNEL_BEARER_TOKEN)
logger.debug("CHANNEL_READ_ONLY_TOKEN: %s", CHANNEL_READ_ONLY_TOKEN)
completion_event = asyncio.Event()
url = WS_URL_TEMPLATE_MSG_BOX.format(channelid=CHANNEL_ID)
task1 = asyncio.create_task(wait_on_sub(url, CHANNEL_BEARER_TOKEN, EXPECTED_MSG_COUNT,
completion_event))
await asyncio.sleep(3)
task2 = asyncio.create_task(push_messages(CHANNEL_ID, CHANNEL_BEARER_TOKEN,
EXPECTED_MSG_COUNT))
await asyncio.gather(task1, task2)
await completion_event.wait()
asyncio.run(main())
@pytest.mark.asyncio
def test_general_purpose_websocket_bad_auth_should_fail(self) -> None:
async def wait_on_sub(url: str, api_token: str,
expected_count: int, completion_event: asyncio.Event) -> None:
try:
await _subscribe_to_general_notifications_peer_channels(url,
api_token, expected_count, completion_event)
except WebsocketUnauthorizedException:
self.logger.debug("Websocket unauthorized - bad token")
                assert True  # Auth should have failed
completion_event = asyncio.Event()
url = WS_URL_GENERAL
asyncio.run(wait_on_sub(url, "BAD_BEARER_TOKEN", 0, completion_event))
@pytest.mark.asyncio
def test_general_purpose_websocket_peer_channel_notifications(self) -> None:
logger = logging.getLogger("websocket-test")
async def manage_general_websocket_connection(url: str, api_token: str, expected_count: int,
completion_event: asyncio.Event) -> None:
try:
await _subscribe_to_general_notifications_peer_channels(
url, api_token, expected_count, completion_event)
except WebsocketUnauthorizedException:
self.logger.debug("Auth failed")
assert False # Auth should have passed
async def push_messages(CHANNEL_ID: str, CHANNEL_BEARER_TOKEN: str,
expected_msg_count: int) -> None:
for i in range(expected_msg_count):
headers = {}
headers["Content-Type"] = "application/json"
headers["Authorization"] = f"Bearer {CHANNEL_BEARER_TOKEN}"
request_body = {"key": "value"}
url = f"http://{TEST_EXTERNAL_HOST}:{TEST_EXTERNAL_PORT}"+ \
f"/api/v1/channel/{CHANNEL_ID}"
async with aiohttp.ClientSession() as session:
headers = {"Authorization": f"Bearer {CHANNEL_BEARER_TOKEN}"}
async with session.post(url, headers=headers, json=request_body) as resp:
self.logger.debug("push_messages = %s", await resp.json())
assert resp.status == 200, resp.reason
async def main() -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = \
await self._create_new_channel()
CHANNEL_READ_ONLY_TOKEN_ID, CHANNEL_READ_ONLY_TOKEN = \
await self._create_read_only_token(CHANNEL_ID)
EXPECTED_MSG_COUNT = 10
logger.debug("CHANNEL_ID: %s", CHANNEL_ID)
logger.debug("CHANNEL_BEARER_TOKEN: %s", CHANNEL_BEARER_TOKEN)
logger.debug("CHANNEL_READ_ONLY_TOKEN: %s", CHANNEL_READ_ONLY_TOKEN)
completion_event = asyncio.Event()
url = WS_URL_GENERAL
task1 = asyncio.create_task(
manage_general_websocket_connection(url, self._api_key, EXPECTED_MSG_COUNT,
completion_event))
await asyncio.sleep(3)
task2 = asyncio.create_task(push_messages(CHANNEL_ID, CHANNEL_BEARER_TOKEN,
EXPECTED_MSG_COUNT))
await asyncio.gather(task1, task2)
await completion_event.wait()
asyncio.run(main())
@pytest.mark.asyncio
async def test_revoke_selected_token(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
CHANNEL_READ_ONLY_TOKEN_ID, CHANNEL_READ_ONLY_TOKEN = \
await self._create_read_only_token(CHANNEL_ID)
# handler: revoke_selected_token
URL = 'http://{host}:{port}/api/v1/channel/manage/{channelid}/api-token/{tokenid}'\
.format(host=TEST_EXTERNAL_HOST, port=TEST_EXTERNAL_PORT, channelid=CHANNEL_ID,
tokenid=CHANNEL_READ_ONLY_TOKEN_ID)
HTTP_METHOD = 'delete'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
good_bearer_token = self._api_key
request_body = None
self.logger.debug("test_revoke_selected_token url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, None,
request_body, good_bearer_token)
assert result.status_code == web.HTTPNoContent.status_code
def _revoke_token(self, CHANNEL_ID: str, CHANNEL_READ_ONLY_TOKEN_ID: str) -> requests.Response:
# handler: revoke_selected_token
URL = 'http://{host}:{port}/api/v1/channel/manage/{channelid}/api-token/{tokenid}'\
.format(host=TEST_EXTERNAL_HOST, port=TEST_EXTERNAL_PORT, channelid=CHANNEL_ID,
tokenid=CHANNEL_READ_ONLY_TOKEN_ID)
HTTP_METHOD = 'delete'
good_bearer_token = self._api_key
request_body = None
self.logger.debug("test_revoke_selected_token url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, None,
request_body, good_bearer_token)
return result
@pytest.mark.asyncio
async def test_expired_token_should_fail(self) -> None:
CHANNEL_ID, CHANNEL_BEARER_TOKEN, CHANNEL_BEARER_TOKEN_ID = await self._create_new_channel()
CHANNEL_READ_ONLY_TOKEN_ID, CHANNEL_READ_ONLY_TOKEN = \
await self._create_read_only_token(CHANNEL_ID)
self._revoke_token(CHANNEL_ID, CHANNEL_READ_ONLY_TOKEN_ID)
# handler: get_token_details
URL = 'http://{host}:{port}/api/v1/channel/manage/{channelid}/api-token/{tokenid}'\
.format(host=TEST_EXTERNAL_HOST, port=TEST_EXTERNAL_PORT, channelid=CHANNEL_ID,
tokenid=CHANNEL_READ_ONLY_TOKEN_ID)
HTTP_METHOD = 'get'
_no_auth(URL, HTTP_METHOD)
_wrong_auth_type(URL, HTTP_METHOD)
_bad_token(URL, HTTP_METHOD)
expired_bearer_token = CHANNEL_READ_ONLY_TOKEN
request_body = None
self.logger.debug("test_revoke_selected_token url: %s", URL)
result = _successful_call(URL, HTTP_METHOD, None,
request_body, expired_bearer_token)
assert result.status_code == 401
@pytest.mark.asyncio
async def test_delete_channel(self) -> None:
assert ApplicationState.singleton_reference is not None
application_state = ApplicationState.singleton_reference()
assert application_state is not None
await self._create_new_channel()
@replace_db_context_with_connection
def read(db: sqlite3.Connection) -> list[str]:
rows = db.execute("SELECT externalid FROM msg_box").fetchall()
assert len(rows) > 0
return [ row[0] for row in rows ]
channel_ids_for_deletion = read(application_state.database_context)
URL_TEMPLATE = "http://"+ TEST_EXTERNAL_HOST +":"+ str(TEST_EXTERNAL_PORT) + \
"/api/v1/channel/manage/{channelid}"
HTTP_METHOD = 'delete'
_no_auth(URL_TEMPLATE, HTTP_METHOD)
_wrong_auth_type(URL_TEMPLATE, HTTP_METHOD)
_bad_token(URL_TEMPLATE, HTTP_METHOD)
good_bearer_token = self._api_key
for channel_id in channel_ids_for_deletion:
url = URL_TEMPLATE.format(channelid=channel_id)
self.logger.debug("test_delete_channel url: %s", url)
result = _successful_call(url, HTTP_METHOD, None,
None, good_bearer_token)
assert result.status_code == web.HTTPNoContent.status_code
@replace_db_context_with_connection
def read2(db: sqlite3.Connection) -> None:
rows = db.execute("SELECT * FROM msg_box").fetchall()
assert len(rows) == 0
read2(application_state.database_context)
@replace_db_context_with_connection
def read3(db: sqlite3.Connection) -> None:
rows = db.execute("SELECT * FROM msg_box_api_token").fetchall()
assert len(rows) == 0
read3(application_state.database_context)
| 43.783389
| 100
| 0.64833
|
b6148920c6c6c91fa5a789653480d5bb1c97be4b
| 524
|
py
|
Python
|
setup.py
|
Zedd1558/keras
|
01db9f0d7270735f7951602dac2b89e08b7785bf
|
[
"MIT"
] | 33
|
2015-11-25T23:11:02.000Z
|
2022-03-10T10:36:37.000Z
|
setup.py
|
RamsteinWR/keras-1
|
a37fb96cea857c0a2d4032cc40c01cfccd40e8b8
|
[
"MIT"
] | null | null | null |
setup.py
|
RamsteinWR/keras-1
|
a37fb96cea857c0a2d4032cc40c01cfccd40e8b8
|
[
"MIT"
] | 31
|
2016-01-26T16:08:28.000Z
|
2021-11-29T00:24:10.000Z
|
from setuptools import setup
from setuptools import find_packages
setup(name='Keras',
version='0.2.0',
description='Theano-based Deep Learning library',
author='Francois Chollet',
author_email='francois.chollet@gmail.com',
url='https://github.com/fchollet/keras',
download_url='https://github.com/fchollet/keras/tarball/0.2.0',
license='MIT',
install_requires=['theano', 'pyyaml'],
extras_require={
'h5py': ['h5py'],
},
packages=find_packages())
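# Usage note (not part of the original setup.py): from the repository root,
# `pip install .` installs the package, and `pip install .[h5py]` also pulls
# in the optional h5py extra declared in extras_require above.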
| 29.111111
| 69
| 0.646947
|
187fd5db85d576dba297b31e02a1cd950a4decdd
| 40,078
|
py
|
Python
|
assets/source/zhihu.py
|
liticer/liticer.github.io
|
0e76a570e0ee8aa560b8719f5b564a116bcc5593
|
[
"MIT"
] | null | null | null |
assets/source/zhihu.py
|
liticer/liticer.github.io
|
0e76a570e0ee8aa560b8719f5b564a116bcc5593
|
[
"MIT"
] | null | null | null |
assets/source/zhihu.py
|
liticer/liticer.github.io
|
0e76a570e0ee8aa560b8719f5b564a116bcc5593
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
;$$;
#############
#############;#####o
## o#########################
##### $###############################
## ###$ ######! ##########################
## ### $### ################### ######
### ### ##o#######################
###### ;### #### #####################
## ### ###### ######&&################
## ### ###### ## ############ #######
o## ######## ## ##################
##o ### #### #######o#######
## ###### ###########&#####
## #### #############!
### #########
#####& ## o####
###### ## ####*
## !## #####
## ##* ####; ##
##### #####o #####
#### ### ### $###o
### ## ####! $###
## #####
## ##
;## ### ;
##$ ##
####### ##
##### &## ##
### ### ###
### ### ##
## ;## ##
## ### ##
### ### ##
#### ##
### ##
##; ##
##$ ##&
## ##
##; ##
## ##;
### ### ##$
### ### ##
###################### #####&&&&&&&&&&&&###
### $#####$ ############&$o$&################################
# $&########&o
'''
# Built-in / Std
import os, sys, time, platform, random, functools
import re, json, cookielib
# Requirements
import requests, termcolor, html2text
try:
from bs4 import BeautifulSoup
except:
import BeautifulSoup
# Some modules written by us
from auth import islogin
from auth import Logging
"""
Note:
    1. Authentication is handled by `auth.py`.
    2. The identity information is stored in the `cookies` file in the current directory.
    3. The `requests` object can be used directly; the identity information is loaded automatically.
By Luozijun (https://github.com/LuoZijun), 09/09 2015
"""
# Flag indicating whether to run in debug mode
DEBUG = True
# Load cookies
requests = requests.Session()
requests.cookies = cookielib.LWPCookieJar('cookies')
# Check whether login has already succeeded
try:
requests.cookies.load(ignore_discard=True)
except:
Logging.error(u"你还没有登录知乎哦 ...")
Logging.info(u"执行 `python auth.py` 即可以完成登录。")
raise Exception("无权限(403)")
if islogin() != True:
Logging.error(u"你的身份信息已经失效,请重新生成身份信息( `python auth.py` )。")
raise Exception("无权限(403)")
# Set the default text encoding to utf8
reload(sys)
sys.setdefaultencoding('utf8')
# Class representing a Zhihu question
class Question:
url = None
soup = None
    # Initialize the question's url and title
def __init__(self, url, title=None):
        # All question urls share the same prefix; the last 8 characters are the question id
if url[0:len(url) - 8] != "http://www.zhihu.com/question/":
raise ValueError("\"" + url + "\"" + " : it isn't a question url.")
else:
self.url = url
if title != None: self.title = title
    # Fetch the question page and parse it
def parser(self):
        # Fetch the question's web page
r = requests.get(self.url)
        # Parse the question page
self.soup = BeautifulSoup(r.content)
        # Debug --> dump the soup result to a file
if DEBUG:
f = open('log/question_soup.txt', 'w')
f.writelines(self.soup.prettify())
f.close()
    # Decorator handling the default character encoding issue on Windows
def encode_on_windows(func):
'''
Decorator for encode result of func on Windows.
@encode_on_windows
def func:
...
'''
@functools.wraps(func)
def _wrapper(*args, **kw):
result = func(*args, **kw)
if platform.system() == 'Windows':
return result.decode('utf-8').encode('gbk')
return result
return _wrapper
    # Get the page title
@encode_on_windows
def get_title(self):
if hasattr(self, "title"):
return self.title
if self.soup == None:
self.parser()
soup = self.soup
title = soup.find("h2", class_="zm-item-title")\
.string.encode("utf-8").replace("\n", "")
self.title = title
return self.title
    # Get the question's detailed description
@encode_on_windows
def get_detail(self):
if self.soup == None:
self.parser()
soup = self.soup
detail = soup.find("div", id="zh-question-detail")\
.div.get_text().encode("utf-8")
return detail
    # Get the number of answers
def get_answers_num(self):
if self.soup == None:
self.parser()
soup = self.soup
answers_num = 0
answers_num_label = soup.find("h3", id="zh-question-answer-num")
if answers_num_label != None:
answers_num = int(answers_num_label["data-num"])
else:
            raise ValueError('Unexpected label in get_answers_num')
return answers_num
    # Get the number of followers of the question
def get_followers_num(self):
if self.soup == None:
self.parser()
soup = self.soup
followers_num = 0
followers_num_label = soup.find("div", class_="zg-gray-normal")
if followers_num_label != None:
followers_num = int(followers_num_label.a.strong.string)
else:
            raise ValueError('Unexpected label in get_followers_num')
return followers_num
    # Get the topics the question belongs to
def get_topics(self):
if self.soup == None:
self.parser()
soup = self.soup
topic_list = soup.find_all("a", class_="zm-item-tag")
topics = []
for i in topic_list:
topic = i.contents[0].encode("utf-8").replace("\n", "")
if platform.system() == 'Windows':
topic = topic.decode('utf-8').encode('gbk')
topics.append(topic)
return topics
    # Get all answers to this question
def get_all_answers(self):
answers_num = self.get_answers_num()
if answers_num == 0:
# print "No answer."
return
error_answer_count = 0
my_answer_count = 0
        # Each request returns 50 answers
        # The first 50 answers can be scraped directly from the question URL
for j in xrange(min(answers_num, 50)):
if DEBUG:
print j
if self.soup == None:
self.parser()
soup = BeautifulSoup(self.soup.encode("utf-8"))
            # DEBUG: dump intermediate results
if DEBUG:
f = open('log/question_soup_x.txt', 'w')
f.writelines(soup.prettify())
f.close()
# raw_input("Pause, press <Enter> to continue...")
            # This answer was written by the logged-in user
is_my_answer = False
item_answer = soup.find_all("div", class_="zm-item-answer")[j]
if not item_answer.find("span", class_="count"):
my_answer_count += 1
is_my_answer = True
            # This answer is malformed
if not item_answer.find("div", class_="zm-editable-content clearfix"):
error_answer_count += 1
continue
author = None
            # This answer was written by an anonymous user
author_class = "zm-item-answer-author-info"
author_info = soup.find_all("div", class_=author_class)[j]
anoy_name = author_info.find("span", class_="name")
if anoy_name and anoy_name.string == u"匿名用户":
author_url = None
author = User(author_url)
            # This answer was written by a non-anonymous (named) user
else:
author_tag = author_info.find_all("a")[1]
author_id = author_tag.string.encode("utf-8")
author_url = "http://www.zhihu.com" + author_tag["href"]
author = User(author_url, author_id)
            # How many upvotes this answer has
count = ""
if is_my_answer == True:
count = item_answer.find("a", class_="zm-item-vote-count").string
else:
count = soup.find_all("span", class_="count")\
[j - my_answer_count].string
if count[-1] == "K":
upvote = int(count[0:(len(count) - 1)]) * 1000
elif count[-1] == "W":
upvote = int(count[0:(len(count) - 1)]) * 10000
else:
upvote = int(count)
            # The URL and content of this answer
answer_id = soup.find_all("a", class_="answer-date-link")[j]["href"]
answer_url = "http://www.zhihu.com" + answer_id
answer = soup.find_all("div", class_=\
"zm-editable-content clearfix")[j - error_answer_count]
            soup.body.extract()  # Remove the page body from the soup and add a new body
soup.head.insert_after(soup.new_tag("body", **{'class': 'zhi'}))
            soup.body.append(answer)  # Append the answer content to the new body
            # For images in this answer, fix each image URL and strip some unnecessary markup
img_list = soup.find_all("img", class_="content_image lazy")
for img in img_list:
img["src"] = img["data-actualsrc"]
img_list = soup.find_all("img", \
class_="origin_image zh-lightbox-thumb lazy")
for img in img_list:
img["src"] = img["data-actualsrc"]
noscript_list = soup.find_all("noscript")
for noscript in noscript_list:
noscript.extract()
content = soup
            # Yield each Answer from a generator to avoid the time and memory cost of building them all at once
answer = Answer(answer_url, self, author, upvote, content)
yield answer
        # More than 50 answers: the case where "load more" has to be requested
for i in xrange((answers_num-51)/50 + 1):
            # The index should start from 1
i += 1
            # Pressing F12 in the browser shows this is a post request to the following URL:
post_url = "http://www.zhihu.com/node/QuestionAnswerListV2"
            # The _xsrf value can be found in the question page
_xsrf = self.soup.find("input", attrs={'name': '_xsrf'})["value"]
            # This offset indicates how many answers this request should skip
offset = i * 50
            # Part of the parameters sent with the post request
params = json.dumps({"url_token": int(self.url[-8:]),\
"pagesize": 50,\
"offset": offset})
            # Form data for the post request
data = {
'_xsrf': _xsrf,
'method': "next",
'params': params
}
            # Headers for the post request
header = {
'User-Agent': "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0)"\
+ " Gecko/20100101 Firefox/34.0",
'Host': "www.zhihu.com",
'Referer': self.url
}
            # Send the post request
r = requests.post(post_url, data=data, headers=header)
            # Parse the answers returned by this request and yield them
answer_list = r.json()["msg"]
for j in xrange(min(answers_num-i*50, 50)):
if DEBUG:
print i*50+j
                # Soup parse of the question page
soup = BeautifulSoup(self.soup.encode("utf-8"))
                # Soup parse of one answer from this request
answer_soup = BeautifulSoup(answer_list[j])
                # DEBUG: dump intermediate results
if DEBUG:
f = open('log/question_soup_x.txt', 'w')
f.writelines(answer_soup.prettify())
f.close()
# raw_input("Pause, press <Enter> to continue...")
                # This answer is malformed; skip it
if answer_soup.find("div",\
class_="zm-editable-content clearfix") == None:
continue
                # The author of this answer
author = None
author_class = "zm-item-answer-author-info"
author_info = answer_soup.find("div", class_=author_class)
anoy_name = author_info.find("span", class_="name")
if anoy_name and anoy_name.string == u"匿名用户":
author_url = None
author = User(author_url)
else:
author_tag = author_info.find_all("a")[1]
author_id = author_tag.string.encode("utf-8")
author_url = "http://www.zhihu.com" + author_tag["href"]
author = User(author_url, author_id)
                # The upvote count of this answer
count = answer_soup.find("span", class_="count")
if count == None:
count = answer_soup.find("a", \
class_="zm-item-vote-count").string
else:
count = count.string
if count[-1] == "K":
upvote = int(count[0:(len(count) - 1)]) * 1000
elif count[-1] == "W":
upvote = int(count[0:(len(count) - 1)]) * 10000
else:
upvote = int(count)
                # The URL and content of this answer
answer_id = answer_soup.find("a", class_="answer-date-link")["href"]
answer_url = "http://www.zhihu.com" + answer_id
answer = answer_soup.find("div", \
class_="zm-editable-content clearfix")
                soup.body.extract()  # Remove the page body from the soup and add a new body
soup.head.insert_after(soup.new_tag("body", **{'class': 'zhi'}))
                soup.body.append(answer)  # Append the answer content to the new body
                # For images in this answer, fix each image URL and strip some unnecessary markup
img_list = soup.find_all("img", class_="content_image lazy")
for img in img_list:
img["src"] = img["data-actualsrc"]
img_list = soup.find_all("img", \
class_="origin_image zh-lightbox-thumb lazy")
for img in img_list:
img["src"] = img["data-actualsrc"]
noscript_list = soup.find_all("noscript")
for noscript in noscript_list:
noscript.extract()
content = soup
                # Yield each Answer from a generator to avoid the time and memory cost of building them all at once
answer = Answer(answer_url, self, author, upvote, content)
yield answer
    # Get the first n answers
def get_top_i_answers(self, n):
j = 0
answers = self.get_all_answers()
for answer in answers:
j = j + 1
if j > n:
break
yield answer
    # Get the top (first) answer
def get_top_answer(self):
for answer in self.get_top_i_answers(1):
return answer
    # Get the number of times the question has been viewed
def get_visit_times(self):
if self.soup == None:
self.parser()
soup = self.soup
return int(soup.find("meta", itemprop="visitsCount")["content"])
# Class representing a Zhihu user
class User:
user_url = None
soup = None
    # Initialize the user's url and user_id
def __init__(self, user_url, user_id=None):
if user_url == None:
self.user_id = "匿名用户"
elif user_url[0:28] != "http://www.zhihu.com/people/":
raise ValueError('"'+user_url+'": it isn\'t a user url.')
else:
self.user_url = user_url
if user_id != None:
self.user_id = user_id
    # Fetch the user's profile page and parse it
def parser(self):
r = requests.get(self.user_url)
soup = BeautifulSoup(r.content)
self.soup = soup
    # Decorator handling the default character encoding issue on Windows
def encode_on_windows(func):
'''
Decorator for encode result of func on Windows.
@encode_on_windows
def func:
...
'''
@functools.wraps(func)
def _wrapper(*args, **kw):
result = func(*args, **kw)
if platform.system() == 'Windows':
return result.decode('utf-8').encode('gbk')
return result
return _wrapper
    # Get the user name
@encode_on_windows
def get_user_id(self):
if self.user_url == None:
return "匿名用户"
if hasattr(self, "user_id"):
return self.user_id
if self.soup == None:
self.parser()
user_l = self.soup.find("div", class_="title-section ellipsis")
        user_id = user_l.find("span", class_="name").string.encode("utf-8")
self.user_id = user_id
return user_id
    # Get the number of users this user is following (followees)
def get_followees_num(self):
if self.user_url == None:
return 0
if self.soup == None:
self.parser()
followees_c = "zm-profile-side-following zg-clear"
followees_l = self.soup.find("div", class_=followees_c)
followees_num = int(followees_l.find("a").strong.string)
return followees_num
    # Get the number of followers of this user
def get_followers_num(self):
if self.user_url == None:
return 0
if self.soup == None:
self.parser()
followers_c = "zm-profile-side-following zg-clear"
followers_l = self.soup.find("div", class_=followers_c)
followers_num = int(followers_l.find_all("a")[1].strong.string)
return followers_num
    # Get the number of upvotes the user has received
def get_agree_num(self):
if self.user_url == None:
return 0
if self.soup == None:
self.parser()
agree_c = "zm-profile-header-user-agree"
agree_l = self.soup.find("span", class_=agree_c)
agree_num = int(agree_l.find("span",).strong.string)
return agree_num
    # Get the number of thanks the user has received
def get_thanks_num(self):
if self.user_url == None:
return 0
if self.soup == None:
self.parser()
thanks_c = "zm-profile-header-user-thanks"
thanks_l = self.soup.find("span", class_= thanks_c)
thanks_num = int(thanks_l.strong.string)
return thanks_num
    # Get the number of questions the user has asked
def get_asks_num(self):
if self.user_url == None:
return 0
if self.soup == None:
self.parser()
asks_l = self.soup.find_all("span", class_="num")
asks_num = int(asks_l[0].string)
return asks_num
    # Get the number of questions the user has answered
def get_answers_num(self):
if self.user_url == None:
return 0
if self.soup == None:
self.parser()
answers_l = self.soup.find_all("span", class_="num")
answers_num = int(answers_l[1].string)
return answers_num
    # Get the number of the user's collections
def get_collections_num(self):
if self.user_url == None:
return 0
if self.soup == None:
self.parser()
collections_l = self.soup.find_all("span", class_="num")
collections_num = int(collections_l[3].string)
return collections_num
    # Get the users this user is following (followees)
def get_followees(self):
if self.user_url == None:
return
followees_num = self.get_followees_num()
if followees_num == 0:
return
        # Fetch the followees page of this user and parse it
followee_url = self.user_url + "/followees"
r = requests.get(followee_url)
soup = BeautifulSoup(r.content)
        # One request is sent for every 20 users
        # No request is needed the first time because the page has already been fetched
user_url_list = soup.find_all("h2", class_="zm-list-content-title")
for j in xrange(min(followees_num, 20)):
            user_url = user_url_list[j].a["href"]
user_id = user_url_list[j].a.string.encode("utf-8")
yield User(user_url, user_id)
        # Handle the remaining followees
for i in xrange((followees_num - 21) / 20 + 1):
            # Send a post request to fetch 20 more followees
i += 1
            # URL for the post request
post_url = "http://www.zhihu.com/node/ProfileFolloweesListV2"
            # Prepare the parameters for the post request
_xsrf = soup.find("input", attrs={'name': '_xsrf'})["value"]
offset = i * 20
            hash_id = re.findall(r'hash_id": "(.*)"},', r.text)[0]
dumps_p = {
"offset": offset,
"order_by": "created",
"hash_id": hash_id
}
params = json.dumps(dumps_p)
data = {
'_xsrf': _xsrf,
'method': "next",
'params': params
}
            # Prepare the headers for the post request
header = {
'User-Agent': "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0)"\
+ " Gecko/20100101 Firefox/34.0",
'Host': "www.zhihu.com",
'Referer': followee_url
}
            # Send the post request
r_post = requests.post(post_url, data=data, headers=header)
            # Get the followee list from the response
followee_list = r_post.json()["msg"]
for j in xrange(min(followees_num - i * 20, 20)):
soup_f = BeautifulSoup(followee_list[j])
user_l = soup_f.find("h2", class_="zm-list-content-title").a
yield User(user_l["href"], user_l.string.encode("utf-8"))
    # Get the followers of this user
def get_followers(self):
if self.user_url == None:
return
followers_num = self.get_followers_num()
if followers_num == 0:
return
        # Fetch the followers page and parse it
follower_url = self.user_url + "/followers"
r = requests.get(follower_url)
soup = BeautifulSoup(r.content)
        # No additional get is needed the first time because the page has already been fetched
user_ul = soup.find_all("h2", class_="zm-list-content-title")
for j in xrange(min(followers_num, 20)):
user_url = user_ul[j].a["href"]
user_id = user_ul[j].a.string.encode("utf-8")
yield User(user_url, user_id)
for i in xrange((followers_num - 21) / 20 + 1):
            # Send a post request to fetch 20 more followers
i += 1
            # URL for the post request
post_url = "http://www.zhihu.com/node/ProfileFollowersListV2"
            # Parameters for the post request
_xsrf = soup.find("input", attrs={'name': '_xsrf'})["value"]
offset = i * 20
            hash_id = re.findall(r'hash_id": "(.*)"},', r.text)[0]
dumps_p = {
"offset": offset,
"order_by": "created",
"hash_id": hash_id
}
params = json.dumps(dumps_p)
data = {
'_xsrf': _xsrf,
'method': "next",
'params': params
}
            # Headers for the post request
header = {
'User-Agent': "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0)"\
+ "Gecko/20100101 Firefox/34.0",
'Host': "www.zhihu.com",
'Referer': follower_url
}
            # Send the post request
r_post = requests.post(post_url, data=data, headers=header)
            # Parse the response to get the follower list
follower_list = r_post.json()["msg"]
for j in xrange(min(followers_num - i * 20, 20)):
soup_f = BeautifulSoup(follower_list[j])
user_l = soup_f.find("h2", class_="zm-list-content-title").a
yield User(user_l["href"], user_l.string.encode("utf-8"))
    # Get the questions the user has asked
def get_asks(self):
if self.user_url == None:
return
asks_num = self.get_asks_num()
if asks_num == 0:
return
for i in xrange((asks_num - 1) / 20 + 1):
            # Send one get request per 20 questions, then parse the result
ask_url = self.user_url + "/asks?page=" + str(i + 1)
r = requests.get(ask_url)
soup = BeautifulSoup(r.content)
for question in soup.find_all("a", class_="question_link"):
url = "http://www.zhihu.com" + question["href"]
title = question.string.encode("utf-8")
yield Question(url, title)
    # Get the answers the user has written
def get_answers(self):
if self.user_url == None:
return
answers_num = self.get_answers_num()
if answers_num == 0:
return
for i in xrange((answers_num - 1) / 20 + 1):
            # Send one get request per 20 answers, then parse the result
answer_url = self.user_url + "/answers?page=" + str(i + 1)
r = requests.get(answer_url)
soup = BeautifulSoup(r.content)
for answer in soup.find_all("a", class_="question_link"):
question_url = "http://www.zhihu.com" + answer["href"][0:18]
question_title = answer.string.encode("utf-8")
question = Question(question_url, question_title)
answer_url = "http://www.zhihu.com" + answer["href"]
yield Answer(answer_url, question, self)
    # Get the user's collections
def get_collections(self):
if self.user_url == None:
return
collections_num = self.get_collections_num()
if collections_num == 0:
return
for i in xrange((collections_num - 1) / 20 + 1):
            # Send one get request per 20 collections, then parse the result
collection_url = self.user_url + "/collections?page=" + str(i + 1)
r = requests.get(collection_url)
soup = BeautifulSoup(r.content)
collection_c = "zm-profile-section-item zg-clear"
collection_l = soup.find_all("div", class_=collection_c)
for collection in collection_l:
label = collection.find("a", class_="zm-profile-fav-item-title")
url = "http://www.zhihu.com" + label["href"]
name = label.string.encode("utf-8")
yield Collection(url, name, self)
# Class representing an answer
class Answer:
answer_url = None
soup = None
# Initialize the answer's url, question, author, upvote and content
def __init__(self, answer_url, question=None,\
author=None, upvote=None, content=None):
self.answer_url = answer_url
if question != None:
self.question = question
if author != None:
self.author = author
if upvote != None:
self.upvote = upvote
if content != None:
self.content = content
# Fetch and parse the answer page
def parser(self):
r = requests.get(self.answer_url)
soup = BeautifulSoup(r.content)
self.soup = soup
# Get the question this answer belongs to
def get_question(self):
if hasattr(self, "question"):
return self.question
if self.soup == None:
self.parser()
soup = self.soup
question_c = "zm-item-title zm-editable-content"
question_l = soup.find("h2", class_= question_c).a
url = "http://www.zhihu.com" + question_l["href"]
title = question_l.string.encode("utf-8")
question = Question(url, title)
return question
# Get the author of this answer
def get_author(self):
if hasattr(self, "author"):
return self.author
if self.soup == None:
self.parser()
soup = self.soup
author_c = "zm-item-answer-author-wrap"
author_l = soup.find("h3", class_=author_c)
if author_l.string == u"匿名用户":
author_url = None
author = User(author_url)
else:
author_tag = author_l.find_all("a")[1]
author_id = author_tag.string.encode("utf-8")
author_url = "http://www.zhihu.com" + author_tag["href"]
author = User(author_url, author_id)
return author
# Get the number of upvotes this answer has received
def get_upvote(self):
if hasattr(self, "upvote"):
return self.upvote
if self.soup == None:
self.parser()
soup = self.soup
count = soup.find("span", class_="count").string
if count[-1] == "K":
upvote = int(count[0:(len(count) - 1)]) * 1000
elif count[-1] == "W":
upvote = int(count[0:(len(count) - 1)]) * 10000
else:
upvote = int(count)
return upvote
# Get the content of this answer
def get_content(self):
if hasattr(self, "content"):
return self.content
if self.soup == None:
self.parser()
soup = BeautifulSoup(self.soup.encode("utf-8"))
# Locate the answer body and build a new HTML document around it
answer = soup.find("div", class_="zm-editable-content clearfix")
soup.body.extract()
soup.head.insert_after(soup.new_tag("body", **{'class': 'zhi'}))
soup.body.append(answer)
# Rewrite image src attributes and drop noscript blocks that never execute
img_list = soup.find_all("img", class_="content_image lazy")
for img in img_list:
img["src"] = img["data-actualsrc"]
img_c = "origin_image zh-lightbox-thumb lazy"
img_list = soup.find_all("img", class_=img_list)
for img in img_list:
img["src"] = img["data-actualsrc"]
noscript_list = soup.find_all("noscript")
for noscript in noscript_list:
noscript.extract()
content = soup
self.content = content
return content
# Save the answer as a plain-text (TXT) file
def to_txt(self):
content = self.get_content()
body = content.find("body")
# Insert a newline after each <br> and before each <li>
br_list = body.find_all("br")
for br in br_list:
br.insert_after(content.new_string("\n"))
li_list = body.find_all("li")
for li in li_list:
li.insert_before(content.new_string("\n"))
# Build the path and filename for saving the answer
file_path = os.path.join(os.path.join(os.getcwd(), "text"))
file_name = self.get_question().get_title() + "--"\
+ self.get_author().get_user_id() + "的回答.txt"
# Create the directory and handle filename encoding
if not os.path.isdir(file_path):
os.makedirs(file_path)
if platform.system() == 'Windows':
file_name = file_name.decode('utf-8').encode('gbk')
print file_name
# Handle the anonymous-user case first
if self.get_author().user_url == None:
if os.path.exists(os.path.join(file_path, file_name)):
f = open(os.path.join(file_path, file_name), "a")
f.write("\n\n")
else:
f = open(os.path.join(file_path, file_name), "a")
f.write(self.get_question().get_title() + "\n\n")
# Handle the non-anonymous-user case
else:
f = open(os.path.join(file_path, file_name), "wt")
f.write(self.get_question().get_title() + "\n\n")
# Write the file with an encoding appropriate for the platform
file_header = "作者: " + self.get_author().get_user_id()\
+ " 赞同: " + str(self.get_upvote()) + "\n\n"
file_ender = "\n" + "原链接: " + self.answer_url
if platform.system() == 'Windows':
f.write(file_header.decode('utf-8').encode('gbk'))
f.write(body.get_text().encode("gbk"))
f.write(file_ender.decode('utf-8').encode('gbk'))
else:
f.write(file_header)
f.write(body.get_text().encode("utf-8"))
f.write(file_ender)
f.close()
# Save this answer in Markdown format
def to_md(self):
content = self.get_content()
# Build the path and filename for saving the answer
file_path = os.path.join(os.path.join(os.getcwd(), "markdown"))
file_name = self.get_question().get_title() + "--"\
+ self.get_author().get_user_id() + "的回答.md"
# Create the directory and handle filename encoding
if not os.path.isdir(file_path):
os.makedirs(file_path)
if platform.system() == 'Windows':
file_name = file_name.decode('utf-8').encode('gbk')
print file_name
# Handle the anonymous-user case first
if self.get_author().user_url == None:
if os.path.exists(os.path.join(file_path, file_name)):
f = open(os.path.join(file_path, file_name), "a")
f.write("\n")
else:
f = open(os.path.join(file_path, file_name), "a")
f.write("# " + self.get_question().get_title() + "\n")
# Then handle the non-anonymous-user case
else:
f = open(os.path.join(file_path, file_name), "wt")
f.write("# " + self.get_question().get_title() + "\n")
# Post-process the answer's HTML content
text = html2text.html2text(content.decode('utf-8')).encode("utf-8")
# Step 1: strip extra whitespace inside bold (**) markers
r = re.findall(r'\*\*(.*?)\*\*', text)
for i in r:
if i != " ":
text = text.replace(i, i.strip())
# Step 2: strip extra whitespace inside italic (_) markers
r = re.findall(r'_(.*)_', text)
for i in r:
if i != " ":
text = text.replace(i, i.strip())
# Step 3: add blank lines after inline images
r = re.findall(r'!\[\]\((?:.*?)\)', text)
for i in r:
text = text.replace(i, i + "\n\n")
# Write the file with an encoding appropriate for the platform
file_header = "## 作者: " + self.get_author().get_user_id()\
+ " 赞同: " + str(self.get_upvote()) + "\n"
file_ender = "#### 原链接: " + self.answer_url
if platform.system() == 'Windows':
f.write(file_header.decode('utf-8').encode('gbk'))
f.write(text.decode('utf-8').encode('gbk'))
f.write(file_ender.decode('utf-8').encode('gbk'))
else:
f.write(file_header)
f.write(text)
f.write(file_ender)
f.close()
# Get how many times the answer's question has been viewed
def get_visit_times(self):
if self.soup == None:
self.parser()
soup = self.soup
for tag_p in soup.find_all("p"):
if "所属问题被浏览" in tag_p.contents[0].encode('utf-8'):
return int(tag_p.contents[1].contents[0])
# Get the users who upvoted this answer
def get_voters(self):
if self.soup == None:
self.parser()
soup = self.soup
data_aid = soup.find("div", class_="zm-item-answer ")["data-aid"]
request_url = 'http://www.zhihu.com/node/AnswerFullVoteInfoV2'
params={"params": "{\"answer_id\":\"%d\"}" % int(data_aid)}
r = requests.get(request_url, params=params)
soup = BeautifulSoup(r.content)
voters_info = soup.find_all("span")[1:-1]
if len(voters_info) == 0:
return
for voter_info in voters_info:
if voter_info.string in (u"匿名用户、", u"匿名用户"):
voter_url = None
yield User(voter_url)
else:
voter_url = "http://www.zhihu.com" + str(voter_info.a["href"])
voter_id = voter_info.a["title"].encode("utf-8")
yield User(voter_url, voter_id)
# Class representing a collection (favorites folder)
class Collection:
url = None
soup = None
# Initialize the url, name and creator attributes
def __init__(self, url, name=None, creator=None):
if url[0:len(url) - 8] != "http://www.zhihu.com/collection/":
raise ValueError("\""+url+"\""+" : it isn't a collection url.")
else:
self.url = url
if name != None:
self.name = name
if creator != None:
self.creator = creator
# Fetch and parse the collection's page
def parser(self):
r = requests.get(self.url)
soup = BeautifulSoup(r.content)
self.soup = soup
# Get the name of this collection
def get_name(self):
if not hasattr(self, 'name'):
if self.soup == None:
self.parser()
soup = self.soup
name_l = self.soup.find("h2", id="zh-fav-head-title")
self.name = name_l.string.encode("utf-8").strip()
if platform.system() == 'Windows':
return self.name.decode('utf-8').encode('gbk')
return self.name
# Get the creator of this collection
def get_creator(self):
if not hasattr(self, 'creator'):
if self.soup == None:
self.parser()
creator_l = self.soup.find("h2", class_="zm-list-content-title")
creator_id = creator_l.a.string.encode("utf-8")
creator_url = "http://www.zhihu.com" + creator_l.a["href"]
creator = User(creator_url, creator_id)
self.creator = creator
return creator
# Get all answers in this collection
def get_all_answers(self):
i = 1
while True:
r = requests.get(self.url + "?page=" + str(i))
answer_soup = BeautifulSoup(r.content)
answer_list = answer_soup.find_all("div", class_="zm-item")
if len(answer_list) == 0:
break
for answer in answer_list:
if answer.find("p", class_="note"):
continue
question_link = answer.find("h2")
if question_link != None:
question_url = "http://www.zhihu.com" + question_link.a["href"]
question_title = question_link.a.string.encode("utf-8")
question = Question(question_url, question_title)
answer_l = answer.find("span", class_="answer-date-link-wrap")
answer_url = "http://www.zhihu.com" + answer_l.a["href"]
author = None
answer_l = answer.find("h3", class_="zm-item-answer-author-wrap")
if answer_l.string == u"匿名用户":
author_url = None
author = User(author_url)
else:
author_tag = answer_l.find_all("a")[0]
author_id = author_tag.string.encode("utf-8")
author_url = "http://www.zhihu.com" + author_tag["href"]
author = User(author_url, author_id)
yield Answer(answer_url, question, author)
i = i + 1
# Get the first n answers in this collection
def get_top_i_answers(self, n):
j = 0
answers = self.get_all_answers()
for answer in answers:
j = j + 1
if j > n:
break
yield answer
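# --- Illustrative usage sketch (not part of the original script) ---
# A minimal example of driving the classes above: open a collection by URL,
# print its name and save its first few answers as Markdown. The collection
# URL is a placeholder with the 8-character id the constructor expects.
if __name__ == "__main__":
    collection = Collection("http://www.zhihu.com/collection/12345678")
    print collection.get_name()
    for answer in collection.get_top_i_answers(3):
        print answer.get_author().get_user_id(), answer.get_upvote()
        answer.to_md()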
| 39.138672
| 100
| 0.464744
|
ff260f9fab80ac0549f254ecd752c0af3de2073d
| 17,275
|
py
|
Python
|
ingestion/src/metadata/ingestion/sink/elasticsearch.py
|
avignd/OpenMetadata
|
f81fb3d5e8ec391928afba57868cf0f9d7dc0f74
|
[
"Apache-2.0"
] | null | null | null |
ingestion/src/metadata/ingestion/sink/elasticsearch.py
|
avignd/OpenMetadata
|
f81fb3d5e8ec391928afba57868cf0f9d7dc0f74
|
[
"Apache-2.0"
] | null | null | null |
ingestion/src/metadata/ingestion/sink/elasticsearch.py
|
avignd/OpenMetadata
|
f81fb3d5e8ec391928afba57868cf0f9d7dc0f74
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import time
from typing import List, Optional
from elasticsearch import Elasticsearch
from metadata.config.common import ConfigModel
from metadata.generated.schema.entity.data.chart import Chart
from metadata.generated.schema.entity.data.dashboard import Dashboard
from metadata.generated.schema.entity.data.database import Database
from metadata.generated.schema.entity.data.pipeline import Pipeline, Task
from metadata.generated.schema.entity.data.table import Column, Table
from metadata.generated.schema.entity.data.topic import Topic
from metadata.generated.schema.entity.services.dashboardService import DashboardService
from metadata.generated.schema.entity.services.databaseService import DatabaseService
from metadata.generated.schema.entity.services.messagingService import MessagingService
from metadata.generated.schema.entity.services.pipelineService import PipelineService
from metadata.generated.schema.type import entityReference
from metadata.ingestion.api.common import Record, WorkflowContext
from metadata.ingestion.api.sink import Sink, SinkStatus
from metadata.ingestion.models.table_metadata import (
DashboardESDocument,
PipelineESDocument,
TableESDocument,
TopicESDocument,
)
from metadata.ingestion.ometa.ometa_api import OpenMetadata
from metadata.ingestion.ometa.openmetadata_rest import MetadataServerConfig
from metadata.ingestion.sink.elasticsearch_constants import (
DASHBOARD_ELASTICSEARCH_INDEX_MAPPING,
PIPELINE_ELASTICSEARCH_INDEX_MAPPING,
TABLE_ELASTICSEARCH_INDEX_MAPPING,
TOPIC_ELASTICSEARCH_INDEX_MAPPING,
)
logger = logging.getLogger(__name__)
class ElasticSearchConfig(ConfigModel):
es_host: str
es_port: int = 9200
es_username: Optional[str] = None
es_password: Optional[str] = None
index_tables: Optional[bool] = True
index_topics: Optional[bool] = True
index_dashboards: Optional[bool] = True
index_pipelines: Optional[bool] = True
table_index_name: str = "table_search_index"
topic_index_name: str = "topic_search_index"
dashboard_index_name: str = "dashboard_search_index"
pipeline_index_name: str = "pipeline_search_index"
class ElasticsearchSink(Sink):
""" """
DEFAULT_ELASTICSEARCH_INDEX_MAPPING = TABLE_ELASTICSEARCH_INDEX_MAPPING
@classmethod
def create(
cls, config_dict: dict, metadata_config_dict: dict, ctx: WorkflowContext
):
config = ElasticSearchConfig.parse_obj(config_dict)
metadata_config = MetadataServerConfig.parse_obj(metadata_config_dict)
return cls(ctx, config, metadata_config)
def __init__(
self,
ctx: WorkflowContext,
config: ElasticSearchConfig,
metadata_config: MetadataServerConfig,
) -> None:
self.config = config
self.metadata_config = metadata_config
self.ctx = ctx
self.status = SinkStatus()
self.metadata = OpenMetadata(self.metadata_config)
self.elasticsearch_doc_type = "_doc"
http_auth = None
if self.config.es_username:
http_auth = (self.config.es_username, self.config.es_password)
self.elasticsearch_client = Elasticsearch(
[
{"host": self.config.es_host, "port": self.config.es_port},
],
http_auth=http_auth,
)
if self.config.index_tables:
self._check_or_create_index(
self.config.table_index_name, TABLE_ELASTICSEARCH_INDEX_MAPPING
)
if self.config.index_topics:
self._check_or_create_index(
self.config.topic_index_name, TOPIC_ELASTICSEARCH_INDEX_MAPPING
)
if self.config.index_dashboards:
self._check_or_create_index(
self.config.dashboard_index_name, DASHBOARD_ELASTICSEARCH_INDEX_MAPPING
)
if self.config.index_pipelines:
self._check_or_create_index(
self.config.pipeline_index_name, PIPELINE_ELASTICSEARCH_INDEX_MAPPING
)
def _check_or_create_index(self, index_name: str, es_mapping: str):
"""
Check that the target index exists with the expected mapping;
update the mapping, or create the index, if it does not.
"""
if self.elasticsearch_client.indices.exists(index_name):
mapping = self.elasticsearch_client.indices.get_mapping()
if not mapping[index_name]["mappings"]:
logger.debug(
f"There are no mappings for index {index_name}. Updating the mapping"
)
es_mapping_dict = json.loads(es_mapping)
es_mapping_update_dict = {
"properties": es_mapping_dict["mappings"]["properties"]
}
self.elasticsearch_client.indices.put_mapping(
index=index_name, body=json.dumps(es_mapping_update_dict)
)
else:
logger.warning(
"Received index not found error from Elasticsearch. "
+ "The index doesn't exist for a newly created ES. It's OK on first run."
)
# create new index with mapping
self.elasticsearch_client.indices.create(index=index_name, body=es_mapping)
def write_record(self, record: Record) -> None:
if isinstance(record, Table):
table_doc = self._create_table_es_doc(record)
self.elasticsearch_client.index(
index=self.config.table_index_name,
id=str(table_doc.table_id),
body=table_doc.json(),
)
if isinstance(record, Topic):
topic_doc = self._create_topic_es_doc(record)
self.elasticsearch_client.index(
index=self.config.topic_index_name,
id=str(topic_doc.topic_id),
body=topic_doc.json(),
)
if isinstance(record, Dashboard):
dashboard_doc = self._create_dashboard_es_doc(record)
self.elasticsearch_client.index(
index=self.config.dashboard_index_name,
id=str(dashboard_doc.dashboard_id),
body=dashboard_doc.json(),
)
if isinstance(record, Pipeline):
pipeline_doc = self._create_pipeline_es_doc(record)
self.elasticsearch_client.index(
index=self.config.pipeline_index_name,
id=str(pipeline_doc.pipeline_id),
body=pipeline_doc.json(),
)
if hasattr(record.name, "__root__"):
self.status.records_written(record.name.__root__)
else:
self.status.records_written(record.name)
def _create_table_es_doc(self, table: Table):
fqdn = table.fullyQualifiedName
database = table.database.name
table_name = table.name
suggest = [
{"input": [fqdn], "weight": 5},
{"input": [table_name], "weight": 10},
]
column_names = []
column_descriptions = []
tags = set()
timestamp = time.time()
tier = None
for table_tag in table.tags:
if "Tier" in table_tag.tagFQN:
tier = table_tag.tagFQN
else:
tags.add(table_tag.tagFQN)
self._parse_columns(
table.columns, None, column_names, column_descriptions, tags
)
database_entity = self.metadata.get_by_id(
entity=Database, entity_id=str(table.database.id.__root__)
)
service_entity = self.metadata.get_by_id(
entity=DatabaseService, entity_id=str(database_entity.service.id.__root__)
)
table_owner = str(table.owner.id.__root__) if table.owner is not None else ""
table_followers = []
if table.followers:
for follower in table.followers.__root__:
table_followers.append(str(follower.id.__root__))
table_doc = TableESDocument(
table_id=str(table.id.__root__),
database=str(database_entity.name.__root__),
service=service_entity.name,
service_type=service_entity.serviceType.name,
table_name=table.name.__root__,
suggest=suggest,
description=table.description,
table_type=table.tableType.name,
last_updated_timestamp=timestamp,
column_names=column_names,
column_descriptions=column_descriptions,
monthly_stats=table.usageSummary.monthlyStats.count,
monthly_percentile_rank=table.usageSummary.monthlyStats.percentileRank,
weekly_stats=table.usageSummary.weeklyStats.count,
weekly_percentile_rank=table.usageSummary.weeklyStats.percentileRank,
daily_stats=table.usageSummary.dailyStats.count,
daily_percentile_rank=table.usageSummary.dailyStats.percentileRank,
tier=tier,
tags=list(tags),
fqdn=fqdn,
schema_description=None,
owner=table_owner,
followers=table_followers,
)
return table_doc
def _create_topic_es_doc(self, topic: Topic):
fqdn = topic.fullyQualifiedName
topic_name = topic.name
suggest = [
{"input": [fqdn], "weight": 5},
{"input": [topic_name], "weight": 10},
]
tags = set()
timestamp = time.time()
service_entity = self.metadata.get_by_id(
entity=MessagingService, entity_id=str(topic.service.id.__root__)
)
topic_owner = str(topic.owner.id.__root__) if topic.owner is not None else ""
topic_followers = []
if topic.followers:
for follower in topic.followers.__root__:
topic_followers.append(str(follower.id.__root__))
tier = None
for topic_tag in topic.tags:
if "Tier" in topic_tag.tagFQN:
tier = topic_tag.tagFQN
else:
tags.add(topic_tag.tagFQN)
topic_doc = TopicESDocument(
topic_id=str(topic.id.__root__),
service=service_entity.name,
service_type=service_entity.serviceType.name,
topic_name=topic.name.__root__,
suggest=suggest,
description=topic.description,
last_updated_timestamp=timestamp,
tier=tier,
tags=list(tags),
fqdn=fqdn,
owner=topic_owner,
followers=topic_followers,
)
return topic_doc
def _create_dashboard_es_doc(self, dashboard: Dashboard):
fqdn = dashboard.fullyQualifiedName
dashboard_name = dashboard.name
suggest = [{"input": [dashboard.displayName], "weight": 10}]
tags = set()
timestamp = time.time()
service_entity = self.metadata.get_by_id(
entity=DashboardService, entity_id=str(dashboard.service.id.__root__)
)
dashboard_owner = (
str(dashboard.owner.id.__root__) if dashboard.owner is not None else ""
)
dashboard_followers = []
if dashboard.followers:
for follower in dashboard.followers.__root__:
dashboard_followers.append(str(follower.id.__root__))
tier = None
for dashboard_tag in dashboard.tags:
if "Tier" in dashboard_tag.tagFQN:
tier = dashboard_tag.tagFQN
else:
tags.add(dashboard_tag.tagFQN)
charts: List[Chart] = self._get_charts(dashboard.charts)
chart_names = []
chart_descriptions = []
for chart in charts:
chart_names.append(chart.displayName)
if chart.description is not None:
chart_descriptions.append(chart.description)
if len(chart.tags) > 0:
for col_tag in chart.tags:
tags.add(col_tag.tagFQN)
dashboard_doc = DashboardESDocument(
dashboard_id=str(dashboard.id.__root__),
service=service_entity.name,
service_type=service_entity.serviceType.name,
dashboard_name=dashboard.displayName,
chart_names=chart_names,
chart_descriptions=chart_descriptions,
suggest=suggest,
description=dashboard.description,
last_updated_timestamp=timestamp,
tier=tier,
tags=list(tags),
fqdn=fqdn,
owner=dashboard_owner,
followers=dashboard_followers,
monthly_stats=dashboard.usageSummary.monthlyStats.count,
monthly_percentile_rank=dashboard.usageSummary.monthlyStats.percentileRank,
weekly_stats=dashboard.usageSummary.weeklyStats.count,
weekly_percentile_rank=dashboard.usageSummary.weeklyStats.percentileRank,
daily_stats=dashboard.usageSummary.dailyStats.count,
daily_percentile_rank=dashboard.usageSummary.dailyStats.percentileRank,
)
return dashboard_doc
def _create_pipeline_es_doc(self, pipeline: Pipeline):
fqdn = pipeline.fullyQualifiedName
suggest = [{"input": [pipeline.displayName], "weight": 10}]
tags = set()
timestamp = time.time()
service_entity = self.metadata.get_by_id(
entity=PipelineService, entity_id=str(pipeline.service.id.__root__)
)
pipeline_owner = (
str(pipeline.owner.id.__root__) if pipeline.owner is not None else ""
)
pipeline_followers = []
if pipeline.followers:
for follower in pipeline.followers.__root__:
pipeline_followers.append(str(follower.id.__root__))
tier = None
for pipeline_tag in pipeline.tags:
if "Tier" in pipeline_tag.tagFQN:
tier = pipeline_tag.tagFQN
else:
tags.add(pipeline_tag.tagFQN)
tasks: List[Task] = pipeline.tasks
task_names = []
task_descriptions = []
for task in tasks:
task_names.append(task.displayName)
if task.description is not None:
task_descriptions.append(task.description)
if task.tags and len(task.tags) > 0:
for col_tag in task.tags:
tags.add(col_tag.tagFQN)
pipeline_doc = PipelineESDocument(
pipeline_id=str(pipeline.id.__root__),
service=service_entity.name,
service_type=service_entity.serviceType.name,
pipeline_name=pipeline.displayName,
task_names=task_names,
task_descriptions=task_descriptions,
suggest=suggest,
description=pipeline.description,
last_updated_timestamp=timestamp,
tier=tier,
tags=list(tags),
fqdn=fqdn,
owner=pipeline_owner,
followers=pipeline_followers,
)
return pipeline_doc
def _get_charts(self, chart_refs: Optional[List[entityReference.EntityReference]]):
charts = []
if chart_refs:
for chart_ref in chart_refs:
chart = self.metadata.get_by_id(
entity=Chart, entity_id=str(chart_ref.id.__root__), fields=["tags"]
)
charts.append(chart)
return charts
def _parse_columns(
self,
columns: List[Column],
parent_column,
column_names,
column_descriptions,
tags,
):
for column in columns:
col_name = (
parent_column + "." + column.name.__root__
if parent_column is not None
else column.name.__root__
)
column_names.append(col_name)
if column.description is not None:
column_descriptions.append(column.description)
if len(column.tags) > 0:
for col_tag in column.tags:
tags.add(col_tag.tagFQN)
if column.children is not None:
self._parse_columns(
column.children,
column.name.__root__,
column_names,
column_descriptions,
tags,
)
def get_status(self):
return self.status
def close(self):
self.elasticsearch_client.close()
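# --- Illustrative sketch (not part of the original module) ---
# Shape of the config dict that ElasticSearchConfig.parse_obj() accepts; every
# key mirrors a field declared above, and the host value is a placeholder.
if __name__ == "__main__":
    sample_config = {
        "es_host": "localhost",
        "es_port": 9200,
        "index_tables": True,
        "table_index_name": "table_search_index",
    }
    parsed = ElasticSearchConfig.parse_obj(sample_config)
    print(parsed.es_host, parsed.es_port, parsed.table_index_name)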
| 39.712644
| 89
| 0.634732
|
ec4d101da11a11151e7178d45cd0d6faae08184e
| 1,496
|
py
|
Python
|
app/modules/products/schemas.py
|
ezequiaspedro/SBF-Api
|
547322505ed4f50bdf7dc86a341eee0a667f0a4c
|
[
"MIT"
] | null | null | null |
app/modules/products/schemas.py
|
ezequiaspedro/SBF-Api
|
547322505ed4f50bdf7dc86a341eee0a667f0a4c
|
[
"MIT"
] | null | null | null |
app/modules/products/schemas.py
|
ezequiaspedro/SBF-Api
|
547322505ed4f50bdf7dc86a341eee0a667f0a4c
|
[
"MIT"
] | 1
|
2021-06-12T01:36:17.000Z
|
2021-06-12T01:36:17.000Z
|
from typing import List, Optional
from ...utils.helpers import BaseSchema, MetaDatetimeSchema
from ...utils.pagination import PaginationMetadataSchema
class ProductCreate(BaseSchema):
name: str
size: str
inventory: int
weight: float
class Config:
schema_extra = {
"example": {
"name": "Camisa Azul",
"size": "P",
"inventory": 10,
"weight": 10.5
}
}
class ProductUpdate(BaseSchema):
name: Optional[str]
size: Optional[str]
weight: Optional[float]
class Config:
schema_extra = {
"example": {
"name": "Camisa Azul",
"size": "P",
"weight": 10.5
}
}
class ProductResponse(BaseSchema):
id: int
name: str
size: str
inventory: int
weight: float
metadatetime: MetaDatetimeSchema
class Config:
schema_extra = {
"example": {
"id": 1,
"name": "Camisa Amarela",
"size": "P",
"inventory": 10,
"weight": 10.5,
"metadatetime": {
"created_on": "2020-01-01T00:00:00.000001",
"updated_on": "2020-01-01T00:00:00.000001"
}
}
}
class ProductsResponse(BaseSchema):
pagination_metadata: Optional[PaginationMetadataSchema]
records: List[ProductResponse]
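# --- Illustrative sketch (not part of the original module) ---
# Constructing a ProductCreate payload matching the schema_extra example above.
# BaseSchema is assumed to be a pydantic model, so keyword construction works.
if __name__ == "__main__":
    example = ProductCreate(name="Camisa Azul", size="P", inventory=10, weight=10.5)
    print(example.name, example.inventory)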
| 24.52459
| 63
| 0.504679
|
9a4e41fd2908c9013dea3076dbc1a090ef5bc1f8
| 3,883
|
py
|
Python
|
cirq-google/cirq_google/engine/engine_result.py
|
alexandrebouayad/Cirq
|
4ba730b17b6af6265ee6458eb40172b847bd5684
|
[
"Apache-2.0"
] | 1
|
2022-02-05T22:17:39.000Z
|
2022-02-05T22:17:39.000Z
|
cirq-google/cirq_google/engine/engine_result.py
|
pavoljuhas/Cirq
|
b6d6577be61d216ce2f29f8c64ae5879cf3087d5
|
[
"Apache-2.0"
] | 4
|
2022-01-16T14:12:15.000Z
|
2022-02-24T03:58:46.000Z
|
cirq-google/cirq_google/engine/engine_result.py
|
Nexuscompute/Cirq
|
640ef8f82d6a56ec95361388ce7976e096cca906
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from typing import Optional, Mapping, TYPE_CHECKING, Any, Dict
import numpy as np
from cirq import study
if TYPE_CHECKING:
import cirq
class EngineResult(study.ResultDict):
"""A ResultDict with additional job metadata.
Please see the documentation for `cirq.ResultDict` for more information.
Additional Attributes:
job_id: A string job identifier.
job_finished_time: A timestamp for when the job finished.
"""
def __init__(
self,
*, # Forces keyword args.
job_id: str,
job_finished_time: datetime.datetime,
params: Optional[study.ParamResolver] = None,
measurements: Optional[Mapping[str, np.ndarray]] = None,
records: Optional[Mapping[str, np.ndarray]] = None,
):
"""Initialize the result.
Args:
job_id: A string job identifier.
job_finished_time: A timestamp for when the job finished; will be converted to UTC.
params: A ParamResolver of settings used for this result.
measurements: A dictionary from measurement gate key to measurement
results. See `cirq.ResultDict`.
records: A dictionary from measurement gate key to measurement
records. See `cirq.ResultDict`.
"""
super().__init__(params=params, measurements=measurements, records=records)
self.job_id = job_id
self.job_finished_time = job_finished_time
@classmethod
def from_result(
cls, result: 'cirq.Result', *, job_id: str, job_finished_time: datetime.datetime
):
if isinstance(result, study.ResultDict):
# optimize by using private methods
return cls(
params=result._params,
measurements=result._measurements,
records=result._records,
job_id=job_id,
job_finished_time=job_finished_time,
)
else:
return cls(
params=result.params,
measurements=result.measurements,
records=result.records,
job_id=job_id,
job_finished_time=job_finished_time,
)
def __eq__(self, other):
if not isinstance(other, EngineResult):
return False
return (
super().__eq__(other)
and self.job_id == other.job_id
and self.job_finished_time == other.job_finished_time
)
def __repr__(self) -> str:
return (
f'cirq_google.EngineResult(params={self.params!r}, '
f'records={self._record_dict_repr()}, '
f'job_id={self.job_id!r}, '
f'job_finished_time={self.job_finished_time!r})'
)
@classmethod
def _json_namespace_(cls) -> str:
return 'cirq.google'
def _json_dict_(self) -> Dict[str, Any]:
d = super()._json_dict_()
d['job_id'] = self.job_id
d['job_finished_time'] = self.job_finished_time
return d
@classmethod
def _from_json_dict_(cls, params, records, job_id, job_finished_time, **kwargs):
return cls._from_packed_records(
params=params, records=records, job_id=job_id, job_finished_time=job_finished_time
)
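# --- Illustrative sketch (not part of the original module) ---
# Building an EngineResult directly from measurement arrays. The job id and
# timestamp are placeholders; `measurements` follows the cirq.ResultDict
# convention of mapping a measurement key to an array of repetitions x qubits.
if __name__ == "__main__":
    example = EngineResult(
        job_id="projects/p/programs/prog/jobs/job-0",
        job_finished_time=datetime.datetime(2022, 1, 1, tzinfo=datetime.timezone.utc),
        measurements={"m": np.array([[0, 1], [1, 0]], dtype=bool)},
    )
    print(example.job_id, example.measurements["m"].shape)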
| 34.061404
| 95
| 0.635076
|
04beea8218df6237c4582186ac25337e9a55d6bb
| 3,095
|
py
|
Python
|
app/app/settings.py
|
leeway00/recipie-app-api
|
d1c5c0855986254fa94dcd3ad836c89d83060722
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
leeway00/recipie-app-api
|
d1c5c0855986254fa94dcd3ad836c89d83060722
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
leeway00/recipie-app-api
|
d1c5c0855986254fa94dcd3ad836c89d83060722
|
[
"MIT"
] | null | null | null |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5873&ctyy$=uy#vuy2tt693e7p1f5c+_*j=h3@_!ayx^eh*n5v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
| 25.162602
| 91
| 0.694992
|
69743e103cfab7bb79236becc0bdfee8a2f7f29c
| 959
|
py
|
Python
|
data/dbase/check_tables_complete.py
|
FedeClaudi/LocomotionControl
|
1281f7894825096ad212407351463a2105c5152a
|
[
"MIT"
] | null | null | null |
data/dbase/check_tables_complete.py
|
FedeClaudi/LocomotionControl
|
1281f7894825096ad212407351463a2105c5152a
|
[
"MIT"
] | 2
|
2020-11-23T16:32:11.000Z
|
2020-11-23T16:32:11.000Z
|
data/dbase/check_tables_complete.py
|
FedeClaudi/LocomotionControl
|
1281f7894825096ad212407351463a2105c5152a
|
[
"MIT"
] | null | null | null |
from loguru import logger
import sys
from tpd import recorder
sys.path.append("./")
from data import dbase
def get_sessions_to_track(recordings_only: bool = True):
"""
It gets a list of sessions left to track
"""
logger.info("Getting sessions that need to be tracked with DLC still")
sessions_in_table = dbase.db_tables.Session.fetch("name")
if recordings_only:
sessions_in_table = [
s
for s in sessions_in_table
if dbase.db_tables.Session.has_recording(s)
]
need_tracking = []
for session in sessions_in_table:
if not dbase.db_tables.Session.was_tracked(session):
need_tracking.append(session)
logger.info(
f"Found {len(need_tracking)}/{len(sessions_in_table)} sessions that still need to be tracked"
)
recorder.add_text(", ".join(need_tracking), name="__to_track")
if __name__ == "__main__":
get_sessions_to_track()
| 25.918919
| 101
| 0.67049
|
5b54ad71f0baf104129882681e6304fc8fbdeb45
| 448
|
py
|
Python
|
misc/setup.py
|
PoRiLo/arcgis-python-api
|
af89fd64db4ae57d100f3216a0a4f731c8bd0e68
|
[
"Apache-2.0"
] | 1,299
|
2016-06-12T15:58:47.000Z
|
2022-03-29T08:09:49.000Z
|
misc/setup.py
|
PoRiLo/arcgis-python-api
|
af89fd64db4ae57d100f3216a0a4f731c8bd0e68
|
[
"Apache-2.0"
] | 999
|
2016-08-10T13:46:15.000Z
|
2022-03-31T23:29:53.000Z
|
misc/setup.py
|
PoRiLo/arcgis-python-api
|
af89fd64db4ae57d100f3216a0a4f731c8bd0e68
|
[
"Apache-2.0"
] | 967
|
2016-06-12T06:03:41.000Z
|
2022-03-29T12:50:48.000Z
|
from _common import *
print("-*-*-*-*-*-*-*-*-*-*-*Setup begins*-*-*-*-*-*-*-*-*-*-*-*-*-")
clean_up_location_tracking(gis_playground)
delete_for_users(gis_online, ignore_accounts_online, target_accounts_online)
delete_for_users(gis_playground, ignore_accounts_playground, target_accounts_playground)
setup_tracker_user(gis_playground)
# publish_data(gis_online, data_paths)
print("-*-*-*-*-*-*-*-*-*-*-*Setup ends*-*-*-*-*-*-*-*-*-*-*-*-*-*-")
| 37.333333
| 88
| 0.676339
|
a225e2ea78c7b29e00376d661d289030ff053762
| 6,530
|
py
|
Python
|
rvranking/sampling/tests.py
|
hodeld/rbs-ranking
|
3c102e4442eb51dd33d8c06a303032ff18426ad4
|
[
"MIT"
] | null | null | null |
rvranking/sampling/tests.py
|
hodeld/rbs-ranking
|
3c102e4442eb51dd33d8c06a303032ff18426ad4
|
[
"MIT"
] | 2
|
2021-08-25T16:16:03.000Z
|
2022-02-10T04:57:01.000Z
|
rvranking/sampling/tests.py
|
hodeld/rbs-ranking
|
3c102e4442eb51dd33d8c06a303032ff18426ad4
|
[
"MIT"
] | null | null | null |
import random
import unittest
import numpy as np
from rvranking.rankingComponents import input_fn
from rvranking.sampling.elwcWrite import write_elwc
from rvranking.sampling.main import prep_samples_list
from rvranking.sampling.samplingClasses import Sample, RVList, RV
from rvranking.globalVars import _TRAIN_DATA_PATH, _RV_FEATURE, _EVENT_FEATURE, _EVENT_FEATURES, _RV_FEATURES
from rvranking.dataPrep import samples, timelines, allevents, prep_samples, get_timelines_raw, \
prep_timelines, prep_allevents, TD_PERWK, WEEKS_B, WEEKS_A, KMAX, rvs, rvfirstev, get_test_files, PPH, RV_TLINE_LEN
import tensorflow as tf
from rvranking.sampling.scikitDataGet import x_y_data
def sample_test(cls, s, tlines, allevs):
rv = s.rv
tline = tlines.loc[str(rv)]
ev_tline_val = tline.loc[str(s.start):str(s.end)].values
ev = allevs.loc[s.id]
if s.teams:
pass
else:
cls.assertEqual(ev['End'], s.end)
cls.assertEqual(ev['Start'], s.start)
cls.assertEqual(ev['Rv'], s.rv)
cls.assertEqual(ev['Type'], s.evtype)
cls.assertEqual(ev['Rv added'], s.rv_added)
evtype = s.evtype
cls.assertEqual((evtype == ev_tline_val).all(), True)
def sampling_test(cls, s, allevs_all=None):
if allevs_all is not None:
oneday = PPH * 24
range_start = int(s.rangestart - oneday)
if range_start < 0:
range_start = 0
range_end = int(s.rangeend + oneday)
if range_end > KMAX:
range_end = KMAX
idx = np.where((allevs_all['Start'] >= range_start) & (allevs_all['End'] <= range_end))
evs_range = allevs_all.iloc[idx]
for r in s.rvli:
cls.assertEqual(r.tline.size, RV_TLINE_LEN)
cls.assertEqual(r.tline.iloc[WEEKS_B * TD_PERWK], 0) # WEEKS_B * TD_PERWK
cls.assertEqual(r.tline.loc[str(s.start):str(s.end)].any(), False) # all zero
if allevs_all is not None:
index_vals = r.tline.index.values
min_t = int(index_vals[0])
max_t = int(index_vals[-1])
rv_added = s.rv_added
allevs = evs_range[evs_range['Rv'] == r.id]
allevs = allevs[allevs['Rv added'] >= rv_added]
for eid, row in allevs.iterrows():
if eid == s.id:
continue
st = row['Start']
et = row['End']
if st < min_t or et > max_t:
continue
cls.assertEqual(st, s.start)
r_tline_ev = r.tline.loc[str(st):str(et)].values
cls.assertEqual((0 == r_tline_ev).all(), True)
if 'rv_ff' in _EVENT_FEATURES or 'rv_ff' in _RV_FEATURES:
if s.rv_ff == r.id:
cls.assertEqual(r.rv_ff, 1)
class TestSampling(unittest.TestCase):
def test_samples(self):
sample_list_all = [Sample(s) for i, s in samples.iterrows()]
random.shuffle(sample_list_all)
tlines = timelines
allevs = allevents
for s in sample_list_all:
sample_test(self, s, tlines, allevs)
def test_prediction_samples(self):
samples_pred, tlines, allevs, rvs, rvfirstev = get_test_files()
sample_list_all = [Sample(s) for i, s in samples_pred.iterrows()]
s = sample_list_all[0]
ist = int(s.start - (TD_PERWK * WEEKS_B))
iet = int(s.start + TD_PERWK * WEEKS_A)
self.assertGreaterEqual(ist, 0) # ist >= 0
self.assertLessEqual(iet, KMAX)
for s in sample_list_all:
sample_test(self, s, tlines, allevs)
def test_sampling(self):
less_samples = samples.sample(n=50) # random rows
sample_list_all = [Sample(s) for i, s in less_samples.iterrows()]
rvlist_all = RVList([RV(r) for i, r in rvs.iterrows()])
train_ratio = 0.7
sample_list_train, sample_list_test = prep_samples_list(sample_list_all,
rvlist_all,
train_ratio=train_ratio,
timelines_spec=timelines,
rvfirstev_spec=rvfirstev,
allevents_spec=allevents
)
s_list_tot = sample_list_train + sample_list_test
assert len(s_list_tot) > 0
for s in s_list_tot:
sampling_test(self, s)
x_train, y_train, xy_train = x_y_data(s_list_tot)
def test_prediction_sampling(self):
samples_pred, tlines_pred, allevs_pred, rvs_pred, rvfirstev_pred = get_test_files()
sample_list_all = [Sample(s) for i, s in samples_pred.iterrows()]
rvlist_all = RVList([RV(r) for i, r in rvs_pred.iterrows()])
train_ratio = 0.7
sample_list_train, sample_list_test = prep_samples_list(sample_list_all,
rvlist_all,
train_ratio=train_ratio,
timelines_spec=tlines_pred,
rvfirstev_spec=rvfirstev_pred,
allevents_spec=allevs_pred
)
s_list_tot = sample_list_train + sample_list_test
for s in s_list_tot:
sampling_test(self, s, allevs_pred)
self.assertEqual(len(s.rvli), 5)
x_train, y_train, xy_train = x_y_data(s_list_tot)
def _test_write_and_input(self):
# _sampling
write_elwc()
feat, labs = input_fn(_TRAIN_DATA_PATH)
print('label', labs.shape)
for k, item in feat.items():
print('feat', k, item.shape)
print('first 5 labels', labs[0, :5].numpy()) # [0. 0. 0. 0. 1.]
event_t = tf.sparse.to_dense(feat[_EVENT_FEATURE]) # spare tensor to dense
rv_t = tf.sparse.to_dense(feat[_RV_FEATURE])
# print ('indices', query_st.indices[0][0]) #which indix has first value
print('event values', event_t[0])
# check slicing notification!
print('rv values', rv_t[0, :5, :10].numpy()) # sample 1, first 5 rvs, first 10 features
if __name__ == '__main__':
unittest.main()
print('sampling tests finished')
| 42.679739
| 119
| 0.565697
|
ef680b68a29e2fb55cc7fb5d45ed5cbda69d286f
| 44,201
|
py
|
Python
|
torch/nn/modules/activation.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | 1
|
2022-03-29T19:42:16.000Z
|
2022-03-29T19:42:16.000Z
|
torch/nn/modules/activation.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | 14
|
2021-10-14T06:58:50.000Z
|
2021-12-17T11:51:07.000Z
|
torch/nn/modules/activation.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | null | null | null |
import warnings
from typing import Optional, Tuple
import torch
from torch import Tensor
from .linear import NonDynamicallyQuantizableLinear
from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
from torch.nn.parameter import Parameter
from .module import Module
from .. import functional as F
class Threshold(Module):
r"""Thresholds each element of the input Tensor.
Threshold is defined as:
.. math::
y =
\begin{cases}
x, &\text{ if } x > \text{threshold} \\
\text{value}, &\text{ otherwise }
\end{cases}
Args:
threshold: The value to threshold at
value: The value to replace with
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
Examples::
>>> m = nn.Threshold(0.1, 20)
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['threshold', 'value', 'inplace']
threshold: float
value: float
inplace: bool
def __init__(self, threshold: float, value: float, inplace: bool = False) -> None:
super(Threshold, self).__init__()
self.threshold = threshold
self.value = value
self.inplace = inplace
# TODO: check in THNN (if inplace == True, then assert value <= threshold)
def forward(self, input: Tensor) -> Tensor:
return F.threshold(input, self.threshold, self.value, self.inplace)
def extra_repr(self):
inplace_str = ', inplace=True' if self.inplace else ''
return 'threshold={}, value={}{}'.format(
self.threshold, self.value, inplace_str
)
class ReLU(Module):
r"""Applies the rectified linear unit function element-wise:
:math:`\text{ReLU}(x) = (x)^+ = \max(0, x)`
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/ReLU.png
Examples::
>>> m = nn.ReLU()
>>> input = torch.randn(2)
>>> output = m(input)
An implementation of CReLU - https://arxiv.org/abs/1603.05201
>>> m = nn.ReLU()
>>> input = torch.randn(2).unsqueeze(0)
>>> output = torch.cat((m(input),m(-input)))
"""
__constants__ = ['inplace']
inplace: bool
def __init__(self, inplace: bool = False):
super(ReLU, self).__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.relu(input, inplace=self.inplace)
def extra_repr(self) -> str:
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
class RReLU(Module):
r"""Applies the randomized leaky rectified liner unit function, element-wise,
as described in the paper:
`Empirical Evaluation of Rectified Activations in Convolutional Network`_.
The function is defined as:
.. math::
\text{RReLU}(x) =
\begin{cases}
x & \text{if } x \geq 0 \\
ax & \text{ otherwise }
\end{cases}
where :math:`a` is randomly sampled from uniform distribution
:math:`\mathcal{U}(\text{lower}, \text{upper})`.
See: https://arxiv.org/pdf/1505.00853.pdf
Args:
lower: lower bound of the uniform distribution. Default: :math:`\frac{1}{8}`
upper: upper bound of the uniform distribution. Default: :math:`\frac{1}{3}`
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/RReLU.png
Examples::
>>> m = nn.RReLU(0.1, 0.3)
>>> input = torch.randn(2)
>>> output = m(input)
.. _`Empirical Evaluation of Rectified Activations in Convolutional Network`:
https://arxiv.org/abs/1505.00853
"""
__constants__ = ['lower', 'upper', 'inplace']
lower: float
upper: float
inplace: bool
def __init__(
self,
lower: float = 1. / 8,
upper: float = 1. / 3,
inplace: bool = False
):
super(RReLU, self).__init__()
self.lower = lower
self.upper = upper
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.rrelu(input, self.lower, self.upper, self.training, self.inplace)
def extra_repr(self):
inplace_str = ', inplace=True' if self.inplace else ''
return 'lower={}, upper={}{}'.format(self.lower, self.upper, inplace_str)
class Hardtanh(Module):
r"""Applies the HardTanh function element-wise.
HardTanh is defined as:
.. math::
\text{HardTanh}(x) = \begin{cases}
1 & \text{ if } x > 1 \\
-1 & \text{ if } x < -1 \\
x & \text{ otherwise } \\
\end{cases}
The range of the linear region :math:`[-1, 1]` can be adjusted using
:attr:`min_val` and :attr:`max_val`.
Args:
min_val: minimum value of the linear region range. Default: -1
max_val: maximum value of the linear region range. Default: 1
inplace: can optionally do the operation in-place. Default: ``False``
Keyword arguments :attr:`min_value` and :attr:`max_value`
have been deprecated in favor of :attr:`min_val` and :attr:`max_val`.
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Hardtanh.png
Examples::
>>> m = nn.Hardtanh(-2, 2)
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['min_val', 'max_val', 'inplace']
min_val: float
max_val: float
inplace: bool
def __init__(
self,
min_val: float = -1.,
max_val: float = 1.,
inplace: bool = False,
min_value: Optional[float] = None,
max_value: Optional[float] = None
) -> None:
super(Hardtanh, self).__init__()
if min_value is not None:
warnings.warn("keyword argument min_value is deprecated and rename to min_val")
min_val = min_value
if max_value is not None:
warnings.warn("keyword argument max_value is deprecated and rename to max_val")
max_val = max_value
self.min_val = min_val
self.max_val = max_val
self.inplace = inplace
assert self.max_val > self.min_val
def forward(self, input: Tensor) -> Tensor:
return F.hardtanh(input, self.min_val, self.max_val, self.inplace)
def extra_repr(self) -> str:
inplace_str = ', inplace=True' if self.inplace else ''
return 'min_val={}, max_val={}{}'.format(
self.min_val, self.max_val, inplace_str
)
class ReLU6(Hardtanh):
r"""Applies the element-wise function:
.. math::
\text{ReLU6}(x) = \min(\max(0,x), 6)
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/ReLU6.png
Examples::
>>> m = nn.ReLU6()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, inplace: bool = False):
super(ReLU6, self).__init__(0., 6., inplace)
def extra_repr(self) -> str:
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
class Sigmoid(Module):
r"""Applies the element-wise function:
.. math::
\text{Sigmoid}(x) = \sigma(x) = \frac{1}{1 + \exp(-x)}
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Sigmoid.png
Examples::
>>> m = nn.Sigmoid()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
return torch.sigmoid(input)
class Hardsigmoid(Module):
r"""Applies the Hardsigmoid function element-wise.
Hardsigmoid is defined as:
.. math::
\text{Hardsigmoid}(x) = \begin{cases}
0 & \text{if~} x \le -3, \\
1 & \text{if~} x \ge +3, \\
x / 6 + 1 / 2 & \text{otherwise}
\end{cases}
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Hardsigmoid.png
Examples::
>>> m = nn.Hardsigmoid()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['inplace']
inplace: bool
def __init__(self, inplace : bool = False) -> None:
super(Hardsigmoid, self).__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.hardsigmoid(input, self.inplace)
class Tanh(Module):
r"""Applies the Hyperbolic Tangent (Tanh) function element-wise.
Tanh is defined as:
.. math::
\text{Tanh}(x) = \tanh(x) = \frac{\exp(x) - \exp(-x)} {\exp(x) + \exp(-x)}
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Tanh.png
Examples::
>>> m = nn.Tanh()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
return torch.tanh(input)
class SiLU(Module):
r"""Applies the Sigmoid Linear Unit (SiLU) function, element-wise.
The SiLU function is also known as the swish function.
.. math::
\text{silu}(x) = x * \sigma(x), \text{where } \sigma(x) \text{ is the logistic sigmoid.}
.. note::
See `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_
where the SiLU (Sigmoid Linear Unit) was originally coined, and see
`Sigmoid-Weighted Linear Units for Neural Network Function Approximation
in Reinforcement Learning <https://arxiv.org/abs/1702.03118>`_ and `Swish:
a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941v1>`_
where the SiLU was experimented with later.
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/SiLU.png
Examples::
>>> m = nn.SiLU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['inplace']
inplace: bool
def __init__(self, inplace: bool = False):
super(SiLU, self).__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.silu(input, inplace=self.inplace)
def extra_repr(self) -> str:
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
class Mish(Module):
r"""Applies the Mish function, element-wise.
Mish: A Self Regularized Non-Monotonic Neural Activation Function.
.. math::
\text{Mish}(x) = x * \text{Tanh}(\text{Softplus}(x))
.. note::
See `Mish: A Self Regularized Non-Monotonic Neural Activation Function <https://arxiv.org/abs/1908.08681>`_
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Mish.png
Examples::
>>> m = nn.Mish()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['inplace']
inplace: bool
def __init__(self, inplace: bool = False):
super(Mish, self).__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.mish(input, inplace=self.inplace)
def extra_repr(self) -> str:
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
class Hardswish(Module):
r"""Applies the hardswish function, element-wise, as described in the paper:
`Searching for MobileNetV3`_.
.. math::
\text{Hardswish}(x) = \begin{cases}
0 & \text{if~} x \le -3, \\
x & \text{if~} x \ge +3, \\
x \cdot (x + 3) /6 & \text{otherwise}
\end{cases}
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Hardswish.png
Examples::
>>> m = nn.Hardswish()
>>> input = torch.randn(2)
>>> output = m(input)
.. _`Searching for MobileNetV3`:
https://arxiv.org/abs/1905.02244
"""
__constants__ = ['inplace']
inplace: bool
def __init__(self, inplace : bool = False) -> None:
super(Hardswish, self).__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.hardswish(input, self.inplace)
class ELU(Module):
r"""Applies the Exponential Linear Unit (ELU) function, element-wise, as described
in the paper: `Fast and Accurate Deep Network Learning by Exponential Linear
Units (ELUs) <https://arxiv.org/abs/1511.07289>`__.
ELU is defined as:
.. math::
\text{ELU}(x) = \begin{cases}
x, & \text{ if } x > 0\\
\alpha * (\exp(x) - 1), & \text{ if } x \leq 0
\end{cases}
Args:
alpha: the :math:`\alpha` value for the ELU formulation. Default: 1.0
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/ELU.png
Examples::
>>> m = nn.ELU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['alpha', 'inplace']
alpha: float
inplace: bool
def __init__(self, alpha: float = 1., inplace: bool = False) -> None:
super(ELU, self).__init__()
self.alpha = alpha
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.elu(input, self.alpha, self.inplace)
def extra_repr(self) -> str:
inplace_str = ', inplace=True' if self.inplace else ''
return 'alpha={}{}'.format(self.alpha, inplace_str)
class CELU(Module):
r"""Applies the element-wise function:
.. math::
\text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))
More details can be found in the paper `Continuously Differentiable Exponential Linear Units`_ .
Args:
alpha: the :math:`\alpha` value for the CELU formulation. Default: 1.0
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/CELU.png
Examples::
>>> m = nn.CELU()
>>> input = torch.randn(2)
>>> output = m(input)
.. _`Continuously Differentiable Exponential Linear Units`:
https://arxiv.org/abs/1704.07483
"""
__constants__ = ['alpha', 'inplace']
alpha: float
inplace: bool
def __init__(self, alpha: float = 1., inplace: bool = False) -> None:
super(CELU, self).__init__()
self.alpha = alpha
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.celu(input, self.alpha, self.inplace)
def extra_repr(self) -> str:
inplace_str = ', inplace=True' if self.inplace else ''
return 'alpha={}{}'.format(self.alpha, inplace_str)
class SELU(Module):
r"""Applied element-wise, as:
.. math::
\text{SELU}(x) = \text{scale} * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1)))
with :math:`\alpha = 1.6732632423543772848170429916717` and
:math:`\text{scale} = 1.0507009873554804934193349852946`.
.. warning::
When using ``kaiming_normal`` or ``kaiming_normal_`` for initialisation,
``nonlinearity='linear'`` should be used instead of ``nonlinearity='selu'``
in order to get `Self-Normalizing Neural Networks`_.
See :func:`torch.nn.init.calculate_gain` for more information.
More details can be found in the paper `Self-Normalizing Neural Networks`_ .
Args:
inplace (bool, optional): can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/SELU.png
Examples::
>>> m = nn.SELU()
>>> input = torch.randn(2)
>>> output = m(input)
.. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515
"""
__constants__ = ['inplace']
inplace: bool
def __init__(self, inplace: bool = False) -> None:
super(SELU, self).__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.selu(input, self.inplace)
def extra_repr(self) -> str:
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
class GLU(Module):
r"""Applies the gated linear unit function
:math:`{GLU}(a, b)= a \otimes \sigma(b)` where :math:`a` is the first half
of the input matrices and :math:`b` is the second half.
Args:
dim (int): the dimension on which to split the input. Default: -1
Shape:
- Input: :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional
dimensions
- Output: :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2`
Examples::
>>> m = nn.GLU()
>>> input = torch.randn(4, 2)
>>> output = m(input)
"""
__constants__ = ['dim']
dim: int
def __init__(self, dim: int = -1) -> None:
super(GLU, self).__init__()
self.dim = dim
def forward(self, input: Tensor) -> Tensor:
return F.glu(input, self.dim)
def extra_repr(self) -> str:
return 'dim={}'.format(self.dim)
class GELU(Module):
r"""Applies the Gaussian Error Linear Units function:
.. math:: \text{GELU}(x) = x * \Phi(x)
where :math:`\Phi(x)` is the Cumulative Distribution Function for Gaussian Distribution.
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/GELU.png
Examples::
>>> m = nn.GELU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
return F.gelu(input)
class Hardshrink(Module):
r"""Applies the Hard Shrinkage (Hardshrink) function element-wise.
Hardshrink is defined as:
.. math::
\text{HardShrink}(x) =
\begin{cases}
x, & \text{ if } x > \lambda \\
x, & \text{ if } x < -\lambda \\
0, & \text{ otherwise }
\end{cases}
Args:
lambd: the :math:`\lambda` value for the Hardshrink formulation. Default: 0.5
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Hardshrink.png
Examples::
>>> m = nn.Hardshrink()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['lambd']
lambd: float
def __init__(self, lambd: float = 0.5) -> None:
super(Hardshrink, self).__init__()
self.lambd = lambd
def forward(self, input: Tensor) -> Tensor:
return F.hardshrink(input, self.lambd)
def extra_repr(self) -> str:
return '{}'.format(self.lambd)
class LeakyReLU(Module):
r"""Applies the element-wise function:
.. math::
\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)
or
.. math::
\text{LeakyRELU}(x) =
\begin{cases}
x, & \text{ if } x \geq 0 \\
\text{negative\_slope} \times x, & \text{ otherwise }
\end{cases}
Args:
negative_slope: Controls the angle of the negative slope. Default: 1e-2
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)` where `*` means, any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
.. image:: ../scripts/activation_images/LeakyReLU.png
Examples::
>>> m = nn.LeakyReLU(0.1)
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['inplace', 'negative_slope']
inplace: bool
negative_slope: float
def __init__(self, negative_slope: float = 1e-2, inplace: bool = False) -> None:
super(LeakyReLU, self).__init__()
self.negative_slope = negative_slope
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.leaky_relu(input, self.negative_slope, self.inplace)
def extra_repr(self) -> str:
inplace_str = ', inplace=True' if self.inplace else ''
return 'negative_slope={}{}'.format(self.negative_slope, inplace_str)
class LogSigmoid(Module):
r"""Applies the element-wise function:
.. math::
\text{LogSigmoid}(x) = \log\left(\frac{ 1 }{ 1 + \exp(-x)}\right)
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/LogSigmoid.png
Examples::
>>> m = nn.LogSigmoid()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
return F.logsigmoid(input)
class Softplus(Module):
r"""Applies the Softplus function :math:`\text{Softplus}(x) = \frac{1}{\beta} *
\log(1 + \exp(\beta * x))` element-wise.
SoftPlus is a smooth approximation to the ReLU function and can be used
to constrain the output of a machine to always be positive.
For numerical stability the implementation reverts to the linear function
when :math:`input \times \beta > threshold`.
Args:
beta: the :math:`\beta` value for the Softplus formulation. Default: 1
threshold: values above this revert to a linear function. Default: 20
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Softplus.png
Examples::
>>> m = nn.Softplus()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['beta', 'threshold']
beta: int
threshold: int
def __init__(self, beta: int = 1, threshold: int = 20) -> None:
super(Softplus, self).__init__()
self.beta = beta
self.threshold = threshold
def forward(self, input: Tensor) -> Tensor:
return F.softplus(input, self.beta, self.threshold)
def extra_repr(self) -> str:
return 'beta={}, threshold={}'.format(self.beta, self.threshold)
class Softshrink(Module):
r"""Applies the soft shrinkage function elementwise:
.. math::
\text{SoftShrinkage}(x) =
\begin{cases}
x - \lambda, & \text{ if } x > \lambda \\
x + \lambda, & \text{ if } x < -\lambda \\
0, & \text{ otherwise }
\end{cases}
Args:
lambd: the :math:`\lambda` (must be no less than zero) value for the Softshrink formulation. Default: 0.5
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Softshrink.png
Examples::
>>> m = nn.Softshrink()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['lambd']
lambd: float
def __init__(self, lambd: float = 0.5) -> None:
super(Softshrink, self).__init__()
self.lambd = lambd
def forward(self, input: Tensor) -> Tensor:
return F.softshrink(input, self.lambd)
def extra_repr(self) -> str:
return str(self.lambd)
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
Args:
embed_dim: Total dimension of the model.
num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split
across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``).
dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout).
bias: If specified, adds bias to input / output projection layers. Default: ``True``.
add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``.
add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1.
Default: ``False``.
kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``).
vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
__constants__ = ['batch_first']
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False,
kdim=None, vdim=None, batch_first=False, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.empty((embed_dim, embed_dim), **factory_kwargs))
self.k_proj_weight = Parameter(torch.empty((embed_dim, self.kdim), **factory_kwargs))
self.v_proj_weight = Parameter(torch.empty((embed_dim, self.vdim), **factory_kwargs))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty((3 * embed_dim, embed_dim), **factory_kwargs))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim, **factory_kwargs))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = NonDynamicallyQuantizableLinear(embed_dim, embed_dim, bias=bias, **factory_kwargs)
if add_bias_kv:
self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query: Tensor, key: Tensor, value: Tensor, key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True, attn_mask: Optional[Tensor] = None,
average_attn_weights: bool = True) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query: Query embeddings of shape :math:`(L, E_q)` for unbatched input, :math:`(L, N, E_q)` when ``batch_first=False``
or :math:`(N, L, E_q)` when ``batch_first=True``, where :math:`L` is the target sequence length,
:math:`N` is the batch size, and :math:`E_q` is the query embedding dimension ``embed_dim``.
Queries are compared against key-value pairs to produce the output.
See "Attention Is All You Need" for more details.
key: Key embeddings of shape :math:`(S, E_k)` for unbatched input, :math:`(S, N, E_k)` when ``batch_first=False``
or :math:`(N, S, E_k)` when ``batch_first=True``, where :math:`S` is the source sequence length,
:math:`N` is the batch size, and :math:`E_k` is the key embedding dimension ``kdim``.
See "Attention Is All You Need" for more details.
value: Value embeddings of shape :math:`(S, E_v)` for unbatched input, :math:`(S, N, E_v)` when
``batch_first=False`` or :math:`(N, S, E_v)` when ``batch_first=True``, where :math:`S` is the source
sequence length, :math:`N` is the batch size, and :math:`E_v` is the value embedding dimension ``vdim``.
See "Attention Is All You Need" for more details.
key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key``
to ignore for the purpose of attention (i.e. treat as "padding"). For unbatched `query`, shape should be :math:`(S)`.
Binary and byte masks are supported.
For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for
the purpose of attention. For a byte mask, a non-zero value indicates that the corresponding ``key``
value will be ignored.
need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``.
Default: ``True``.
attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape
:math:`(L, S)` or :math:`(N\cdot\text{num\_heads}, L, S)`, where :math:`N` is the batch size,
:math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be
broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch.
Binary, byte, and float masks are supported. For a binary mask, a ``True`` value indicates that the
corresponding position is not allowed to attend. For a byte mask, a non-zero value indicates that the
corresponding position is not allowed to attend. For a float mask, the mask values will be added to
the attention weight.
average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across
heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an
                effect when ``need_weights=True``. Default: True (i.e. average weights across heads)
Outputs:
- **attn_output** - Attention outputs of shape :math:`(L, E)` when input is unbatched,
:math:`(L, N, E)` when ``batch_first=False`` or :math:`(N, L, E)` when ``batch_first=True``,
where :math:`L` is the target sequence length, :math:`N` is the batch size, and :math:`E` is the
embedding dimension ``embed_dim``.
- **attn_output_weights** - Only returned when ``need_weights=True``. If ``average_attn_weights=True``,
returns attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
:math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
              :math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
head of shape :math:`(num_heads, L, S)` when input is unbatched or :math:`(N, num_heads, L, S)`.
.. note::
`batch_first` argument is ignored for unbatched inputs.
"""
is_batched = query.dim() == 3
if self.batch_first and is_batched:
query, key, value = [x.transpose(1, 0) for x in (query, key, value)]
if not self._qkv_same_embed_dim:
attn_output, attn_output_weights = F.multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, average_attn_weights=average_attn_weights)
else:
attn_output, attn_output_weights = F.multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, average_attn_weights=average_attn_weights)
if self.batch_first and is_batched:
return attn_output.transpose(1, 0), attn_output_weights
else:
return attn_output, attn_output_weights
class PReLU(Module):
r"""Applies the element-wise function:
.. math::
\text{PReLU}(x) = \max(0,x) + a * \min(0,x)
or
.. math::
\text{PReLU}(x) =
\begin{cases}
x, & \text{ if } x \geq 0 \\
ax, & \text{ otherwise }
\end{cases}
Here :math:`a` is a learnable parameter. When called without arguments, `nn.PReLU()` uses a single
parameter :math:`a` across all input channels. If called with `nn.PReLU(nChannels)`,
a separate :math:`a` is used for each input channel.
.. note::
weight decay should not be used when learning :math:`a` for good performance.
.. note::
Channel dim is the 2nd dim of input. When input has dims < 2, then there is
no channel dim and the number of channels = 1.
Args:
num_parameters (int): number of :math:`a` to learn.
            Although it takes an int as input, only two values are legitimate:
            1, or the number of channels of the input. Default: 1
init (float): the initial value of :math:`a`. Default: 0.25
Shape:
        - Input: :math:`(*)` where `*` means, any number of additional
dimensions.
- Output: :math:`(*)`, same shape as the input.
Attributes:
weight (Tensor): the learnable weights of shape (:attr:`num_parameters`).
.. image:: ../scripts/activation_images/PReLU.png
Examples::
>>> m = nn.PReLU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['num_parameters']
num_parameters: int
def __init__(self, num_parameters: int = 1, init: float = 0.25,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
self.num_parameters = num_parameters
super(PReLU, self).__init__()
self.weight = Parameter(torch.empty(num_parameters, **factory_kwargs).fill_(init))
def forward(self, input: Tensor) -> Tensor:
return F.prelu(input, self.weight)
def extra_repr(self) -> str:
return 'num_parameters={}'.format(self.num_parameters)
class Softsign(Module):
r"""Applies the element-wise function:
.. math::
\text{SoftSign}(x) = \frac{x}{ 1 + |x|}
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Softsign.png
Examples::
>>> m = nn.Softsign()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
return F.softsign(input)
class Tanhshrink(Module):
r"""Applies the element-wise function:
.. math::
\text{Tanhshrink}(x) = x - \tanh(x)
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Tanhshrink.png
Examples::
>>> m = nn.Tanhshrink()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
return F.tanhshrink(input)
class Softmin(Module):
r"""Applies the Softmin function to an n-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range `[0, 1]` and sum to 1.
Softmin is defined as:
.. math::
\text{Softmin}(x_{i}) = \frac{\exp(-x_i)}{\sum_j \exp(-x_j)}
Shape:
- Input: :math:`(*)` where `*` means, any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
Args:
dim (int): A dimension along which Softmin will be computed (so every slice
along dim will sum to 1).
Returns:
a Tensor of the same dimension and shape as the input, with
values in the range [0, 1]
Examples::
>>> m = nn.Softmin()
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
__constants__ = ['dim']
dim: Optional[int]
def __init__(self, dim: Optional[int] = None) -> None:
super(Softmin, self).__init__()
self.dim = dim
def __setstate__(self, state):
self.__dict__.update(state)
if not hasattr(self, 'dim'):
self.dim = None
def forward(self, input: Tensor) -> Tensor:
return F.softmin(input, self.dim, _stacklevel=5)
def extra_repr(self):
return 'dim={dim}'.format(dim=self.dim)
class Softmax(Module):
r"""Applies the Softmax function to an n-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range [0,1] and sum to 1.
Softmax is defined as:
.. math::
\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
    When the input Tensor is a sparse tensor then the unspecified
values are treated as ``-inf``.
Shape:
- Input: :math:`(*)` where `*` means, any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Args:
dim (int): A dimension along which Softmax will be computed (so every slice
along dim will sum to 1).
.. note::
This module doesn't work directly with NLLLoss,
which expects the Log to be computed between the Softmax and itself.
Use `LogSoftmax` instead (it's faster and has better numerical properties).
Examples::
>>> m = nn.Softmax(dim=1)
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
__constants__ = ['dim']
dim: Optional[int]
def __init__(self, dim: Optional[int] = None) -> None:
super(Softmax, self).__init__()
self.dim = dim
def __setstate__(self, state):
self.__dict__.update(state)
if not hasattr(self, 'dim'):
self.dim = None
def forward(self, input: Tensor) -> Tensor:
return F.softmax(input, self.dim, _stacklevel=5)
def extra_repr(self) -> str:
return 'dim={dim}'.format(dim=self.dim)
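# Editor's note (illustrative sketch, not part of the original source): because
# NLLLoss expects log-probabilities, pair it with LogSoftmax (or use
# CrossEntropyLoss on raw logits) rather than Softmax, e.g.:
#   >>> log_probs = torch.nn.LogSoftmax(dim=1)(torch.randn(3, 5))
#   >>> loss = torch.nn.NLLLoss()(log_probs, torch.tensor([1, 0, 4]))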
class Softmax2d(Module):
r"""Applies SoftMax over features to each spatial location.
When given an image of ``Channels x Height x Width``, it will
apply `Softmax` to each location :math:`(Channels, h_i, w_j)`
Shape:
- Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`.
- Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Examples::
>>> m = nn.Softmax2d()
>>> # you softmax over the 2nd dimension
>>> input = torch.randn(2, 3, 12, 13)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
assert input.dim() == 4 or input.dim() == 3, 'Softmax2d requires a 3D or 4D tensor as input'
return F.softmax(input, -3, _stacklevel=5)
class LogSoftmax(Module):
r"""Applies the :math:`\log(\text{Softmax}(x))` function to an n-dimensional
input Tensor. The LogSoftmax formulation can be simplified as:
.. math::
\text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
Shape:
- Input: :math:`(*)` where `*` means, any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
Args:
dim (int): A dimension along which LogSoftmax will be computed.
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [-inf, 0)
Examples::
>>> m = nn.LogSoftmax()
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
__constants__ = ['dim']
dim: Optional[int]
def __init__(self, dim: Optional[int] = None) -> None:
super(LogSoftmax, self).__init__()
self.dim = dim
def __setstate__(self, state):
self.__dict__.update(state)
if not hasattr(self, 'dim'):
self.dim = None
def forward(self, input: Tensor) -> Tensor:
return F.log_softmax(input, self.dim, _stacklevel=5)
def extra_repr(self):
return 'dim={dim}'.format(dim=self.dim)
| 33.134183
| 129
| 0.600778
|
e7d1952af5e3755f91ab6bfcd478698966766722
| 724
|
py
|
Python
|
text/symbols.py
|
MikhailSukhovei/tacotron2_original
|
39fd0b825b27d54a75770b004ee2801a8364ec73
|
[
"BSD-3-Clause"
] | null | null | null |
text/symbols.py
|
MikhailSukhovei/tacotron2_original
|
39fd0b825b27d54a75770b004ee2801a8364ec73
|
[
"BSD-3-Clause"
] | null | null | null |
text/symbols.py
|
MikhailSukhovei/tacotron2_original
|
39fd0b825b27d54a75770b004ee2801a8364ec73
|
[
"BSD-3-Clause"
] | null | null | null |
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. '''
from text import cmudict
_pad = ''
_punctuation = '!,-.:?… '
_special = '~'
_letters = 'АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдеёжзийклмнопрстуфхцчшщъыьэюя'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
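# Editor's sketch (not part of the original file): a typical consumer of this module
# builds lookup tables from the exported list, e.g.:
#   _symbol_to_id = {s: i for i, s in enumerate(symbols)}
#   _id_to_symbol = {i: s for i, s in enumerate(symbols)}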
| 38.105263
| 195
| 0.743094
|
9b50d1bb9c21ff46a006fb14f7157f5053bdc0c7
| 5,237
|
py
|
Python
|
mindware/components/models/regression/liblinear_svr.py
|
aman-gupta-1995/Machine-Learning-Mindware
|
8b3050720711730520683c89949e3dbdfb168961
|
[
"MIT"
] | 27
|
2021-07-19T09:03:34.000Z
|
2022-03-31T06:19:23.000Z
|
mindware/components/models/regression/liblinear_svr.py
|
aman-gupta-1995/Machine-Learning-Mindware
|
8b3050720711730520683c89949e3dbdfb168961
|
[
"MIT"
] | 4
|
2021-07-15T12:17:10.000Z
|
2022-01-26T17:16:58.000Z
|
mindware/components/models/regression/liblinear_svr.py
|
aman-gupta-1995/Machine-Learning-Mindware
|
8b3050720711730520683c89949e3dbdfb168961
|
[
"MIT"
] | 17
|
2020-05-12T20:24:50.000Z
|
2021-07-11T03:31:38.000Z
|
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
CategoricalHyperparameter, Constant
from ConfigSpace.forbidden import ForbiddenEqualsClause, \
ForbiddenAndConjunction
import numpy as np
from mindware.components.utils.constants import *
from mindware.components.utils.configspace_utils import check_for_bool
from mindware.components.models.base_model import BaseRegressionModel
class LibLinear_SVR(BaseRegressionModel):
    # Liblinear is not deterministic as it uses an RNG inside
def __init__(self, epsilon, loss, dual, tol, C,
fit_intercept, intercept_scaling,
random_state=None):
self.epsilon = epsilon
self.loss = loss
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.random_state = random_state
self.estimator = None
def fit(self, X, Y):
from sklearn.svm import LinearSVR
# In case of nested loss
if isinstance(self.loss, dict):
combination = self.loss
self.loss = combination['loss']
self.dual = combination['dual']
self.epsilon = float(self.epsilon)
self.C = float(self.C)
self.tol = float(self.tol)
self.dual = check_for_bool(self.dual)
self.fit_intercept = check_for_bool(self.fit_intercept)
self.intercept_scaling = float(self.intercept_scaling)
self.estimator = LinearSVR(epsilon=self.epsilon,
loss=self.loss,
dual=self.dual,
tol=self.tol,
C=self.C,
fit_intercept=self.fit_intercept,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state)
self.estimator.fit(X, Y)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict(X)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'Liblinear-SVR',
'name': 'Liblinear Support Vector Regression',
'handles_regression': True,
'handles_classification': False,
'handles_multiclass': False,
'handles_multilabel': False,
'is_deterministic': False,
'input': (SPARSE, DENSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):
if optimizer == 'smac':
cs = ConfigurationSpace()
epsilon = CategoricalHyperparameter("epsilon", [1e-4, 1e-3, 1e-2, 1e-1, 1], default_value=1e-4)
loss = CategoricalHyperparameter(
"loss", ["epsilon_insensitive", "squared_epsilon_insensitive"], default_value="epsilon_insensitive")
dual = CategoricalHyperparameter("dual", ['True', 'False'], default_value='True')
tol = UniformFloatHyperparameter(
"tol", 1e-5, 1e-1, default_value=1e-4, log=True)
C = UniformFloatHyperparameter(
"C", 0.03125, 32768, log=True, default_value=1.0)
fit_intercept = Constant("fit_intercept", "True")
intercept_scaling = Constant("intercept_scaling", 1)
cs.add_hyperparameters([epsilon, loss, dual, tol, C,
fit_intercept, intercept_scaling])
dual_and_loss = ForbiddenAndConjunction(
ForbiddenEqualsClause(dual, "False"),
ForbiddenEqualsClause(loss, "epsilon_insensitive")
)
cs.add_forbidden_clause(dual_and_loss)
return cs
elif optimizer == 'tpe':
from hyperopt import hp
space = {'loss': hp.choice('liblinear_combination', [{'loss': "epsilon_insensitive", 'dual': "True"},
{'loss': "squared_epsilon_insensitive",
'dual': "True"},
{'loss': "squared_epsilon_insensitive",
'dual': "False"}]),
'dual': None,
'tol': hp.loguniform('liblinear_tol', np.log(1e-5), np.log(1e-1)),
'C': hp.loguniform('liblinear_C', np.log(0.03125), np.log(32768)),
'fit_intercept': hp.choice('liblinear_fit_intercept', ["True"]),
'intercept_scaling': hp.choice('liblinear_intercept_scaling', [1])}
init_trial = {'loss': {'loss': "epsilon_insensitive", 'dual': "True"},
'tol': 1e-4,
'C': 1,
'fit_intercept': "True",
'intercept_scaling': 1}
return space
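# Editor's usage sketch (not part of the original module); the data variables below
# are hypothetical. A configuration sampled from the SMAC space maps directly onto
# the constructor arguments:
#   cs = LibLinear_SVR.get_hyperparameter_search_space(optimizer='smac')
#   config = cs.sample_configuration().get_dictionary()
#   model = LibLinear_SVR(**config, random_state=1).fit(X_train, y_train)
#   y_pred = model.predict(X_test)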
| 44.381356
| 116
| 0.554898
|
b1f689a2ad2c7d4ca28c6d1e837457c3c824cc82
| 1,095
|
py
|
Python
|
common/migrations/0037_inkinddonation.py
|
baylee-d/cos.io
|
3f88acb0feb7a167bf9e81c42e28f9d2d38bbd43
|
[
"Apache-2.0"
] | null | null | null |
common/migrations/0037_inkinddonation.py
|
baylee-d/cos.io
|
3f88acb0feb7a167bf9e81c42e28f9d2d38bbd43
|
[
"Apache-2.0"
] | null | null | null |
common/migrations/0037_inkinddonation.py
|
baylee-d/cos.io
|
3f88acb0feb7a167bf9e81c42e28f9d2d38bbd43
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-02 13:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.wagtailsearch.index
class Migration(migrations.Migration):
dependencies = [
('common', '0036_auto_20161101_1847'),
]
operations = [
migrations.CreateModel(
name='InkindDonation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('thank_you_message', models.TextField()),
('organization', modelcluster.fields.ParentalKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inkind_donations', to='common.Organization', verbose_name='Organization')),
],
options={
'ordering': ['date'],
},
bases=(models.Model, wagtail.wagtailsearch.index.Indexed),
),
]
| 34.21875
| 222
| 0.629224
|
b51245d6fcd79004212670109d8ce6400a10d406
| 495
|
py
|
Python
|
53/53.py
|
bobismijnnaam/bobe-euler
|
111abdf37256d19c4a8c4e1a071db52929acf9d9
|
[
"MIT"
] | null | null | null |
53/53.py
|
bobismijnnaam/bobe-euler
|
111abdf37256d19c4a8c4e1a071db52929acf9d9
|
[
"MIT"
] | null | null | null |
53/53.py
|
bobismijnnaam/bobe-euler
|
111abdf37256d19c4a8c4e1a071db52929acf9d9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from Utils import *
def factorial(n):
if n == 0: return 1
return n * factorial(n - 1)
def nCr(n, r):
assert(r <= n)
    return factorial(n) // (factorial(r) * factorial(n - r))  # integer division: nCr is always an integer
if __name__ == "__main__":
# Your code here!
count = 0
for n in range(1, 100 + 1):
print("Checking", n)
print("Progress", count)
for r in range(1, n + 1):
if nCr(n, r) > 1000000:
count += 1
print("Count", count)
| 21.521739
| 59
| 0.523232
|
e7af09a76cbca0325b408647b37f4dad45789e2b
| 519
|
py
|
Python
|
backend/run.py
|
Fungramming/fonters
|
7d2f024dbb8c996e529b9189afd542780d3d0b67
|
[
"MIT"
] | 2
|
2019-07-10T00:36:54.000Z
|
2019-07-10T00:36:57.000Z
|
backend/run.py
|
Fungramming/fonters
|
7d2f024dbb8c996e529b9189afd542780d3d0b67
|
[
"MIT"
] | null | null | null |
backend/run.py
|
Fungramming/fonters
|
7d2f024dbb8c996e529b9189afd542780d3d0b67
|
[
"MIT"
] | 1
|
2019-07-10T00:37:03.000Z
|
2019-07-10T00:37:03.000Z
|
from flask import Flask, request, jsonify
from flask_cors import CORS
from tinydb import TinyDB, Query
application = Flask(__name__)
app = application
cors = CORS(app, resources={"*": {"origins":"*"}})
@app.route('/', methods=['PUT'])
def save_email():
try:
name, email = request.get_json().get('name'), request.get_json().get('email')
db = TinyDB('db.json')
db.insert({'name': name, 'email': email})
return jsonify({'result': 'success'}), 201
    except Exception:
return jsonify({'result': 'error'}), 401
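# Editor's usage sketch (not part of the original app); URL and payload are
# illustrative. The endpoint accepts a PUT request with a JSON body:
#   import requests
#   r = requests.put("http://localhost:5000/", json={"name": "Ada", "email": "ada@example.com"})
#   # 201 with {"result": "success"} means the record was appended to db.json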
| 28.833333
| 81
| 0.655106
|
11af4932e9d2acbc94ed2be7ddcbe8b75256a292
| 695
|
py
|
Python
|
cinder/image/__init__.py
|
alexpilotti/cinder
|
df2f070604dad61738ccd3113016f76f2af20cae
|
[
"Apache-2.0"
] | null | null | null |
cinder/image/__init__.py
|
alexpilotti/cinder
|
df2f070604dad61738ccd3113016f76f2af20cae
|
[
"Apache-2.0"
] | null | null | null |
cinder/image/__init__.py
|
alexpilotti/cinder
|
df2f070604dad61738ccd3113016f76f2af20cae
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
| 40.882353
| 79
| 0.709353
|
a59f3b12efee1adf5dd3e0f25beab6410ae72e91
| 31,373
|
py
|
Python
|
stix_shifter_modules/guardium/stix_translation/query_constructor.py
|
nkhetia31/stix-shifter
|
ace07581cb227fd35e450b2f8871475227a041d0
|
[
"Apache-2.0"
] | null | null | null |
stix_shifter_modules/guardium/stix_translation/query_constructor.py
|
nkhetia31/stix-shifter
|
ace07581cb227fd35e450b2f8871475227a041d0
|
[
"Apache-2.0"
] | 42
|
2022-01-28T04:35:38.000Z
|
2022-03-31T04:31:04.000Z
|
stix_shifter_modules/guardium/stix_translation/query_constructor.py
|
subbyte/stix-shifter
|
36d71c172a5fc5b97d872e623753b0dd1bf4fe6c
|
[
"Apache-2.0"
] | null | null | null |
import re
import json
import datetime
import copy
from stix_shifter_utils.stix_translation.src.patterns.pattern_objects import ObservationExpression, ComparisonExpression, \
ComparisonExpressionOperators, ComparisonComparators, Pattern, \
CombinedComparisonExpression, CombinedObservationExpression, ObservationOperators
from stix_shifter_modules.guardium.stix_translation.transformers import TimestampToGuardium
from stix_shifter_utils.stix_translation.src.json_to_stix import observable
from stix_shifter_utils.utils.file_helper import read_json
from stix_shifter_utils.utils import logger
DEFAULT_DAYS_BACK = 2
class QueryStringPatternTranslator:
def __init__(self, pattern: Pattern, data_model_mapper, options, transformers):
self.dmm = data_model_mapper
self.comparator_lookup = self.dmm.map_comparator()
self.pattern = pattern
self.logger = logger.set_logger(__name__)
# Now report_params_passed is a JSON object which is pointing to an array of JSON Objects (report_params_array)
self.report_params_passed = {}
self.report_params_array = []
self.report_params_array_size = 0
# Now qsearch_params_passed is a JSON object which is pointing to an array of JSON Objects (qsearch_params_array)
self.qsearch_params_passed = {}
self.qsearch_params_array = []
self.qsearch_params_array_size = 0
self.translated = self.parse_expression(pattern)
self.transformers = transformers
# Read report definition data
self.REPORT_DEF = read_json('guardium_reports_def', options)
# Read report definition data
self.REPORT_PARAMS_MAP = read_json('guardium_report_params_map', options)
# Read qsearch definition data
self.QSEARCH_DEF = read_json('guardium_qsearch_def', options)
# Read qsearch definition data
self.QSEARCH_PARAMS_MAP = read_json('guardium_qsearch_params_map', options)
def set_report_params_passed(self, params_array):
self.report_params_array = params_array
self.report_params_array_size = len(params_array)
return
def set_qsearch_params_passed(self, params_array):
self.qsearch_params_array = params_array
self.qsearch_params_array_size = len(params_array)
return
def transform_report_call_to_json(self, report_call):
        # Convert the report call (string) into an array of JSON objects. Note: inside each json obj, multiple key/value parameters are "OR"ed,
        # whereas key/value parameters from two different json objects are "AND"ed.
        # Put quotes around keys
# print(report_call)
regex = r"([a-zA-Z_]+)(\s=)"
out_str = re.sub(regex, r"'\1' :", report_call, 0)
# Create the Json structure
regex1 = r"\(|\)"
out_str = re.sub(regex1, "", out_str, 0)
regex2 = r"\sAND\s"
out_str = "{" + re.sub(regex2, "} AND {", out_str, 0) + "}"
regex3 = r"START"
out_str = re.sub(regex3, "} AND {START ", out_str, 0)
# treat START and STOP parameters too
regex4 = r"(START|STOP)"
out_str = re.sub(regex4, r"'\1' : ", out_str, 0)
regex5 = r"([Z\'\s]+STOP)"
out_str = re.sub(regex5, r"'} AND {'STOP", out_str, 0)
regex6 = r"(START|STOP)\'[\s\:t\']+"
out_str = re.sub(regex6, r"\1' : '", out_str, 0)
# Finalize the structure -- replace by comma and then it becomes string containing
# an array of Json objects
regex7 = r"\sOR|\sAND"
out_str = re.sub(regex7, r",", out_str, 0)
# Single quotes have to be replaced by double quotes in order to make it as an Json obj
regex8 = r"'"
out_str = "[" + re.sub(regex8, '"', out_str, 0) + "]"
return json.loads(out_str)
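    # Editor's illustration (not part of the original source); the field names are
    # hypothetical. A translated call such as
    #   (Client_IP = '10.0.0.1') AND (DB_User = 'admin')
    # is rewritten by the regex passes above into
    #   [{"Client_IP": "10.0.0.1"}, {"DB_User": "admin"}]
    # i.e. one JSON object per AND-ed group, with OR-ed parameters kept inside the
    # same object.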
def transform_qsearch_call_to_json(self, qsearch_call):
        # Convert the qsearch call (string) into an array of JSON objects. Note: inside each json obj, multiple key/value parameters are "OR"ed,
        # whereas key/value parameters from two different json objects are "AND"ed.
        # Put quotes around keys
# print(report_call)
regex = r"(^|\(|OR |AND )([a-zA-Z_ ]+)(\s=)"
out_str = re.sub(regex, r"\1'\2' :", qsearch_call, 0)
# Create the Json structure
regex1 = r"\(|\)"
out_str = re.sub(regex1, "", out_str, 0)
regex2 = r"\sAND\s"
out_str = "{" + re.sub(regex2, "} AND {", out_str, 0) + "}"
regex3 = r"START"
out_str = re.sub(regex3, "} AND {START ", out_str, 0)
# treat START and STOP parameters too
regex4 = r"(START|STOP)"
out_str = re.sub(regex4, r"'\1' : ", out_str, 0)
regex5 = r"([Z\'\s]+STOP)"
out_str = re.sub(regex5, r"'} AND {'STOP", out_str, 0)
regex6 = r"(START|STOP)\'[\s\:t\']+"
out_str = re.sub(regex6, r"\1' : '", out_str, 0)
# Finalize the structure -- replace by comma and then it becomes string containing
# an array of Json objects
regex7 = r"\sOR|\sAND"
out_str = re.sub(regex7, r",", out_str, 0)
# Single quotes have to be replaced by double quotes in order to make it as an Json obj
regex8 = r"'"
out_str = "[" + re.sub(regex8, '"', out_str, 0) + "]"
return json.loads(out_str)
    # Guardium report parameters are "AND"ed in a Guardium query.
# Our Json object array contains multiple json objects. Each object may have one or many key/value pairs -- these are report params
# Problem statement: get an array of json objects containing parameters which support a guardium report call
def build_array_of_guardium_report_params(self, result_array, result_position, current_result_object, params_array, current_position):
param_list_size = len(params_array)
if current_result_object is None:
current_result_object = {}
if current_position is None:
current_position = 0
else:
current_position = current_position + 1
if current_position < param_list_size:
param_json_object = params_array[current_position]
for param in param_json_object:
# Keep a copy of current_result_object before any modification from this invocation
cp_current_result_object = copy.deepcopy(current_result_object)
# Insert the param in the current_result_object
if param not in cp_current_result_object:
cp_current_result_object[param] = param_json_object[param]
if (current_position + 1) < param_list_size:
result_array = self.build_array_of_guardium_report_params(result_array, result_position, cp_current_result_object, params_array, current_position)
else:
result_array.append(cp_current_result_object)
result_position = result_position + 1
return result_array
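    # Editor's illustration (not part of the original source): the recursion above
    # expands the OR-ed alternatives of each AND-ed group into a cartesian product,
    # e.g. params_array = [{"A": 1, "B": 2}, {"C": 3}] yields
    # [{"A": 1, "C": 3}, {"B": 2, "C": 3}].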
def build_array_of_guardium_qsearch_params(self, result_array, result_position, current_result_object, params_array, current_position):
param_list_size = len(params_array)
if current_result_object is None:
current_result_object = {}
if current_position is None:
current_position = 0
else:
current_position = current_position + 1
if current_position < param_list_size:
param_json_object = params_array[current_position]
for param in param_json_object:
# Keep a copy of current_result_object before any modification from this invocation
cp_current_result_object = copy.deepcopy(current_result_object)
# Insert the param in the current_result_object
if param not in cp_current_result_object:
cp_current_result_object[param] = param_json_object[param]
if (current_position + 1) < param_list_size:
result_array = self.build_array_of_guardium_qsearch_params(result_array, result_position, cp_current_result_object, params_array, current_position)
else:
result_array.append(cp_current_result_object)
result_position = result_position + 1
return result_array
def substitute_params_passed(self, report_definitions, reports_in_query):
# for Each report in report_definitions substitute params for report Params Passed
# generate all reports for the query
        # In the event START and STOP are missing, generate the default From and To dates:
        # TO_DATE IS SET TO NOW
        # FROM_DATE IS SET TO DEFAULT_DAYS_BACK DAYS BEFORE NOW
current_date = datetime.datetime.now()
default_to_date = current_date.strftime(('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z')
default_from_date = (current_date - datetime.timedelta(days=DEFAULT_DAYS_BACK)).strftime(('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z')
for report_name in report_definitions:
report = report_definitions[report_name]
for param in report["reportParameter"]:
# either the value will be default or passed in (report parameter passed)
if param not in self.report_params_passed:
value = report["reportParameter"][param]["default"]
else:
value = self.report_params_passed[param]
# Use START and STOP instead of default to time parameter
if report["reportParameter"][param]["info"] == "START":
value = self.report_params_passed.get("START", default_from_date)
if report["reportParameter"][param]["info"] == "STOP":
value = self.report_params_passed.get("STOP", default_to_date)
# Transform the value or use it as-is
if "transformer" in report["reportParameter"][param]:
transformer = self.transformers[report["reportParameter"][param]["transformer"]]
report["reportParameter"][param] = transformer.transform(value)
else:
report["reportParameter"][param] = value
reports_in_query.append(json.dumps(report))
return reports_in_query
def substitute_qsearch_params_passed(self, qsearch_definitions, qsearch_in_query):
# for Each report in report_definitions substitute params for report Params Passed
# generate all reports for the query
        # In the event START and STOP are missing, generate the default From and To dates:
        # TO_DATE IS SET TO NOW
        # FROM_DATE IS SET TO DEFAULT_DAYS_BACK DAYS BEFORE NOW
current_date = datetime.datetime.now()
default_to_date = current_date.strftime(('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z')
default_from_date = (current_date - datetime.timedelta(days=DEFAULT_DAYS_BACK)).strftime(('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z')
for qsearch_name in qsearch_definitions:
qsearch = qsearch_definitions[qsearch_name]
sta = self.qsearch_params_passed.get("START", default_from_date)
sto = self.qsearch_params_passed.get("STOP", default_to_date)
qsearch["startTime"] = self.transformers[qsearch["startTime"]["transformer"]].transform(sta)
qsearch["endTime"] = self.transformers[qsearch["endTime"]["transformer"]].transform(sto)
for param in qsearch["filters"]:
# either the value will be default or passed in (report parameter passed)
if param not in self.qsearch_params_passed:
value = qsearch["filters"][param]["default"]
else:
value = self.qsearch_params_passed[param]
# Transform the value or use it as-is
if "transformer" in qsearch["filters"][param]:
transformer = self.transformers[qsearch["filters"][param]["transformer"]]
qsearch["filters"][param] = transformer.transform(value)
else:
qsearch["filters"][param] = value
for param in qsearch["query"]:
if param in self.qsearch_params_passed:
value = self.qsearch_params_passed[param]
if "transformer" in qsearch["query"][param]:
transformer = self.transformers[qsearch["query"][param]["transformer"]]
qsearch["query"][param]["value"] = transformer.transform(value)
qsearch["query"][param]["operation"] = qsearch["query"][param]["default_operator"]
qsearch_in_query.append(json.dumps(qsearch))
return qsearch_in_query
def get_report_params(self):
reports_in_query = []
for report_param_index in range(self.report_params_array_size):
self.report_params_passed = self.report_params_array[report_param_index]
data_category = (self.report_params_passed).get("datacategory", None)
if(data_category is not None):
if data_category not in self.REPORT_DEF:
report_definitions = None
else:
report_definitions = copy.deepcopy(self.REPORT_DEF[data_category])
else:
report_definitions = self.generate_report_definitions()
# substitute Params
if report_definitions:
reports_in_query = self.substitute_params_passed(report_definitions, reports_in_query)
return reports_in_query
def get_qsearch_params(self):
qsearch_in_query = []
for qsearch_param_index in range(self.qsearch_params_array_size):
self.qsearch_params_passed = self.qsearch_params_array[qsearch_param_index]
#clientip = self.qsearch_params_passed.get("Client", None)
#if clientip is not None:
# continue
data_category = self.qsearch_params_passed.get("datacategory", None)
if data_category is not None:
if data_category not in self.QSEARCH_DEF:
qsearch_definitions = None
else:
qsearch_definitions = copy.deepcopy(self.QSEARCH_DEF[data_category])
else:
qsearch_definitions = self.generate_qsearch_definitions()
# substitute Params
if qsearch_definitions:
qsearch_in_query = self.substitute_qsearch_params_passed(qsearch_definitions, qsearch_in_query)
self.set_filters_format(qsearch_in_query)
self.set_query_format(qsearch_in_query)
return qsearch_in_query
def set_filters_format(self, qse):
for i in range(len(qse)):
filters = json.loads(qse[i])["filters"]
qse_prefix = qse[i][0:str.find(qse[i], "filters") - 1:1]
qse_suffix = qse[i][str.find(qse[i], ", \"query")::1]
str_filters = ''
first = True
for key in filters:
if filters[key] == '*':
continue
if first:
first = False
else:
str_filters = str_filters + "&"
str_filters = str_filters + "name=" + key + "&" + "value=" + filters[key] + "&isGroup=false"
if str_filters.__len__() > 0:
str_filters = "\"filters\":\"" + str_filters + "\""
qse[i] = qse_prefix + str_filters + qse_suffix
else:
qse[i] = qse_prefix + qse_suffix[2::1]
def set_query_format(self, qse):
for i in range(len(qse)):
query = json.loads(qse[i])["query"]
qse_prefix = qse[i][0:str.find(qse[i], "query") - 1:1]
qse_suffix = qse[i][str.find(qse[i], ", \"fetchSize")::1]
str_query = ''
first = True
for key in query:
if "value" not in query[key]:
continue
if first:
first = False
else:
str_query = str_query + " AND "
str_query = str_query + key + query[key]["operation"] +query[key]["value"]
if str_query.__len__() > 0:
str_query = "\"query\":\"" + str_query + "\""
qse[i] = qse_prefix + str_query + qse_suffix
else:
qse[i] = qse_prefix + qse_suffix[2::1]
def generate_report_definitions(self):
# for Each param passed get all reports pertaining to that params -- this is a set of param reports
# then take intersection of each set
# if the intersection is null use the default Category
report_set = None
param_map = self.REPORT_PARAMS_MAP["maps"]
param_cmn = self.REPORT_PARAMS_MAP["common"]
for param in self.report_params_passed:
if param in param_map:
param_set = set(param_map[param])
elif param in param_cmn:
param_set = set(self.REPORT_PARAMS_MAP["defaultReports"])
else:
param_set = None
# find interaction
# param_set
if param_set is not None:
if report_set is None:
report_set = set(param_set)
else:
report_set = report_set.intersection(param_set)
# Check if report_set is null
if (not bool(report_set)):
report_set = self.REPORT_PARAMS_MAP["defaultReports"]
# Now we have to create report_definitions from this report_set
# Report set --> data_category:report_name
# Iterate through report_definitions and pick the reports and place them in the report Defs
#
report_definitions = {}
for key in report_set:
data_category, report = key.split(":")
'''if data_category not in self.REPORT_DEF:
raise RuntimeError(
"Error in parameter mapping file (data category): " + str(data_category) + " not there. Ingored.")
else:'''
data_category_reports = copy.deepcopy(self.REPORT_DEF[data_category])
'''if report not in data_category_reports:
raise RuntimeError(
"Error in parameter mapping file (report name): " + str(report) + " not there. Ingored.")
else:'''
report_definitions[report] = data_category_reports[report]
return report_definitions
def generate_qsearch_definitions(self):
# for Each param passed get all qsearchs pertaining to that params -- this is a set of param qsearchs
# then take intersection of each set
# if the intersection is null use the default Category
qsearch_set = None
param_map = self.QSEARCH_PARAMS_MAP["maps"]
param_cmn = self.QSEARCH_PARAMS_MAP["common"]
for param in self.qsearch_params_passed:
if param in param_map:
param_set = set(param_map[param])
elif param in param_cmn:
param_set = set(self.QSEARCH_PARAMS_MAP["defaultQsearch"])
else:
param_set = None
# find interaction
# param_set
if param_set is not None:
if qsearch_set is None:
qsearch_set = set(param_set)
else:
qsearch_set = qsearch_set.intersection(param_set)
# Check if qsearch_set is null
if (not bool(qsearch_set)):
qsearch_set = self.QSEARCH_PARAMS_MAP["defaultQsearch"]
# Now we have to create qsearch_definitions from this qsearch_set
# Qsearch set --> data_category:qsearch_name
# Iterate through qsearch_definitions and pick the qsearchs and place them in the qsearch Defs
#
qsearch_definitions = {}
for key in qsearch_set:
data_category, qsearch = key.split(":")
''' if data_category not in self.QSEARCH_DEF:
raise RuntimeError(
"Error in parameter mapping file (data category): " + str(
data_category) + " not there. Ingored.")
else:'''
data_category_qsearch = copy.deepcopy(self.QSEARCH_DEF[data_category])
'''if qsearch not in data_category_qsearch:
raise RuntimeError(
"Error in parameter mapping file (qsearch name): " + str(qsearch) + " not there. Ingored.")
else: '''
qsearch_definitions[qsearch] = data_category_qsearch[qsearch]
return qsearch_definitions
@staticmethod
def _format_set(values) -> str:
gen = values.element_iterator()
return "({})".format(' OR '.join([QueryStringPatternTranslator._escape_value(value) for value in gen]))
@staticmethod
def _format_match(value) -> str:
raw = QueryStringPatternTranslator._escape_value(value)
if raw[0] == "^":
raw = raw[1:]
else:
raw = ".*" + raw
if raw[-1] == "$":
raw = raw[0:-1]
else:
raw = raw + ".*"
return "\'{}\'".format(raw)
@staticmethod
def _format_equality(value) -> str:
return '\'{}\''.format(value)
@staticmethod
def _format_like(value) -> str:
value = "'%{value}%'".format(value=value)
return QueryStringPatternTranslator._escape_value(value)
@staticmethod
def _escape_value(value, comparator=None) -> str:
if isinstance(value, str):
return '{}'.format(value.replace('\\', '\\\\').replace('\"', '\\"').replace('(', '\\(').replace(')', '\\)'))
else:
return value
@staticmethod
def _negate_comparison(comparison_string):
return "NOT({})".format(comparison_string)
@staticmethod
def _check_value_type(value):
value = str(value)
for key, pattern in observable.REGEX.items():
if key != 'date' and bool(re.search(pattern, value)):
return key
return None
@staticmethod
def _parse_mapped_fields(self, expression, value, comparator, stix_field, mapped_fields_array):
comparison_string = ""
is_reference_value = self._is_reference_value(stix_field)
        # Need to use expression.value to match against regex since the passed-in value has already been formatted.
value_type = self._check_value_type(expression.value) if is_reference_value else None
mapped_fields_count = 1 if is_reference_value else len(mapped_fields_array)
for mapped_field in mapped_fields_array:
if is_reference_value:
parsed_reference = "{mapped_field} {comparator} {value}".format(mapped_field=mapped_field, comparator=comparator, value=value)
if not parsed_reference:
continue
comparison_string += parsed_reference
else:
comparison_string += "{mapped_field} {comparator} {value}".format(mapped_field=mapped_field, comparator=comparator, value=value)
#self.report_params_passed[mapped_field] = str(value).replace("'","",10)
if (mapped_fields_count > 1):
comparison_string += " OR "
mapped_fields_count -= 1
return comparison_string
@staticmethod
def _is_reference_value(stix_field):
return stix_field == 'src_ref.value' or stix_field == 'dst_ref.value'
def _parse_expression(self, expression, qualifier=None) -> str:
if isinstance(expression, ComparisonExpression): # Base Case
# Resolve STIX Object Path to a field in the target Data Model
stix_object, stix_field = expression.object_path.split(':')
# Multiple data source fields may map to the same STIX Object
mapped_fields_array = self.dmm.map_field(stix_object, stix_field)
# Resolve the comparison symbol to use in the query string (usually just ':')
comparator = self.comparator_lookup[str(expression.comparator)]
if stix_field == 'start' or stix_field == 'end':
transformer = TimestampToGuardium()
expression.value = transformer.transform(expression.value)
# Some values are formatted differently based on how they're being compared
if expression.comparator == ComparisonComparators.Matches: # needs forward slashes
value = self._format_match(expression.value)
# should be (x, y, z, ...)
elif expression.comparator == ComparisonComparators.In:
value = self._format_set(expression.value)
elif expression.comparator == ComparisonComparators.Equal or expression.comparator == ComparisonComparators.NotEqual:
# Should be in single-quotes
value = self._format_equality(expression.value)
# '%' -> '*' wildcard, '_' -> '?' single wildcard
elif expression.comparator == ComparisonComparators.Like:
value = self._format_like(expression.value)
else:
value = self._escape_value(expression.value)
comparison_string = self._parse_mapped_fields(self, expression, value, comparator, stix_field, mapped_fields_array)
if(len(mapped_fields_array) > 1 and not self._is_reference_value(stix_field)):
# More than one data source field maps to the STIX attribute, so group comparisons together.
grouped_comparison_string = "(" + comparison_string + ")"
comparison_string = grouped_comparison_string
if expression.comparator == ComparisonComparators.NotEqual:
comparison_string = self._negate_comparison(comparison_string)
if expression.negated:
comparison_string = self._negate_comparison(comparison_string)
if qualifier is not None:
return "{} {}".format(comparison_string, qualifier)
else:
return "{}".format(comparison_string)
elif isinstance(expression, CombinedComparisonExpression):
operator = self.comparator_lookup[str(expression.operator)]
expression_01 = self._parse_expression(expression.expr1)
expression_02 = self._parse_expression(expression.expr2)
if not expression_01 or not expression_02:
return ''
if isinstance(expression.expr1, CombinedComparisonExpression):
expression_01 = "({})".format(expression_01)
if isinstance(expression.expr2, CombinedComparisonExpression):
expression_02 = "({})".format(expression_02)
query_string = "{} {} {}".format(expression_01, operator, expression_02)
if qualifier is not None:
return "{} {}".format(query_string, qualifier)
else:
return "{}".format(query_string)
elif isinstance(expression, ObservationExpression):
return self._parse_expression(expression.comparison_expression, qualifier)
elif hasattr(expression, 'qualifier') and hasattr(expression, 'observation_expression'):
if isinstance(expression.observation_expression, CombinedObservationExpression):
operator = self.comparator_lookup[str(expression.observation_expression.operator)]
# qualifier only needs to be passed into the parse expression once since it will be the same for both expressions
return "{expr1} {operator} {expr2}".format(expr1=self._parse_expression(expression.observation_expression.expr1),
operator=operator,
expr2=self._parse_expression(expression.observation_expression.expr2, expression.qualifier))
else:
return self._parse_expression(expression.observation_expression.comparison_expression, expression.qualifier)
elif isinstance(expression, CombinedObservationExpression):
operator = self.comparator_lookup[str(expression.operator)]
expression_01 = self._parse_expression(expression.expr1)
expression_02 = self._parse_expression(expression.expr2)
if expression_01 and expression_02:
return "({}) {} ({})".format(expression_01, operator, expression_02)
elif expression_01:
return "{}".format(expression_01)
elif expression_02:
return "{}".format(expression_02)
else:
return ''
elif isinstance(expression, Pattern):
return "{expr}".format(expr=self._parse_expression(expression.expression))
else:
raise RuntimeError("Unknown Recursion Case for expression={}, type(expression)={}".format(
expression, type(expression)))
def parse_expression(self, pattern: Pattern):
return self._parse_expression(pattern)
def translate_pattern(pattern: Pattern, data_model_mapping, options, transformers):
# Converting query object to datasource query
# timerange set to 24 hours for Guardium; timerange is provided in minutes (as delta)
guardium_query_translator = QueryStringPatternTranslator(pattern, data_model_mapping, options, transformers)
report_call = guardium_query_translator.translated
# Add space around START STOP qualifiers
report_call = re.sub("START", "START ", report_call)
report_call = re.sub("STOP", " STOP ", report_call)
    # Subroto: I did not change the code much, just adapted it to get the report parameters
    # Subroto: added code so that report search parameters are "AND"ed when sent to Guardium
# translate the structure of report_call
if data_model_mapping.dialect == 'report':
json_report_call = guardium_query_translator.transform_report_call_to_json(report_call)
else:
json_qsearch_call = guardium_query_translator.transform_qsearch_call_to_json(report_call)
result_array = []
result_position = 0
if data_model_mapping.dialect == 'report':
output_array = guardium_query_translator.build_array_of_guardium_report_params(result_array, result_position, None, json_report_call, None)
guardium_query_translator.set_report_params_passed(output_array)
report_header = guardium_query_translator.get_report_params()
else:
output_array = guardium_query_translator.build_array_of_guardium_qsearch_params(result_array, result_position, None, json_qsearch_call, None)
guardium_query_translator.set_qsearch_params_passed(output_array)
report_header = guardium_query_translator.get_qsearch_params()
if report_header:
# Change return statement as required to fit with data source query language.
# If supported by the language, a limit on the number of results may be desired.
# A single query string, or an array of query strings may be returned
return report_header
else:
# report_header = {"ID": 2000, "message": "Could not generate query -- issue with data_category."}
return report_header
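# Editor's usage sketch (not part of the original module); the mapper, options and
# transformer objects are assumed to come from the stix-shifter runtime. A STIX
# pattern such as
#   "[ipv4-addr:value = '10.0.0.1'] START t'2021-01-01T00:00:00Z' STOP t'2021-01-02T00:00:00Z'"
# is parsed upstream into a Pattern object, and
#   translate_pattern(pattern, data_model_mapping, options, transformers)
# then returns a list of JSON-encoded Guardium report (or qsearch) definitions.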
| 48.943838
| 171
| 0.629076
|
5b76d9b962462d851e44afa840cefa5e34788c7e
| 2,595
|
py
|
Python
|
redleader/managers/codedeploy.py
|
mmcdermo/RedLeader
|
13c5261e839fc3373a8de00d49187220778091a4
|
[
"Apache-2.0"
] | null | null | null |
redleader/managers/codedeploy.py
|
mmcdermo/RedLeader
|
13c5261e839fc3373a8de00d49187220778091a4
|
[
"Apache-2.0"
] | null | null | null |
redleader/managers/codedeploy.py
|
mmcdermo/RedLeader
|
13c5261e839fc3373a8de00d49187220778091a4
|
[
"Apache-2.0"
] | null | null | null |
import os.path
import os
import time
import tarfile
import random
import redleader.util as util
class CodeDeployManager(object):
def __init__(self, context):
self._context = context
def create_code_deploy_package(self, path):
n = "code_deploy_package%s.tgz" % random.randrange(0, 10000000)
self.make_tarfile("./%s" % n, path)
return n
@staticmethod
def make_tarfile(output_filename, source_dir):
with tarfile.open(output_filename, "w:gz") as tar:
files = os.listdir(source_dir)
for f in files:
tar.add(os.path.join(source_dir, f), arcname=f)
def upload_package(self, bucket_name, path, name):
print("Uploading %s to bucket %s/%s" % (path, bucket_name, name))
client = self._context.get_client('s3')
f = client.upload_file(path, bucket_name, name)
# TODO. Configure bucket permissions so that an
# IAM policy with s3::GetObject is sufficient.
# Then we can remove setting ACL to authenticated-read
client.put_object_acl(Bucket=bucket_name, Key=name,
ACL='authenticated-read')
return f
def create_deployment(self, application_name, deployment_group_name,
path, bucket_name, version="0", blocking=True, verbose=True):
package = self.create_code_deploy_package(path)
x = self.upload_package(bucket_name, "./%s" % package, package)
client = self._context.get_client('codedeploy')
res = client.create_deployment(
applicationName=application_name,
deploymentGroupName=deployment_group_name,
revision={
'revisionType': 'S3',
's3Location': {
'bucket': bucket_name,
'key': package,
'bundleType': 'tgz',
}
})
os.remove(package)
deploymentId = res['deploymentId']
if not blocking:
return deploymentId
i = 0
if verbose:
print("Deploying..")
while client.get_deployment(deploymentId=deploymentId)['deploymentInfo']['status'] in ["Created", "InProgress"]:
i += 1
if verbose:
util.print_progress(i)
time.sleep(2)
if verbose:
print("Deployment Status: %s" % client.get_deployment(deploymentId=deploymentId)['deploymentInfo']['status'])
return (deploymentId, client.get_deployment(deploymentId=deploymentId)['deploymentInfo']['status'])
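# Illustrative usage sketch (not part of the original module); the RedLeader context object,
# application, deployment group and bucket names below are placeholders.
#
#   manager = CodeDeployManager(context)
#   deployment_id, status = manager.create_deployment(
#       "my-application", "my-deployment-group",
#       path="./build", bucket_name="my-deploy-bucket")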
| 38.731343
| 121
| 0.603468
|
4a472726180cd7f56cfc984d3ea57f4391b97996
| 2,923
|
py
|
Python
|
helpme.py
|
buckbaskin/paPYrus
|
815858715beb74814699277ae0377894d4f54f5b
|
[
"MIT"
] | null | null | null |
helpme.py
|
buckbaskin/paPYrus
|
815858715beb74814699277ae0377894d4f54f5b
|
[
"MIT"
] | null | null | null |
helpme.py
|
buckbaskin/paPYrus
|
815858715beb74814699277ae0377894d4f54f5b
|
[
"MIT"
] | null | null | null |
from pylatex import Document, Section, Subsection, Subsubsection, Command, UnsafeCommand
from pylatex.utils import italic, NoEscape
default_package_list = [
'parskip',
# \usepackage[utf8]{inputenc}
# \usepackage[english]{babel}
'listings',
'color',
'verbatim',
'soul',
# \usepackage[margin=0.69in]{geometry}
'amsmath',
'amssymb',
'amsthm',
'gensymb',
'graphicx',
]
def stuff(doc, content):
doc.append(NoEscape(content))
def myusual(doc):
stuff(doc.preamble, NoEscape(r'\usepackage[english]{babel}'))
stuff(doc.preamble, NoEscape(r'\usepackage[margin=0.69in]{geometry}'))
for package in default_package_list:
doc.preamble.append(Command('usepackage', package))
stuff(doc.preamble, NoEscape(r'\definecolor{dkgreen}{rgb}{0, 0.6, 0}'))
stuff(doc.preamble, NoEscape(r'\definecolor{gray}{rgb}{0.5, 0.5, 0.5}'))
stuff(doc.preamble, NoEscape(r'\definecolor{mauve}{rgb}{0.58, 0, 0.82}'))
stuff(doc.preamble, NoEscape(
'''\\lstset{frame=tb,
language=Matlab,
aboveskip=3mm,
belowskip=3mm,
showstringspaces=false,
columns=flexible,
basicstyle={\\small\\ttfamily},
numbers=none,
numberstyle=\\tiny\\color{gray},
keywordstyle=\\color{blue},
commentstyle=\\color{dkgreen},
stringstyle=\\color{mauve},
breaklines=true,
breakatwhitespace=true,
tabsize=3
}'''))
'''
UnsafeCommand('newcommand', '\exampleCommand', options=3,
extra_arguments=r'\color{#1} #2 #3 \color{black}')
>>>
\newcommand{\exampleCommand}[3]{\color{#1} #2 #3 \color{black}}%
'''
'''
\DeclareMathOperator*{\argmax}{arg\,max}
\DeclareMathOperator*{\argmin}{arg\,min}
'''
doc.preamble.append(UnsafeCommand('DeclareMathOperator*', arguments=(r'\argmax', r'arg\,max')))
doc.preamble.append(UnsafeCommand('DeclareMathOperator*', arguments=(r'\argmin', r'arg\,min')))
    doc.preamble.append(UnsafeCommand('newcommand', r'\subsubsubsection', extra_arguments=(r'\paragraph',)))
    doc.preamble.append(UnsafeCommand('newcommand', r'\bbs', options=1, extra_arguments=(r'\section{#1}',)))
    doc.preamble.append(UnsafeCommand('newcommand', r'\bbbs', options=1, extra_arguments=(r'\subsection{#1}',)))
    doc.preamble.append(UnsafeCommand('newcommand', r'\bbbbs', options=1, extra_arguments=(r'\subsubsection{#1}',)))
    doc.preamble.append(UnsafeCommand('newcommand', r'\bbbbbs', options=1, extra_arguments=(r'\subsubsubsection{#1}',)))
doc.preamble.append(UnsafeCommand('newcommand', r'\norm', options=1, extra_arguments=(r'\left\lVert#1\right\rVert',)))
def build(doc, filename):
doc.generate_pdf(filename, clean_tex=False)
def section(doc, title):
return doc.create(Section(NoEscape(title)))
def subsection(doc, title):
return doc.create(Subsection(NoEscape(title)))
def subsubsection(doc, title):
return doc.create(Subsubsection(NoEscape(title)))
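# Illustrative usage sketch (assumes pylatex and a LaTeX toolchain are installed;
# the filename and content below are placeholders).
#
#   doc = Document()
#   myusual(doc)
#   with section(doc, 'Results'):
#       stuff(doc, r'$\argmin_x \norm{Ax - b}$ gives the least-squares solution.')
#   build(doc, 'report')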
| 34.797619
| 122
| 0.67807
|
f512272a00402a48af85bb7c1c37824364385541
| 1,387
|
py
|
Python
|
xawscf/commands/invoke.py
|
DmitryBogomolov/aws-cloudformation-sample
|
f0454b203973e07027a4cdf5f36468d137d310fd
|
[
"MIT"
] | null | null | null |
xawscf/commands/invoke.py
|
DmitryBogomolov/aws-cloudformation-sample
|
f0454b203973e07027a4cdf5f36468d137d310fd
|
[
"MIT"
] | 36
|
2018-04-20T06:11:41.000Z
|
2018-07-07T21:55:55.000Z
|
xawscf/commands/invoke.py
|
DmitryBogomolov/aws-cloudformation-sample
|
f0454b203973e07027a4cdf5f36468d137d310fd
|
[
"MIT"
] | null | null | null |
'''
Invokes lambda function.
'''
from logging import getLogger
import json
from ..utils.client import get_client
logger = getLogger(__name__)
def run(pattern, name, payload=None):
lambda_client = get_client(pattern, 'lambda')
function = pattern.get_function(name)
if not function:
logger.info('Function *{}* is unknown.'.format(name))
return 1
kwargs = {'FunctionName': function.full_name}
if payload:
kwargs['Payload'] = payload
try:
response = lambda_client.invoke(**kwargs)
except lambda_client.exceptions.ResourceNotFoundException:
logger.info('Function *{}* is not found.'.format(function.full_name))
return 1
except Exception as err: # pylint: disable=broad-except
logger.exception(err)
return 1
payload = json.loads(response['Payload'].read().decode('utf-8'))
if response.get('FunctionError'):
error_type = payload.get('errorType')
logger.info((error_type + ': ' if error_type else '') + payload.get('errorMessage'))
stack_trace = payload.get('stackTrace')
if stack_trace:
for file_name, line, func, code in stack_trace:
logger.info(' {}, {}, in {}'.format(file_name, line, func))
logger.info(' {}'.format(code))
return 1
logger.info(json.dumps(payload, indent=2))
return 0
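# Illustrative usage sketch (not part of the original module); `pattern` is assumed to be
# the loaded project pattern object used elsewhere in xawscf, and the function name and
# payload are placeholders.
#
#   exit_code = run(pattern, 'process-orders', payload='{"dry_run": true}')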
| 34.675
| 92
| 0.638068
|
055954a1aaa853cd9b659db8af90d7191a3b212e
| 3,728
|
py
|
Python
|
scripts/run_ycsb.py
|
byrantwithyou/YCSB
|
622bdd11a4c46fbd85600d27b2b1358d74939549
|
[
"Apache-2.0"
] | null | null | null |
scripts/run_ycsb.py
|
byrantwithyou/YCSB
|
622bdd11a4c46fbd85600d27b2b1358d74939549
|
[
"Apache-2.0"
] | null | null | null |
scripts/run_ycsb.py
|
byrantwithyou/YCSB
|
622bdd11a4c46fbd85600d27b2b1358d74939549
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import os
import signal
import argparse
import subprocess
def exec_cmd(cmd, out=None, cwd=None):
p = subprocess.Popen(cmd, shell=True, stdout=out, stderr=out, cwd=cwd)
p.wait()
if p.returncode != 0:
print("command %s is not successful!" % cmd)
exit(1)
return p
def recompile():
global SRC_PATH
exec_cmd("mvn -pl site.ycsb:rocksdb-binding -am clean package", cwd=SRC_PATH)
def run_ycsb():
global SRC_PATH
global args
global DBDIR
exec_cmd("sudo -v")
trace_cmd = "sudo bpftrace -o %s" % args.f + \
" -e ' tracepoint:syscalls:sys_exit_write /strncmp(" + \
'"rocksdb", comm, 7) == 0/ {@ = hist(args->ret) } interval:s:1 { print(@); }' + r"'"
p = subprocess.Popen(trace_cmd, shell=True)
ycsb_cmd = "./bin/ycsb {rl} rocksdb -s -p recordcount={rc} -p operationcount={oc} -P workloads/workload{workload} -p rocksdb.dir={DIR} -threads {C} -p rocksdb.optionsfile=option.ini".format(
rl=args.type, rc=args.r, oc=args.o, workload=args.workload, DIR=DBDIR, C=args.c)
exec_cmd(ycsb_cmd, cwd=SRC_PATH)
exec_cmd("sleep 10")
os.kill(p.pid, signal.SIGINT)
def handle_err():
print("argument invalid!")
exit(1)
def pre_work():
global SRC_PATH
exec_cmd("rm -f option.ini", cwd=SRC_PATH)
def generate_option_file():
global SRC_PATH
global args
exec_cmd("cp defaultoption.ini option.ini", cwd=SRC_PATH)
sstablesize = int(args.sstsize * 1024 * 1024)
cmd = 'sed -i "s/target_file_size_base=.*/target_file_size_base={sstsize}/g" option.ini'.format(
sstsize=sstablesize)
exec_cmd(cmd, cwd=SRC_PATH)
def deal_with_zipfian():
global SRC_PATH
global args
cmd = "sed -i " + \
r"'s/\(.*\)ZIPFIAN_CONSTANT\ .*/\1ZIPFIAN_CONSTANT\ = " + \
"{zip};/g'".format(zip=args.z) + " ZipfianGenerator.java"
ZIP_PATH = os.path.normpath(os.path.join(
SRC_PATH, "core", "src", "main", "java", "site", "ycsb", "generator"))
exec_cmd(cmd, cwd=ZIP_PATH)
if __name__ == "__main__":
FILE_PATH = os.path.abspath(os.path.dirname(__file__))
SRC_PATH = os.path.normpath(os.path.join(FILE_PATH, ".."))
parser = argparse.ArgumentParser()
parser.add_argument("-c", default=1, type=int,
help="the number of client thread")
parser.add_argument(
"workload", help="the workload from YCSB workload, can be one of a,b,c,d,e")
parser.add_argument("type", help="workload type, can be `load` or `run`")
parser.add_argument("-sstsize", default=64, type=int,
help="the size of the sstable in MB")
parser.add_argument("-z", default=0.99, type=float,
help="zipfian distribution value(0.5-0.99)")
parser.add_argument("-o", default=5000000, type=int,
help="operation count")
parser.add_argument("-r", default=5000000, type=int, help="record count")
parser.add_argument("-d", default="/tmp/ycsb-rocksdb-data",
help="directory to hold the database file, default is /tmp/ycsb-rocksdb-data")
parser.add_argument("-f", default="result",
help="filename to hold the workload profiling result")
args = parser.parse_args()
pre_work()
if args.r <= 0 or args.o <= 0 or args.c <= 0 or args.sstsize <= 0:
handle_err()
    if args.z not in [0.99, 0.5, 0.6, 0.7, 0.8, 0.9]:
        handle_err()
    if args.workload not in ("a", "b", "c", "d", "e") or args.type not in ("run", "load"):
        handle_err()
DBDIR = os.path.abspath(args.d)
exec_cmd("mkdir -p {DIR}".format(DIR=DBDIR))
generate_option_file()
deal_with_zipfian()
recompile()
run_ycsb()
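# Illustrative invocations (paths, sizes and counts are placeholders):
#   ./run_ycsb.py a load -c 4 -sstsize 64 -d /data/ycsb-rocksdb
#   ./run_ycsb.py a run -c 4 -z 0.99 -o 1000000 -r 1000000 -f workload_a_profile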
| 35.846154
| 194
| 0.624732
|
dc895ee15d93c64bbf3f5a26b820a0ef7a04ba3c
| 28,664
|
py
|
Python
|
lib/sqlalchemy/engine/create.py
|
eddebc/sqlalchemy
|
180ae7c1a53385f72b0047496ac001ec5099cc3e
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/engine/create.py
|
eddebc/sqlalchemy
|
180ae7c1a53385f72b0047496ac001ec5099cc3e
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/engine/create.py
|
eddebc/sqlalchemy
|
180ae7c1a53385f72b0047496ac001ec5099cc3e
|
[
"MIT"
] | null | null | null |
# engine/create.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base
from . import url as _url
from .mock import create_mock_engine
from .. import event
from .. import exc
from .. import pool as poollib
from .. import util
from ..sql import compiler
@util.deprecated_params(
strategy=(
"1.4",
"The :paramref:`_sa.create_engine.strategy` keyword is deprecated, "
"and the only argument accepted is 'mock'; please use "
":func:`.create_mock_engine` going forward. For general "
"customization of create_engine which may have been accomplished "
"using strategies, see :class:`.CreateEnginePlugin`.",
),
empty_in_strategy=(
"1.4",
"The :paramref:`_sa.create_engine.empty_in_strategy` keyword is "
"deprecated, and no longer has any effect. All IN expressions "
"are now rendered using "
        'the "expanding parameter" strategy which renders a set of bound '
        'expressions, or an "empty set" SELECT, at statement execution '
        "time.",
),
case_sensitive=(
"1.4",
"The :paramref:`_sa.create_engine.case_sensitive` parameter "
"is deprecated and will be removed in a future release. "
"Applications should work with result column names in a case "
"sensitive fashion.",
),
)
def create_engine(url, **kwargs):
"""Create a new :class:`_engine.Engine` instance.
The standard calling form is to send the URL as the
first positional argument, usually a string
that indicates database dialect and connection arguments::
engine = create_engine("postgresql://scott:tiger@localhost/test")
Additional keyword arguments may then follow it which
establish various options on the resulting :class:`_engine.Engine`
and its underlying :class:`.Dialect` and :class:`_pool.Pool`
constructs::
engine = create_engine("mysql://scott:tiger@hostname/dbname",
encoding='latin1', echo=True)
The string form of the URL is
``dialect[+driver]://user:password@host/dbname[?key=value..]``, where
``dialect`` is a database name such as ``mysql``, ``oracle``,
``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively,
the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`.
``**kwargs`` takes a wide variety of options which are routed
towards their appropriate components. Arguments may be specific to
the :class:`_engine.Engine`, the underlying :class:`.Dialect`,
as well as the
:class:`_pool.Pool`. Specific dialects also accept keyword arguments that
are unique to that dialect. Here, we describe the parameters
that are common to most :func:`_sa.create_engine()` usage.
Once established, the newly resulting :class:`_engine.Engine` will
request a connection from the underlying :class:`_pool.Pool` once
:meth:`_engine.Engine.connect` is called, or a method which depends on it
such as :meth:`_engine.Engine.execute` is invoked. The
:class:`_pool.Pool` in turn
will establish the first actual DBAPI connection when this request
is received. The :func:`_sa.create_engine` call itself does **not**
establish any actual DBAPI connections directly.
.. seealso::
:doc:`/core/engines`
:doc:`/dialects/index`
:ref:`connections_toplevel`
:param case_sensitive=True: if False, result column names
will match in a case-insensitive fashion, that is,
``row['SomeColumn']``.
:param connect_args: a dictionary of options which will be
passed directly to the DBAPI's ``connect()`` method as
additional keyword arguments. See the example
at :ref:`custom_dbapi_args`.
:param convert_unicode=False: if set to True, causes
all :class:`.String` datatypes to act as though the
:paramref:`.String.convert_unicode` flag has been set to ``True``,
regardless of a setting of ``False`` on an individual :class:`.String`
type. This has the effect of causing all :class:`.String` -based
columns to accommodate Python Unicode objects directly as though the
datatype were the :class:`.Unicode` type.
.. deprecated:: 1.3
The :paramref:`_sa.create_engine.convert_unicode` parameter
is deprecated and will be removed in a future release.
All modern DBAPIs now support Python Unicode directly and this
parameter is unnecessary.
:param creator: a callable which returns a DBAPI connection.
This creation function will be passed to the underlying
connection pool and will be used to create all new database
connections. Usage of this function causes connection
parameters specified in the URL argument to be bypassed.
This hook is not as flexible as the newer
:meth:`_events.DialectEvents.do_connect` hook which allows complete
control over how a connection is made to the database, given the full
set of URL arguments and state beforehand.
.. seealso::
:meth:`_events.DialectEvents.do_connect` - event hook that allows
full control over DBAPI connection mechanics.
:ref:`custom_dbapi_args`
:param echo=False: if True, the Engine will log all statements
as well as a ``repr()`` of their parameter lists to the default log
handler, which defaults to ``sys.stdout`` for output. If set to the
string ``"debug"``, result rows will be printed to the standard output
as well. The ``echo`` attribute of ``Engine`` can be modified at any
time to turn logging on and off; direct control of logging is also
available using the standard Python ``logging`` module.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param echo_pool=False: if True, the connection pool will log
informational output such as when connections are invalidated
as well as when connections are recycled to the default log handler,
which defaults to ``sys.stdout`` for output. If set to the string
``"debug"``, the logging will include pool checkouts and checkins.
Direct control of logging is also available using the standard Python
``logging`` module.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param empty_in_strategy: No longer used; SQLAlchemy now uses
"empty set" behavior for IN in all cases.
:param enable_from_linting: defaults to True. Will emit a warning
if a given SELECT statement is found to have un-linked FROM elements
which would cause a cartesian product.
.. versionadded:: 1.4
.. seealso::
:ref:`change_4737`
:param encoding: Defaults to ``utf-8``. This is the string
encoding used by SQLAlchemy for string encode/decode
operations which occur within SQLAlchemy, **outside of
the DBAPIs own encoding facilities.**
.. note:: The ``encoding`` parameter deals only with in-Python
encoding issues that were prevalent with many DBAPIs under Python
2. Under Python 3 it is mostly unused. For DBAPIs that require
client encoding configurations, such as those of MySQL and Oracle,
please consult specific :ref:`dialect documentation
<dialect_toplevel>` for details.
All modern DBAPIs that work in Python 3 necessarily feature direct
support for Python unicode strings. Under Python 2, this was not
always the case. For those scenarios where the DBAPI is detected as
not supporting a Python ``unicode`` object under Python 2, this
encoding is used to determine the source/destination encoding. It is
**not used** for those cases where the DBAPI handles unicode directly.
To properly configure a system to accommodate Python ``unicode``
objects, the DBAPI should be configured to handle unicode to the
greatest degree as is appropriate - see the notes on unicode pertaining
to the specific target database in use at :ref:`dialect_toplevel`.
Areas where string encoding may need to be accommodated
outside of the DBAPI, nearly always under **Python 2 only**,
include zero or more of:
* the values passed to bound parameters, corresponding to
the :class:`.Unicode` type or the :class:`.String` type
when ``convert_unicode`` is ``True``;
* the values returned in result set columns corresponding
to the :class:`.Unicode` type or the :class:`.String`
type when ``convert_unicode`` is ``True``;
* the string SQL statement passed to the DBAPI's
``cursor.execute()`` method;
* the string names of the keys in the bound parameter
dictionary passed to the DBAPI's ``cursor.execute()``
as well as ``cursor.setinputsizes()`` methods;
* the string column names retrieved from the DBAPI's
``cursor.description`` attribute.
When using Python 3, the DBAPI is required to support all of the above
values as Python ``unicode`` objects, which in Python 3 are just known
as ``str``. In Python 2, the DBAPI does not specify unicode behavior
at all, so SQLAlchemy must make decisions for each of the above values
on a per-DBAPI basis - implementations are completely inconsistent in
their behavior.
:param execution_options: Dictionary execution options which will
be applied to all connections. See
:meth:`~sqlalchemy.engine.Connection.execution_options`
:param future: Use the 2.0 style :class:`_future.Engine` and
:class:`_future.Connection` API.
.. versionadded:: 1.4
.. seealso::
:ref:`migration_20_toplevel`
:param hide_parameters: Boolean, when set to True, SQL statement parameters
will not be displayed in INFO logging nor will they be formatted into
the string representation of :class:`.StatementError` objects.
.. versionadded:: 1.3.8
:param implicit_returning=True: When ``True``, a RETURNING-
compatible construct, if available, will be used to
fetch newly generated primary key values when a single row
INSERT statement is emitted with no existing returning()
clause. This applies to those backends which support RETURNING
or a compatible construct, including PostgreSQL, Firebird, Oracle,
Microsoft SQL Server. Set this to ``False`` to disable
the automatic usage of RETURNING.
:param isolation_level: this string parameter is interpreted by various
dialects in order to affect the transaction isolation level of the
database connection. The parameter essentially accepts some subset of
these string arguments: ``"SERIALIZABLE"``, ``"REPEATABLE_READ"``,
``"READ_COMMITTED"``, ``"READ_UNCOMMITTED"`` and ``"AUTOCOMMIT"``.
Behavior here varies per backend, and
individual dialects should be consulted directly.
Note that the isolation level can also be set on a
per-:class:`_engine.Connection` basis as well, using the
:paramref:`.Connection.execution_options.isolation_level`
feature.
.. seealso::
:attr:`_engine.Connection.default_isolation_level`
- view default level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`_engine.Connection` isolation level
:ref:`SQLite Transaction Isolation <sqlite_isolation_level>`
:ref:`PostgreSQL Transaction Isolation <postgresql_isolation_level>`
:ref:`MySQL Transaction Isolation <mysql_isolation_level>`
:ref:`session_transaction_isolation` - for the ORM
:param json_deserializer: for dialects that support the
:class:`_types.JSON`
datatype, this is a Python callable that will convert a JSON string
to a Python object. By default, the Python ``json.loads`` function is
used.
.. versionchanged:: 1.3.7 The SQLite dialect renamed this from
``_json_deserializer``.
:param json_serializer: for dialects that support the :class:`_types.JSON`
datatype, this is a Python callable that will render a given object
as JSON. By default, the Python ``json.dumps`` function is used.
.. versionchanged:: 1.3.7 The SQLite dialect renamed this from
``_json_serializer``.
:param label_length=None: optional integer value which limits
the size of dynamically generated column labels to that many
characters. If less than 6, labels are generated as
"_(counter)". If ``None``, the value of
``dialect.max_identifier_length``, which may be affected via the
:paramref:`_sa.create_engine.max_identifier_length` parameter,
is used instead. The value of
:paramref:`_sa.create_engine.label_length`
may not be larger than that of
      :paramref:`_sa.create_engine.max_identifier_length`.
.. seealso::
:paramref:`_sa.create_engine.max_identifier_length`
:param listeners: A list of one or more
:class:`~sqlalchemy.interfaces.PoolListener` objects which will
receive connection pool events.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.engine" logger. Defaults to a hexstring of the
object's id.
:param max_identifier_length: integer; override the max_identifier_length
determined by the dialect. if ``None`` or zero, has no effect. This
is the database's configured maximum number of characters that may be
used in a SQL identifier such as a table name, column name, or label
name. All dialects determine this value automatically, however in the
case of a new database version for which this value has changed but
SQLAlchemy's dialect has not been adjusted, the value may be passed
here.
.. versionadded:: 1.3.9
.. seealso::
:paramref:`_sa.create_engine.label_length`
:param max_overflow=10: the number of connections to allow in
connection pool "overflow", that is connections that can be
opened above and beyond the pool_size setting, which defaults
      to five. This is only used with :class:`~sqlalchemy.pool.QueuePool`.
:param module=None: reference to a Python module object (the module
itself, not its string name). Specifies an alternate DBAPI module to
be used by the engine's dialect. Each sub-dialect references a
specific DBAPI which will be imported before first connect. This
parameter causes the import to be bypassed, and the given module to
be used instead. Can be used for testing of DBAPIs as well as to
inject "mock" DBAPI implementations into the :class:`_engine.Engine`.
:param paramstyle=None: The `paramstyle <http://legacy.python.org/dev/peps/pep-0249/#paramstyle>`_
to use when rendering bound parameters. This style defaults to the
one recommended by the DBAPI itself, which is retrieved from the
``.paramstyle`` attribute of the DBAPI. However, most DBAPIs accept
more than one paramstyle, and in particular it may be desirable
to change a "named" paramstyle into a "positional" one, or vice versa.
When this attribute is passed, it should be one of the values
``"qmark"``, ``"numeric"``, ``"named"``, ``"format"`` or
``"pyformat"``, and should correspond to a parameter style known
to be supported by the DBAPI in use.
:param pool=None: an already-constructed instance of
:class:`~sqlalchemy.pool.Pool`, such as a
:class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this
pool will be used directly as the underlying connection pool
for the engine, bypassing whatever connection parameters are
present in the URL argument. For information on constructing
connection pools manually, see :ref:`pooling_toplevel`.
:param poolclass=None: a :class:`~sqlalchemy.pool.Pool`
subclass, which will be used to create a connection pool
instance using the connection parameters given in the URL. Note
this differs from ``pool`` in that you don't actually
instantiate the pool in this case, you just indicate what type
of pool to be used.
:param pool_logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param pool_pre_ping: boolean, if True will enable the connection pool
"pre-ping" feature that tests connections for liveness upon
each checkout.
.. versionadded:: 1.2
.. seealso::
:ref:`pool_disconnects_pessimistic`
:param pool_size=5: the number of connections to keep open
inside the connection pool. This used with
:class:`~sqlalchemy.pool.QueuePool` as
well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With
:class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting
of 0 indicates no limit; to disable pooling, set ``poolclass`` to
:class:`~sqlalchemy.pool.NullPool` instead.
:param pool_recycle=-1: this setting causes the pool to recycle
connections after the given number of seconds has passed. It
defaults to -1, or no timeout. For example, setting to 3600
means connections will be recycled after one hour. Note that
MySQL in particular will disconnect automatically if no
activity is detected on a connection for eight hours (although
this is configurable with the MySQLDB connection itself and the
server configuration as well).
.. seealso::
:ref:`pool_setting_recycle`
:param pool_reset_on_return='rollback': set the
:paramref:`_pool.Pool.reset_on_return` parameter of the underlying
:class:`_pool.Pool` object, which can be set to the values
``"rollback"``, ``"commit"``, or ``None``.
.. seealso::
:paramref:`_pool.Pool.reset_on_return`
:param pool_timeout=30: number of seconds to wait before giving
up on getting a connection from the pool. This is only used
with :class:`~sqlalchemy.pool.QueuePool`.
:param pool_use_lifo=False: use LIFO (last-in-first-out) when retrieving
connections from :class:`.QueuePool` instead of FIFO
(first-in-first-out). Using LIFO, a server-side timeout scheme can
reduce the number of connections used during non- peak periods of
use. When planning for server-side timeouts, ensure that a recycle or
pre-ping strategy is in use to gracefully handle stale connections.
.. versionadded:: 1.3
.. seealso::
:ref:`pool_use_lifo`
:ref:`pool_disconnects`
:param plugins: string list of plugin names to load. See
:class:`.CreateEnginePlugin` for background.
.. versionadded:: 1.2.3
:param query_cache_size: size of the cache used to cache the SQL string
form of queries. Set to zero to disable caching.
The cache is pruned of its least recently used items when its size reaches
N * 1.5. Defaults to 500, meaning the cache will always store at least
500 SQL statements when filled, and will grow up to 750 items at which
point it is pruned back down to 500 by removing the 250 least recently
used items.
Caching is accomplished on a per-statement basis by generating a
cache key that represents the statement's structure, then generating
string SQL for the current dialect only if that key is not present
in the cache. All statements support caching, however some features
such as an INSERT with a large set of parameters will intentionally
bypass the cache. SQL logging will indicate statistics for each
     statement, including whether or not it was pulled from the cache.
.. note:: some ORM functions related to unit-of-work persistence as well
as some attribute loading strategies will make use of individual
per-mapper caches outside of the main cache.
.. seealso::
:ref:`sql_caching`
.. versionadded:: 1.4
""" # noqa
if "strategy" in kwargs:
strat = kwargs.pop("strategy")
if strat == "mock":
return create_mock_engine(url, **kwargs)
else:
raise exc.ArgumentError("unknown strategy: %r" % strat)
kwargs.pop("empty_in_strategy", None)
# create url.URL object
u = _url.make_url(url)
plugins = u._instantiate_plugins(kwargs)
u.query.pop("plugin", None)
kwargs.pop("plugins", None)
entrypoint = u._get_entrypoint()
dialect_cls = entrypoint.get_dialect_cls(u)
if kwargs.pop("_coerce_config", False):
def pop_kwarg(key, default=None):
value = kwargs.pop(key, default)
if key in dialect_cls.engine_config_types:
value = dialect_cls.engine_config_types[key](value)
return value
else:
pop_kwarg = kwargs.pop
dialect_args = {}
# consume dialect arguments from kwargs
for k in util.get_cls_kwargs(dialect_cls):
if k in kwargs:
dialect_args[k] = pop_kwarg(k)
dbapi = kwargs.pop("module", None)
if dbapi is None:
dbapi_args = {}
for k in util.get_func_kwargs(dialect_cls.dbapi):
if k in kwargs:
dbapi_args[k] = pop_kwarg(k)
dbapi = dialect_cls.dbapi(**dbapi_args)
dialect_args["dbapi"] = dbapi
dialect_args.setdefault("compiler_linting", compiler.NO_LINTING)
enable_from_linting = kwargs.pop("enable_from_linting", True)
if enable_from_linting:
dialect_args["compiler_linting"] ^= compiler.COLLECT_CARTESIAN_PRODUCTS
for plugin in plugins:
plugin.handle_dialect_kwargs(dialect_cls, dialect_args)
# create dialect
dialect = dialect_cls(**dialect_args)
# assemble connection arguments
(cargs, cparams) = dialect.create_connect_args(u)
cparams.update(pop_kwarg("connect_args", {}))
cargs = list(cargs) # allow mutability
# look for existing pool or create
pool = pop_kwarg("pool", None)
if pool is None:
def connect(connection_record=None):
if dialect._has_events:
for fn in dialect.dispatch.do_connect:
connection = fn(dialect, connection_record, cargs, cparams)
if connection is not None:
return connection
return dialect.connect(*cargs, **cparams)
creator = pop_kwarg("creator", connect)
poolclass = pop_kwarg("poolclass", None)
if poolclass is None:
poolclass = dialect_cls.get_pool_class(u)
pool_args = {"dialect": dialect}
# consume pool arguments from kwargs, translating a few of
# the arguments
translate = {
"logging_name": "pool_logging_name",
"echo": "echo_pool",
"timeout": "pool_timeout",
"recycle": "pool_recycle",
"events": "pool_events",
"reset_on_return": "pool_reset_on_return",
"pre_ping": "pool_pre_ping",
"use_lifo": "pool_use_lifo",
}
for k in util.get_cls_kwargs(poolclass):
tk = translate.get(k, k)
if tk in kwargs:
pool_args[k] = pop_kwarg(tk)
for plugin in plugins:
plugin.handle_pool_kwargs(poolclass, pool_args)
pool = poolclass(creator, **pool_args)
else:
if isinstance(pool, poollib.dbapi_proxy._DBProxy):
pool = pool.get_pool(*cargs, **cparams)
pool._dialect = dialect
# create engine.
if kwargs.pop("future", False):
from sqlalchemy import future
default_engine_class = future.Engine
else:
default_engine_class = base.Engine
engineclass = kwargs.pop("_future_engine_class", default_engine_class)
engine_args = {}
for k in util.get_cls_kwargs(engineclass):
if k in kwargs:
engine_args[k] = pop_kwarg(k)
_initialize = kwargs.pop("_initialize", True)
# all kwargs should be consumed
if kwargs:
raise TypeError(
"Invalid argument(s) %s sent to create_engine(), "
"using configuration %s/%s/%s. Please check that the "
"keyword arguments are appropriate for this combination "
"of components."
% (
",".join("'%s'" % k for k in kwargs),
dialect.__class__.__name__,
pool.__class__.__name__,
engineclass.__name__,
)
)
engine = engineclass(pool, dialect, u, **engine_args)
if _initialize:
do_on_connect = dialect.on_connect()
if do_on_connect:
def on_connect(dbapi_connection, connection_record):
conn = getattr(
dbapi_connection, "_sqla_unwrap", dbapi_connection
)
if conn is None:
return
do_on_connect(conn)
event.listen(pool, "connect", on_connect)
def first_connect(dbapi_connection, connection_record):
c = base.Connection(
engine, connection=dbapi_connection, _has_events=False
)
c._execution_options = util.immutabledict()
dialect.initialize(c)
dialect.do_rollback(c.connection)
if do_on_connect:
event.listen(
pool, "connect", first_connect, _once_unless_exception=True
)
else:
event.listen(
pool,
"first_connect",
first_connect,
_once_unless_exception=True,
)
dialect_cls.engine_created(engine)
if entrypoint is not dialect_cls:
entrypoint.engine_created(engine)
for plugin in plugins:
plugin.engine_created(engine)
return engine
def engine_from_config(configuration, prefix="sqlalchemy.", **kwargs):
"""Create a new Engine instance using a configuration dictionary.
The dictionary is typically produced from a config file.
The keys of interest to ``engine_from_config()`` should be prefixed, e.g.
``sqlalchemy.url``, ``sqlalchemy.echo``, etc. The 'prefix' argument
indicates the prefix to be searched for. Each matching key (after the
prefix is stripped) is treated as though it were the corresponding keyword
argument to a :func:`_sa.create_engine` call.
The only required key is (assuming the default prefix) ``sqlalchemy.url``,
which provides the :ref:`database URL <database_urls>`.
A select set of keyword arguments will be "coerced" to their
expected type based on string values. The set of arguments
is extensible per-dialect using the ``engine_config_types`` accessor.
:param configuration: A dictionary (typically produced from a config file,
but this is not a requirement). Items whose keys start with the value
of 'prefix' will have that prefix stripped, and will then be passed to
:func:`_sa.create_engine`.
:param prefix: Prefix to match and then strip from keys
in 'configuration'.
:param kwargs: Each keyword argument to ``engine_from_config()`` itself
overrides the corresponding item taken from the 'configuration'
dictionary. Keyword arguments should *not* be prefixed.
"""
options = dict(
(key[len(prefix) :], configuration[key])
for key in configuration
if key.startswith(prefix)
)
options["_coerce_config"] = True
options.update(kwargs)
url = options.pop("url")
return create_engine(url, **options)
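# Illustrative sketch (not part of the original module): typical engine_from_config input
# built from an ini-style section; the URL and option values are placeholders.
#
#   config = {
#       "sqlalchemy.url": "postgresql://scott:tiger@localhost/test",
#       "sqlalchemy.echo": "false",
#       "sqlalchemy.pool_size": "10",
#   }
#   engine = engine_from_config(config, prefix="sqlalchemy.")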
| 40.600567
| 102
| 0.662329
|
ab86cf81844cc8647f9e5c5f4d60b1e4358eda0e
| 2,391
|
py
|
Python
|
examples/nanosleep_watcher.py
|
facebookresearch/py2bpf
|
e1721ef129cf2ac838db00f2c1be0bcd19b30665
|
[
"BSD-3-Clause"
] | 171
|
2018-01-03T20:16:46.000Z
|
2022-03-28T10:34:22.000Z
|
examples/nanosleep_watcher.py
|
facebookresearch/py2bpf
|
e1721ef129cf2ac838db00f2c1be0bcd19b30665
|
[
"BSD-3-Clause"
] | 2
|
2018-07-02T22:09:22.000Z
|
2019-07-22T13:33:01.000Z
|
examples/nanosleep_watcher.py
|
facebookresearch/py2bpf
|
e1721ef129cf2ac838db00f2c1be0bcd19b30665
|
[
"BSD-3-Clause"
] | 25
|
2018-01-09T10:39:08.000Z
|
2021-11-06T16:29:30.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import ctypes
import py2bpf.datastructures
import py2bpf.funcs
import py2bpf.kprobe
import py2bpf.util
def run_probe():
# Ensure that we don't run into any pesky ulimits
py2bpf.util.ensure_resources()
# This is a totally vanilla ctypes struct. It'll be auto-translated in
# the context of the bpf-converted python function, so it'll look
# pretty normal in use.
class TimeSpec(ctypes.Structure):
_fields_ = [
('tv_sec', ctypes.c_int64),
('tv_nsec', ctypes.c_int64),
]
# This is our output type
class NanoSleepEvent(ctypes.Structure):
_fields_ = [
('comm', ctypes.c_char * 16),
('ts', TimeSpec),
]
# Create a queue to exfiltrate the comms to us
q = py2bpf.datastructures.BpfQueue(NanoSleepEvent)
# Write our probing function. The probe decorator turns it into a
# function that returns a BpfProbe.
@py2bpf.kprobe.probe('sys_nanosleep')
def watch_nanosleep(pt_regs):
nse = NanoSleepEvent()
# Read the "comm" or short description of the running process
py2bpf.funcs.get_current_comm(nse.comm)
# Read the time spec argument. It's a pointer to arbitrary memory,
# so we'll have to use probe_read to read it safely. This could
# fail and return a non-zero code, but I'm being lazy and assuming
# success here.
py2bpf.funcs.probe_read(nse.ts, pt_regs.rdi)
# Send the NanoSleepEvent back to userspace through the BpfQueue
py2bpf.funcs.perf_event_output(
pt_regs, q, py2bpf.funcs.get_smp_processor_id(), nse)
return 0
# We use the `with` syntax to insert it
with watch_nanosleep():
# We iterate over all items returned from the queue. When there
# are no more objects in the queue, we simply block.
for nse in q:
print('comm={} tv_sec={} tv_nsec={}'.format(
nse.comm.decode(),
nse.ts.tv_sec,
nse.ts.tv_nsec))
def main():
try:
run_probe()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
| 29.518519
| 74
| 0.642409
|
e98e75020a931aa10e309dd306fe0558492d3f57
| 3,265
|
py
|
Python
|
PaddleCV/yolov3/infer.py
|
XiaoguangHu01/models
|
a95d49323ed504e5a9164586f171f408954fd43a
|
[
"Apache-2.0"
] | 4
|
2020-01-04T13:15:02.000Z
|
2021-07-21T07:50:02.000Z
|
PaddleCV/yolov3/infer.py
|
XiaoguangHu01/models
|
a95d49323ed504e5a9164586f171f408954fd43a
|
[
"Apache-2.0"
] | null | null | null |
PaddleCV/yolov3/infer.py
|
XiaoguangHu01/models
|
a95d49323ed504e5a9164586f171f408954fd43a
|
[
"Apache-2.0"
] | 3
|
2019-10-31T07:18:49.000Z
|
2020-01-13T03:18:39.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import numpy as np
import paddle
import paddle.fluid as fluid
import box_utils
import reader
from utility import print_arguments, parse_args, check_gpu
from models.yolov3 import YOLOv3
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval, Params
from config import cfg
def infer():
    # guard against use_gpu=True being set when running a CPU-only build of PaddlePaddle
check_gpu(cfg.use_gpu)
if not os.path.exists('output'):
os.mkdir('output')
model = YOLOv3(is_train=False)
model.build_model()
outputs = model.get_pred()
input_size = cfg.input_size
place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
# yapf: disable
if cfg.weights:
def if_exist(var):
return os.path.exists(os.path.join(cfg.weights, var.name))
fluid.io.load_vars(exe, cfg.weights, predicate=if_exist)
# yapf: enable
# you can save inference model by following code
# fluid.io.save_inference_model("./output/yolov3",
# feeded_var_names=['image', 'im_shape'],
# target_vars=outputs,
# executor=exe)
feeder = fluid.DataFeeder(place=place, feed_list=model.feeds())
fetch_list = [outputs]
image_names = []
if cfg.image_name is not None:
image_names.append(cfg.image_name)
else:
for image_name in os.listdir(cfg.image_path):
if image_name.split('.')[-1] in ['jpg', 'png']:
image_names.append(image_name)
for image_name in image_names:
infer_reader = reader.infer(input_size,
os.path.join(cfg.image_path, image_name))
label_names, _ = reader.get_label_infos()
data = next(infer_reader())
im_shape = data[0][2]
outputs = exe.run(fetch_list=[v.name for v in fetch_list],
feed=feeder.feed(data),
return_numpy=False,
use_program_cache=True)
bboxes = np.array(outputs[0])
if bboxes.shape[1] != 6:
print("No object found in {}".format(image_name))
continue
labels = bboxes[:, 0].astype('int32')
scores = bboxes[:, 1].astype('float32')
boxes = bboxes[:, 2:].astype('float32')
path = os.path.join(cfg.image_path, image_name)
box_utils.draw_boxes_on_image(path, boxes, scores, labels, label_names,
cfg.draw_thresh)
if __name__ == '__main__':
args = parse_args()
print_arguments(args)
infer()
| 35.48913
| 79
| 0.635222
|
33ab2c1e646e05ea207357e9d80ca43fa67bf1b5
| 8,100
|
py
|
Python
|
test/MongoUtil_test.py
|
Tianhao-Gu/handle_service2
|
6cb9b4f43925f64aed96779ce408d56bfe7ec185
|
[
"MIT"
] | null | null | null |
test/MongoUtil_test.py
|
Tianhao-Gu/handle_service2
|
6cb9b4f43925f64aed96779ce408d56bfe7ec185
|
[
"MIT"
] | 15
|
2019-05-22T05:26:46.000Z
|
2021-07-12T02:09:55.000Z
|
test/MongoUtil_test.py
|
Tianhao-Gu/handle_service2
|
6cb9b4f43925f64aed96779ce408d56bfe7ec185
|
[
"MIT"
] | 6
|
2019-03-11T20:20:37.000Z
|
2021-01-08T21:41:36.000Z
|
# -*- coding: utf-8 -*-
import os
import unittest
from configparser import ConfigParser
import inspect
import copy
import threading
import queue
from random import randrange
from mongo_util import MongoHelper
from AbstractHandle.Utils.MongoUtil import MongoUtil
class MongoUtilTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
config_file = os.environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('AbstractHandle'):
cls.cfg[nameval[0]] = nameval[1]
cls.cfg['mongo-collection'] = 'handle'
cls.cfg['mongo-hid-counter-collection'] = 'handle_id_counter'
cls.cfg['mongo-authmechanism'] = 'DEFAULT'
cls.mongo_helper = MongoHelper()
cls.my_client = cls.mongo_helper.create_test_db(db=cls.cfg['mongo-database'],
col=cls.cfg['mongo-collection'])
cls.mongo_util = MongoUtil(cls.cfg)
@classmethod
def tearDownClass(cls):
print('Finished testing MongoUtil')
def getMongoUtil(self):
return self.__class__.mongo_util
def start_test(self):
testname = inspect.stack()[1][3]
print('\n*** starting test: ' + testname + ' **')
def test_get_collection(self):
self.start_test()
mongo_util = self.getMongoUtil()
with self.assertRaises(ValueError) as context:
mongo_util._get_collection('fake_mongo_host', 1234, 'mongo_database', 'mongo_collection')
self.assertIn('Connot connect to Mongo server', str(context.exception.args))
def test_init_ok(self):
self.start_test()
class_attri = ['mongo_host', 'mongo_port', 'mongo_database', 'mongo_collection',
'handle_collection', 'hid_counter_collection']
mongo_util = self.getMongoUtil()
self.assertTrue(set(class_attri) <= set(mongo_util.__dict__.keys()))
handle_collection = mongo_util.handle_collection
self.assertEqual(handle_collection.name, 'handle')
self.assertEqual(handle_collection.count_documents({}), 10)
handle_col_info = handle_collection.index_information()
handle_col_idx = handle_col_info.keys()
expected_idx = ['_id_', 'hid_1']
self.assertEqual(list(handle_col_idx), expected_idx)
self.assertTrue(handle_col_info['hid_1']['unique'])
hid_counter_collection = mongo_util.hid_counter_collection
self.assertEqual(hid_counter_collection.name, 'handle_id_counter')
def test_find_in_ok(self):
self.start_test()
mongo_util = self.getMongoUtil()
# test query 'hid' field
elements = [68020, 68022, 0]
docs = mongo_util.find_in(elements, 'hid')
self.assertEqual(docs.count(), 2)
# test query 'hid' field with empty data
elements = [0]
docs = mongo_util.find_in(elements, 'hid')
self.assertEqual(docs.count(), 0)
# test query 'id' field
elements = ['b753774f-0bbd-4b96-9202-89b0c70bf31c']
docs = mongo_util.find_in(elements, 'id')
self.assertEqual(docs.count(), 1)
doc = docs.next()
self.assertFalse('_id' in doc.keys())
self.assertEqual(doc.get('hid'), 68020)
# test null projection
elements = ['b753774f-0bbd-4b96-9202-89b0c70bf31c']
docs = mongo_util.find_in(elements, 'id', projection=None)
self.assertEqual(docs.count(), 1)
doc = docs.next()
self.assertEqual(doc.get('_id'), 68020)
self.assertEqual(doc.get('hid'), 68020)
def test_update_one_ok(self):
self.start_test()
mongo_util = self.getMongoUtil()
elements = ['b753774f-0bbd-4b96-9202-89b0c70bf31c']
docs = mongo_util.find_in(elements, 'id', projection=None)
self.assertEqual(docs.count(), 1)
doc = docs.next()
self.assertEqual(doc.get('created_by'), 'tgu2')
update_doc = copy.deepcopy(doc)
new_user = 'test_user'
update_doc['created_by'] = new_user
mongo_util.update_one(update_doc)
docs = mongo_util.find_in(elements, 'id', projection=None)
new_doc = docs.next()
self.assertEqual(new_doc.get('created_by'), new_user)
mongo_util.update_one(doc)
def test_insert_one_ok(self):
self.start_test()
mongo_util = self.getMongoUtil()
self.assertEqual(mongo_util.handle_collection.find().count(), 10)
doc = {'_id': 9999, 'hid': 9999, 'file_name': 'fake_file'}
counter = mongo_util.get_hid_counter()
mongo_util.insert_one(doc)
new_counter = mongo_util.get_hid_counter()
self.assertEqual(new_counter, counter)
self.assertEqual(mongo_util.handle_collection.find().count(), 11)
elements = [9999]
docs = mongo_util.find_in(elements, 'hid', projection=None)
self.assertEqual(docs.count(), 1)
doc = docs.next()
self.assertEqual(doc.get('hid'), 9999)
self.assertEqual(doc.get('file_name'), 'fake_file')
mongo_util.delete_one(doc)
self.assertEqual(mongo_util.handle_collection.find().count(), 10)
def test_increase_counter_with_multi_threads(self):
mongo_util = self.getMongoUtil()
counter = mongo_util.get_hid_counter()
thread_count = 329
threads = list()
hids = list()
que = queue.Queue()
for index in range(thread_count):
            # run the increment inside the thread (calling it here would do the work in the main thread)
            x = threading.Thread(target=lambda: que.put(mongo_util.increase_counter()))
threads.append(x)
x.start()
for index, thread in enumerate(threads):
thread.join()
while not que.empty():
hids.append(que.get())
new_counter = mongo_util.get_hid_counter()
self.assertEqual(counter + thread_count, new_counter)
self.assertEqual(len(set(hids)), thread_count)
self.assertEqual(len(hids), len(set(hids)))
hids.sort()
self.assertEqual(hids[0], counter + 1)
self.assertEqual(hids[-1], new_counter)
rand_pos = randrange(thread_count)
self.assertEqual(hids[rand_pos], counter + 1 + rand_pos)
rand_pos = randrange(thread_count)
self.assertEqual(hids[-rand_pos], new_counter + 1 - rand_pos)
def test_delete_one_ok(self):
self.start_test()
mongo_util = self.getMongoUtil()
docs = mongo_util.handle_collection.find()
self.assertEqual(docs.count(), 10)
doc = docs.next()
hid = doc.get('hid')
mongo_util.delete_one(doc)
self.assertEqual(mongo_util.handle_collection.find().count(), 9)
docs = mongo_util.find_in([hid], 'hid', projection=None)
self.assertEqual(docs.count(), 0)
mongo_util.insert_one(doc)
self.assertEqual(mongo_util.handle_collection.find().count(), 10)
docs = mongo_util.find_in([hid], 'hid', projection=None)
self.assertEqual(docs.count(), 1)
def test_delete_many_ok(self):
self.start_test()
mongo_util = self.getMongoUtil()
docs = mongo_util.handle_collection.find()
self.assertEqual(docs.count(), 10)
docs_to_delete = list()
docs_to_delete.append(docs.next())
docs_to_delete.append(docs.next())
docs_to_delete = docs_to_delete * 2 # test delete duplicate items
deleted_count = mongo_util.delete_many(docs_to_delete)
self.assertEqual(deleted_count, 2)
self.assertEqual(mongo_util.handle_collection.find().count(), 8)
docs = mongo_util.find_in([doc.get('hid') for doc in docs_to_delete], 'hid')
self.assertEqual(docs.count(), 0)
for doc in docs_to_delete:
try:
mongo_util.insert_one(doc)
except Exception:
pass
self.assertEqual(mongo_util.handle_collection.find().count(), 10)
docs = mongo_util.find_in([doc.get('hid') for doc in docs_to_delete], 'hid')
self.assertEqual(docs.count(), 2)
| 35.526316
| 101
| 0.637407
|
6e0cd9dfee0e93820dc1270f387ff8de023c2de2
| 9,079
|
py
|
Python
|
WassersteinGAN/src/model/models_WGAN.py
|
voletiv/DeepLearningImplementations
|
22ec85cdc7daa308ff2bec81962ca77e5959a70b
|
[
"MIT"
] | null | null | null |
WassersteinGAN/src/model/models_WGAN.py
|
voletiv/DeepLearningImplementations
|
22ec85cdc7daa308ff2bec81962ca77e5959a70b
|
[
"MIT"
] | null | null | null |
WassersteinGAN/src/model/models_WGAN.py
|
voletiv/DeepLearningImplementations
|
22ec85cdc7daa308ff2bec81962ca77e5959a70b
|
[
"MIT"
] | 1
|
2018-03-23T15:52:11.000Z
|
2018-03-23T15:52:11.000Z
|
import numpy as np
import keras.backend as K
from keras.models import Model
from keras.layers import Input
from keras.initializers import RandomNormal
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Dense, Activation, Reshape
from keras.layers.convolutional import Conv2D, Deconv2D, UpSampling2D
from keras.layers.pooling import GlobalAveragePooling2D
def wasserstein(y_true, y_pred):
# return K.mean(y_true * y_pred) / K.mean(y_true)
return K.mean(y_true * y_pred)
def visualize_model(model):
model.summary()
from keras.utils import plot_model
plot_model(model,
to_file='../../figures/%s.png' % model.name,
show_shapes=True,
show_layer_names=True)
def generator_toy(noise_dim, model_name="generator_toy"):
"""
Simple MLP generator for the MoG unrolled GAN toy experiment
"""
gen_input = Input(shape=noise_dim, name="generator_input")
x = Dense(128)(gen_input)
x = Activation("tanh")(x)
x = Dense(128)(x)
x = Activation("tanh")(x)
x = Dense(2)(x)
generator_model = Model(inputs=[gen_input], outputs=[x], name=model_name)
visualize_model(generator_model)
return generator_model
def discriminator_toy(model_name="discriminator_toy"):
"""
Simple MLP discriminator for the MoG unrolled GAN toy experiment
"""
disc_input = Input(shape=(2,), name="discriminator_input")
x = Dense(128)(disc_input)
x = Activation("tanh")(x)
x = Dense(128)(x)
x = Activation("tanh")(x)
x = Dense(1)(x)
discriminator_model = Model(inputs=[disc_input], outputs=[x], name=model_name)
visualize_model(discriminator_model)
return discriminator_model
def GAN_toy(generator, discriminator, noise_dim):
"""
Simple GAN genrator + discriminator for the MoG unrolled GAN toy experiment
"""
gen_input = Input(shape=noise_dim, name="noise_input")
generated_sample = generator(gen_input)
GAN_output = discriminator(generated_sample)
GAN_toy = Model(inputs=[gen_input],
outputs=[GAN_output],
name="GAN_toy")
visualize_model(GAN_toy)
return GAN_toy
def generator_upsampling(noise_dim, img_dim, model_name="generator_upsampling", dset="mnist"):
"""DCGAN generator based on Upsampling and Conv2D
Args:
noise_dim: Dimension of the noise input
img_dim: dimension of the image output
model_name: model name (default: {"generator_upsampling"})
dset: dataset (default: {"mnist"})
Returns:
keras model
"""
s = img_dim[1]
f = 512
if dset == "mnist":
start_dim = int(s / 4)
nb_upconv = 2
elif dset == "celebA":
start_dim = int(s / 16)
nb_upconv = 4
else:
o = s
nb_upconv = 0
while o > 7:
o = o/2
nb_upconv += 1
start_dim = int(o)
if K.image_dim_ordering() == "th":
bn_axis = 1
reshape_shape = (f, start_dim, start_dim)
output_channels = img_dim[0]
else:
reshape_shape = (start_dim, start_dim, f)
bn_axis = -1
output_channels = img_dim[-1]
gen_input = Input(shape=noise_dim, name="generator_input")
# Noise input and reshaping
x = Dense(f * start_dim * start_dim, input_dim=noise_dim)(gen_input)
x = Reshape(reshape_shape)(x)
x = BatchNormalization(axis=bn_axis)(x)
x = Activation("relu")(x)
# Upscaling blocks: Upsampling2D->Conv2D->ReLU->BN->Conv2D->ReLU
for i in range(nb_upconv):
x = UpSampling2D(size=(2, 2))(x)
nb_filters = int(f / (2 ** (i + 1)))
x = Conv2D(nb_filters, (3, 3), padding="same", kernel_initializer=RandomNormal(stddev=0.02))(x)
        x = BatchNormalization(axis=bn_axis)(x)  # follow the channel axis computed above for the backend ordering
x = Activation("relu")(x)
x = Conv2D(nb_filters, (3, 3), padding="same", kernel_initializer=RandomNormal(stddev=0.02))(x)
x = Activation("relu")(x)
# Last Conv to get the output image
x = Conv2D(output_channels, (3, 3), name="gen_conv2d_final",
padding="same", activation='tanh', kernel_initializer=RandomNormal(stddev=0.02))(x)
generator_model = Model(inputs=[gen_input], outputs=[x], name=model_name)
visualize_model(generator_model)
return generator_model
def generator_deconv(noise_dim, img_dim, batch_size, model_name="generator_deconv", dset="mnist"):
"""DCGAN generator based on Deconv2D
Args:
noise_dim: Dimension of the noise input
img_dim: dimension of the image output
batch_size: needed to reshape after the deconv2D
model_name: model name (default: {"generator_deconv"})
dset: dataset (default: {"mnist"})
Returns:
keras model
"""
assert K.backend() == "tensorflow", "Deconv not implemented with theano"
s = img_dim[1]
f = 512
if dset == "mnist":
start_dim = int(s / 4)
nb_upconv = 2
elif dset == "celebA":
start_dim = int(s / 16)
nb_upconv = 4
else:
o = s
nb_upconv = 0
while o > 7:
o = o/2
nb_upconv += 1
start_dim = int(o)
reshape_shape = (start_dim, start_dim, f)
bn_axis = -1
output_channels = img_dim[-1]
gen_input = Input(shape=noise_dim, name="generator_input")
# Noise input and reshaping
x = Dense(f * start_dim * start_dim, input_dim=noise_dim, use_bias=False)(gen_input)
x = Reshape(reshape_shape)(x)
x = BatchNormalization(axis=bn_axis)(x)
x = Activation("relu")(x)
# Transposed conv blocks: Deconv2D->BN->ReLU
for i in range(nb_upconv - 1):
nb_filters = int(f / (2 ** (i + 1)))
s = start_dim * (2 ** (i + 1))
o_shape = (batch_size, s, s, nb_filters)
x = Deconv2D(nb_filters, (3, 3),
output_shape=o_shape, strides=(2, 2),
padding="same", use_bias=False,
kernel_initializer=RandomNormal(stddev=0.02))(x)
x = BatchNormalization(axis=-1)(x)
x = Activation("relu")(x)
# Last block
s = start_dim * (2 ** (nb_upconv))
o_shape = (batch_size, s, s, output_channels)
x = Deconv2D(output_channels, (3, 3),
output_shape=o_shape, strides=(2, 2),
padding="same", use_bias=False,
kernel_initializer=RandomNormal(stddev=0.02))(x)
x = Activation("tanh")(x)
generator_model = Model(inputs=[gen_input], outputs=[x], name=model_name)
visualize_model(generator_model)
return generator_model
def discriminator(img_dim, model_name="discriminator"):
"""DCGAN discriminator
Args:
img_dim: dimension of the image output
model_name: model name (default: {"generator_deconv"})
Returns:
keras model
"""
if K.image_dim_ordering() == "th":
bn_axis = 1
min_s = min(img_dim[1:])
else:
bn_axis = -1
min_s = min(img_dim[:-1])
disc_input = Input(shape=img_dim, name="discriminator_input")
# Get the list of number of conv filters
# (first layer starts with 64), filters are subsequently doubled
nb_conv = int(np.floor(np.log(min_s // 4) / np.log(2)))
list_f = [64 * min(8, (2 ** i)) for i in range(nb_conv)]
# First conv with 2x2 strides
x = Conv2D(list_f[0], (3, 3), strides=(2, 2), name="disc_conv2d_1",
padding="same", use_bias=False,
kernel_initializer=RandomNormal(stddev=0.02))(disc_input)
x = BatchNormalization(axis=bn_axis)(x)
x = LeakyReLU(0.2)(x)
# Conv blocks: Conv2D(2x2 strides)->BN->LReLU
for i, f in enumerate(list_f[1:]):
name = "disc_conv2d_%s" % (i + 2)
x = Conv2D(f, (3, 3), strides=(2, 2), name=name, padding="same", use_bias=False,
kernel_initializer=RandomNormal(stddev=0.02))(x)
x = BatchNormalization(axis=bn_axis)(x)
x = LeakyReLU(0.2)(x)
# Last convolution
x = Conv2D(1, (3, 3), name="last_conv", padding="same", use_bias=False,
kernel_initializer=RandomNormal(stddev=0.02))(x)
# Average pooling
x = GlobalAveragePooling2D()(x)
discriminator_model = Model(inputs=[disc_input], outputs=[x], name=model_name)
visualize_model(discriminator_model)
return discriminator_model
def DCGAN(generator, discriminator, noise_dim, img_dim):
"""DCGAN generator + discriminator model
Args:
generator: keras generator model
discriminator: keras discriminator model
noise_dim: generator input noise dimension
img_dim: real image data dimension
Returns:
keras model
"""
noise_input = Input(shape=noise_dim, name="noise_input")
generated_image = generator(noise_input)
DCGAN_output = discriminator(generated_image)
DCGAN = Model(inputs=[noise_input],
outputs=[DCGAN_output],
name="DCGAN")
visualize_model(DCGAN)
return DCGAN
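# Illustrative training-setup sketch (not part of the original module): one way the pieces
# above are typically wired together; optimizer settings below are placeholders.
#
#   from keras.optimizers import RMSprop
#   noise_dim = (100,)
#   img_dim = (28, 28, 1)
#   G = generator_upsampling(noise_dim, img_dim, dset="mnist")
#   D = discriminator(img_dim)
#   D.compile(loss=wasserstein, optimizer=RMSprop(lr=5e-5))
#   D.trainable = False
#   GAN = DCGAN(G, D, noise_dim, img_dim)
#   GAN.compile(loss=wasserstein, optimizer=RMSprop(lr=5e-5))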
| 30.466443
| 103
| 0.630906
|
ffa60d3358afb8eed5d37cb348ec4f1a68e6d5d2
| 2,715
|
py
|
Python
|
data/cirq_new/cirq_program/startCirq_pragma307.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/cirq_new/cirq_program/startCirq_pragma307.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/cirq_new/cirq_program/startCirq_pragma307.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=16
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.rx(1.6147786239451536).on(input_qubit[3])) # number=5
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=8
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[0])) # number=10
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=7
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=11
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=12
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[1])) # number=13
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=14
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=15
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma307.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
| 33.109756
| 92
| 0.648619
|
7eca569d6a711c315551ea89258f34213fa431d1
| 893
|
py
|
Python
|
src/gocept/autocomplete/tests/test_selenium.py
|
gocept/gocept.autocomplete
|
0a48cdfe5af2a913e36f1a4734319e145001694b
|
[
"ZPL-2.1"
] | null | null | null |
src/gocept/autocomplete/tests/test_selenium.py
|
gocept/gocept.autocomplete
|
0a48cdfe5af2a913e36f1a4734319e145001694b
|
[
"ZPL-2.1"
] | null | null | null |
src/gocept/autocomplete/tests/test_selenium.py
|
gocept/gocept.autocomplete
|
0a48cdfe5af2a913e36f1a4734319e145001694b
|
[
"ZPL-2.1"
] | 1
|
2021-03-01T03:24:42.000Z
|
2021-03-01T03:24:42.000Z
|
# Copyright (c) 2010 gocept gmbh & co. kg
# See also LICENSE.txt
import gocept.autocomplete.testing
import unittest
@unittest.skip('foo')
class AutocompleteTest(gocept.autocomplete.testing.SeleniumTestCase):
def test_autocomplete(self):
s = self.selenium
# XXX: logging in this way on /demo directly (which does not *require*
# login) does not work
s.open('http://mgr:mgrpw@%s/manage' % self.selenium.server)
s.open('/demo')
# XXX: this *looks* like we're entering 'rr' (when one observes the
# browser), but it does the right thing -- and all other combination
# of calls I tried didn't work at all. :-(
s.type('id=form-widgets-color', 'r')
s.typeKeys('id=form-widgets-color', 'r')
s.waitForValue('id=form-widgets-color', 'red')
s.verifyText('id=form-widgets-color-container', '*red*')
| 34.346154
| 78
| 0.645017
|
6076939dbcf4291c3b38cc823363c7a06980bc16
| 55
|
py
|
Python
|
accencis/settings/__init__.py
|
majestylink/majestyAccencis
|
41bdde6f9982980609f93a8b44bcaf06cc5f6ea6
|
[
"MIT"
] | null | null | null |
accencis/settings/__init__.py
|
majestylink/majestyAccencis
|
41bdde6f9982980609f93a8b44bcaf06cc5f6ea6
|
[
"MIT"
] | null | null | null |
accencis/settings/__init__.py
|
majestylink/majestyAccencis
|
41bdde6f9982980609f93a8b44bcaf06cc5f6ea6
|
[
"MIT"
] | null | null | null |
#from .development import *
# from .production import *
| 27.5
| 27
| 0.745455
|
0d3014eba7c49d344407cd11b757aa956e72b76e
| 1,409
|
py
|
Python
|
cursoPython1/s10l23.py
|
sistemasmarcelocastro/pruebas-python
|
d5fa7e67fa49c0ab120ab26abf0a41baad4ce5bb
|
[
"MIT"
] | null | null | null |
cursoPython1/s10l23.py
|
sistemasmarcelocastro/pruebas-python
|
d5fa7e67fa49c0ab120ab26abf0a41baad4ce5bb
|
[
"MIT"
] | null | null | null |
cursoPython1/s10l23.py
|
sistemasmarcelocastro/pruebas-python
|
d5fa7e67fa49c0ab120ab26abf0a41baad4ce5bb
|
[
"MIT"
] | null | null | null |
#Find patterns in a text
#without RegEx:
""" Without RegEx
def estel(entrada):
if len(entrada) != 12:
return False # wrong length
for i in range(0, 3):
if not entrada[i].isdecimal():
return False # no prefix
if entrada[3] != '-':
return False # no hyphen
for i in range(4, 7):
if not entrada[i].isdecimal():
return False # first 3 digits missing
if entrada[7] != '-':
return False # second hyphen missing
for i in range(8, 12):
if not entrada[i].isdecimal():
return False # last 4 digits missing
return True
def enctel(entrada):
seEncontro = False
for i in range(len(entrada)):
pedazo = entrada[i:i + 12]
if estel(pedazo):
print('phone number found: ' + pedazo)
seEncontro = True
if not seEncontro:
print('No phone numbers in the text')
entrada = 'asdlkasdjf 123-456-7890 skskdowo d wd dw ñ...c 789-456-1235.dwwd wd w wd wd444-555-6666...'
enctel(entrada)
# print(entrada)
# print(estel('123-456-7890'))
"""
# With RegEx:
import re
entrada = 'asdlkasdjf 123-456-7890 skskdowo d wd dw ñ...c 789-456-1235.dwwd wd w wd wd444-555-6666...'
numtelRe = re.compile(r'\d\d\d-\d\d\d-\d\d\d\d')  # creates a regex object with the pattern
print(numtelRe.findall(entrada))  # finds all matches of the pattern.
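# Added illustration (not part of the original lesson): search() returns only
# the first match as a match object, and group() extracts the matched text.
first_match = numtelRe.search(entrada)
if first_match:
    print(first_match.group())  # -> 123-456-7890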
| 29.354167
| 102
| 0.61462
|
0e8e0bc459d47923c37fdaeea7d28b7998f1afe6
| 4,507
|
py
|
Python
|
pylib/Tools/Fetch/FetchRPM.py
|
naughtont3/mtt
|
508761ca498dfd9902e508f87568d98eb0177903
|
[
"BSD-3-Clause-Open-MPI"
] | 58
|
2015-04-07T01:46:21.000Z
|
2022-01-29T02:56:32.000Z
|
pylib/Tools/Fetch/FetchRPM.py
|
naughtont3/mtt
|
508761ca498dfd9902e508f87568d98eb0177903
|
[
"BSD-3-Clause-Open-MPI"
] | 347
|
2015-01-06T12:53:27.000Z
|
2022-01-31T20:40:44.000Z
|
pylib/Tools/Fetch/FetchRPM.py
|
naughtont3/mtt
|
508761ca498dfd9902e508f87568d98eb0177903
|
[
"BSD-3-Clause-Open-MPI"
] | 46
|
2015-03-04T20:56:20.000Z
|
2022-03-18T18:28:47.000Z
|
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: f; python-indent: 4 -*-
#
# Copyright (c) 2015-2019 Intel, Inc. All rights reserved.
# Copyright (c) 2017-2018 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
import os
from urllib.parse import urlparse
from FetchMTTTool import *
from distutils.spawn import find_executable
import sys
import shutil
## @addtogroup Tools
# @{
# @addtogroup Fetch
# @section FetchRPM
# Plugin for fetching and locally installing rpms from the Web
# @param rpm rpm name (can be local file)
# @param url URL to where the rpm can be found if other than repository
# @param query Command to use to query pre-existing installation
# @param install Command to use to install the package
# @param sudo Superuser authority required
# @}
class FetchRPM(FetchMTTTool):
def __init__(self):
# initialise parent class
FetchMTTTool.__init__(self)
self.activated = False
# track the repos we have processed so we
# don't do them multiple times
self.done = {}
self.options = {}
self.options['rpm'] = (None, "rpm name - can be local file")
self.options['query'] = ("rpm -q", "Command to use to query pre-existing installation")
self.options['install'] = ("rpm -i", "Command to use to install the package")
self.options['sudo'] = (False, "Superuser authority required")
return
def activate(self):
if not self.activated:
# use the automatic procedure from IPlugin
IPlugin.activate(self)
return
def deactivate(self):
IPlugin.deactivate(self)
return
def print_name(self):
return "FetchRPM"
def print_options(self, testDef, prefix):
lines = testDef.printOptions(self.options)
for line in lines:
print(prefix + line)
return
def execute(self, log, keyvals, testDef):
testDef.logger.verbose_print("FetchRPM Execute")
# parse any provided options - these will override the defaults
cmds = {}
testDef.parseOptions(log, self.options, keyvals, cmds)
# check that they gave us an rpm name
try:
rpm = cmds['rpm']
if rpm is None:
raise KeyError
except KeyError:
log['status'] = 1
log['stderr'] = "No RPM was provided"
return
testDef.logger.verbose_print("Download rpm " + rpm)
# check to see if we have already processed this rpm
try:
if self.done[rpm] is not None:
log['status'] = self.done[rpm]
log['stdout'] = "RPM " + rpm + " has already been processed"
return
except KeyError:
pass
# look for the executable in our path - this is
# a standard system executable so we don't use
# environmental modules here
basecmd = cmds['query'].split(' ',1)[0]
if not find_executable(basecmd):
log['status'] = 1
log['stderr'] = "Executable " + basecmd + " not found"
return
# see if the rpm has already been installed on the system
testDef.logger.verbose_print("checking system for rpm: " + rpm)
qcmd = []
if cmds['sudo']:
qcmd.append("sudo")
tmp = cmds['query'].split()
for t in tmp:
qcmd.append(t)
qcmd.append(rpm)
results = testDef.execmd.execute(None, qcmd, testDef)
if 0 == results['status']:
log['status'] = 0
log['stdout'] = "RPM " + rpm + " already exists on system"
return
# setup to install
icmd = []
if cmds['sudo']:
icmd.append("sudo")
tmp = cmds['install'].split()
for t in tmp:
icmd.append(t)
icmd.append(rpm)
testDef.logger.verbose_print("installing package " + rpm)
results = testDef.execmd.execute(None, icmd, testDef)
if 0 != results['status']:
log['status'] = 1
log['stderr'] = "install of " + rpm + " FAILED"
return
# record the result
log['status'] = results['status']
log['stdout'] = results['stdout']
log['stderr'] = results['stderr']
# track that we serviced this one
self.done[rpm] = results['status']
return
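# Worked example of the commands this plugin derives (added note, traced from the
# code above rather than from MTT documentation): with query="rpm -q",
# install="rpm -i", sudo=True and rpm="foo", execute() first runs
#   sudo rpm -q foo     (already installed -> return success)
# and otherwise
#   sudo rpm -i foo     (install, then cache the status in self.done["foo"])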
| 32.89781
| 95
| 0.578877
|
29b57839cb0b1273cca31450ecc2823ab3916327
| 1,098
|
py
|
Python
|
setup.py
|
softfire-eu/physical-device-manager
|
737e857e1b47b46f35bf1c47117e1251341619d8
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
softfire-eu/physical-device-manager
|
737e857e1b47b46f35bf1c47117e1251341619d8
|
[
"Apache-2.0"
] | 1
|
2018-01-17T10:37:20.000Z
|
2018-01-17T10:37:20.000Z
|
setup.py
|
softfire-eu/physical-device-manager
|
737e857e1b47b46f35bf1c47117e1251341619d8
|
[
"Apache-2.0"
] | null | null | null |
import os
from setuptools import setup, find_packages
def read(fname):
readme_file_path = os.path.join(os.path.dirname(__file__), fname)
if os.path.exists(readme_file_path) and os.path.isfile(readme_file_path):
readme_file = open(readme_file_path)
return readme_file.read()
else:
return "The SoftFIRE Physical Device Manager"
setup(
name="physical-device-manager",
version="1.0.6",
author="SoftFIRE",
author_email="softfire@softfire.eu",
description="The SoftFIRE Physical Device Manager",
license="Apache 2",
keywords="python vnfm nfvo open baton openbaton sdk experiment manager softfire tosca openstack rest",
url="http://softfire.eu/",
packages=find_packages(),
scripts=["physical-device-manager"],
install_requires=[
'softfire-sdk==1.1.4',
'requests',
'PyYAML'
],
long_description=read('README.rst'),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
],
)
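# Hedged usage note (added; standard setuptools workflow assumed, nothing beyond
# it is stated in this file): from the repository root the package installs with
#   pip install .
# or builds a source distribution with
#   python setup.py sdist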
| 28.153846
| 106
| 0.663934
|
ae74f8a8fff1160482aa5d20216355a9ab8b5003
| 14,092
|
py
|
Python
|
testscripts/RDKB/component/WIFIAgent/TS_WIFIAGENT_ForceDisable_SetWiFiSSIDParams.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/WIFIAgent/TS_WIFIAGENT_ForceDisable_SetWiFiSSIDParams.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/WIFIAgent/TS_WIFIAGENT_ForceDisable_SetWiFiSSIDParams.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2020 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version='1.0' encoding='utf-8'?>
<xml>
<id></id>
<!-- Do not edit id. This will be auto filled while exporting. If you are adding a new script keep the id empty -->
<version>4</version>
<!-- Do not edit version. This will be auto incremented while updating. If you are adding a new script you can keep the vresion as 1 -->
<name>TS_WIFIAGENT_ForceDisable_SetWiFiSSIDParams</name>
<!-- If you are adding a new script you can specify the script name. Script Name should be unique same as this file name with out .py extension -->
<primitive_test_id></primitive_test_id>
<!-- Do not change primitive_test_id if you are editing an existing script. -->
<primitive_test_name>WIFIAgent_Get</primitive_test_name>
<!-- -->
<primitive_test_version>1</primitive_test_version>
<!-- -->
<status>FREE</status>
<!-- -->
<synopsis>To check if VAP 1-6 SSID's are not writable when WiFi Force Disable is enabled</synopsis>
<!-- -->
<groups_id />
<!-- -->
<execution_time>10</execution_time>
<!-- -->
<long_duration>false</long_duration>
<!-- -->
<advanced_script>false</advanced_script>
<!-- execution_time is the time out time for test execution -->
<remarks></remarks>
<!-- Reason for skipping the tests if marked to skip -->
<skip>false</skip>
<!-- -->
<box_types>
<box_type>Broadband</box_type>
<!-- -->
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
<!-- -->
</rdk_versions>
<test_cases>
<test_case_id>TC_WIFIAGENT_131</test_case_id>
<test_objective>This test case is to check if VAP 1-6 SSID's are not writable when WiFi Force Disable is enabled</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband</test_setup>
<pre_requisite>1.Ccsp Components in DUT should be in a running state that includes component under test Cable Modem
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>WIFIAgent_Get
WIFIAgent_Set
</api_or_interface_used>
<input_parameters>Device.WiFi.SSID.1.SSID
Device.WiFi.SSID.2.SSID
Device.WiFi.SSID.3.SSID
Device.WiFi.SSID.4.SSID
Device.WiFi.SSID.5.SSID
Device.WiFi.SSID.6.SSID
Device.WiFi.X_RDK-CENTRAL_COM_ForceDisable</input_parameters>
<automation_approch>1.Load the module
2.Get the current value for Device.WiFi.X_RDK-CENTRAL_COM_ForceDisable
3.Enable Device.WiFi.X_RDK-CENTRAL_COM_ForceDisable
4.Do a write operation on the following parameters
"Device.WiFi.SSID.1.SSID","Device.WiFi.SSID.2.SSID","Device.WiFi.SSID.3.SSID","Device.WiFi.SSID.4.SSID","Device.WiFi.SSID.5.SSID","Device.WiFi.SSID.6.SSID" ,and this write operation is expected to fail
5.Check if log message WIFI_ATTEMPT_TO_CHANGE_CONFIG_WHEN_FORCE_DISABLED" is present in WiFilog.txt.0 each time write operation is done
6.Unload the module</automation_approch>
<expected_output>Write operation on the listed wifi parameters should fail and
"WIFI_ATTEMPT_TO_CHANGE_CONFIG_WHEN_FORCE_DISABLED " message should be present in WiFilog.txt.0 each time a set operation is done when WiFi Force Disable is enabled</expected_output>
<priority>High</priority>
<test_stub_interface>WIFIAGENT</test_stub_interface>
<test_script>TS_WIFIAGENT_ForceDisable_SetWiFiSSIDParams</test_script>
<skipped>No</skipped>
<release_version>M84</release_version>
<remarks>None</remarks>
</test_cases>
<script_tags />
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from time import sleep;
from tdkbVariables import *;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("wifiagent","1");
obj1= tdklib.TDKScriptingLibrary("sysutil","1");
#IP and Port of box, No need to change,
#This will be replaced with corresponding DUT Ip and port while executing script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_WIFIAGENT_ForceDisable_SetWiFiSSIDParams');
obj1.configureTestCase(ip,port,'TS_WIFIAGENT_ForceDisable_SetWiFiSSIDParams');
#result of connection with test component and DUT
loadmodulestatus=obj.getLoadModuleResult();
loadmodulestatus1=obj1.getLoadModuleResult();
def getTelLogFileTotalLinesCount(tdkTestObj):
expectedresult="SUCCESS";
linecount =0;
RPCCmd = "sh %s/tdk_utility.sh parseConfigFile RPC_CMD" %TDK_PATH;
tdkTestObj.addParameter("command", RPCCmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
RPC_CMD = tdkTestObj.getResultDetails().strip()
RPC_CMD = RPC_CMD.replace("\\n", "");
if RPC_CMD:
print "The device needs rpc command";
cmd = RPC_CMD + " \"cat /rdklogs/logs/WiFilog.txt.0 | wc -l \" | grep -v \"*\" | sed -r \"/^\s*$/d\" ";
else:
cmd = "cat /rdklogs/logs/WiFilog.txt.0| wc -l";
tdkTestObj.addParameter("command",cmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
if expectedresult in actualresult:
print "current WiFilog.txt.0 line count:",details;
if details.isdigit():
linecount = int(details);
return actualresult,linecount;
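# Added note on the log-window technique used below (derived from this script,
# not from TDK documentation): the WiFilog.txt.0 line count is captured before
# each set attempt, and afterwards only the newly appended lines are inspected:
#   sed -n -e <count_before>,<count_after>p /rdklogs/logs/WiFilog.txt.0 \
#       | grep -i "WIFI_ATTEMPT_TO_CHANGE_CONFIG_WHEN_FORCE_DISABLED"
# so a stale message from an earlier iteration cannot produce a false pass.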
def SetOperation(tdkTestObj,parameter):
expectedresult="FAILURE";
tdkTestObj.addParameter("paramName",parameter)
tdkTestObj.addParameter("paramValue", "tdkbtestcase");
tdkTestObj.addParameter("paramType","string")
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
return actualresult,expectedresult;
if "SUCCESS" in loadmodulestatus.upper() and "SUCCESS" in loadmodulestatus1.upper():
#Set the result status of execution
obj.setLoadModuleStatus("SUCCESS")
obj1.setLoadModuleStatus("SUCCESS")
expectedresult="SUCCESS";
tdkTestObj = obj.createTestStep('WIFIAgent_Get');
tdkTestObj.addParameter("paramName","Device.WiFi.X_RDK-CENTRAL_COM_ForceDisable")
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
default = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
default = default.split("VALUE:")[1].split(" ")[0].strip();
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Get the current WiFi Force Disable state";
print "EXPECTED RESULT 1: Should get current WiFi Force Disable state";
print "ACTUAL RESULT 1: current WiFi Force Disable state is %s" %default;
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('WIFIAgent_Set');
tdkTestObj.addParameter("paramName","Device.WiFi.X_RDK-CENTRAL_COM_ForceDisable")
tdkTestObj.addParameter("paramValue", "true");
tdkTestObj.addParameter("paramType","boolean")
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 2: Enable the WiFi Force Disable";
print "EXPECTED RESULT 2: Should enable Force Disable state";
print "ACTUAL RESULT 2: %s" %details;
print "[TEST EXECUTION RESULT] : SUCCESS";
params = ["Device.WiFi.SSID.1.SSID","Device.WiFi.SSID.2.SSID","Device.WiFi.SSID.3.SSID","Device.WiFi.SSID.4.SSID","Device.WiFi.SSID.5.SSID","Device.WiFi.SSID.6.SSID"]
for parameter in params:
tdkTestObj = obj1.createTestStep('ExecuteCmd');
lineCountResult, initialLinesCount = getTelLogFileTotalLinesCount(tdkTestObj);
if expectedresult in lineCountResult:
tdkTestObj.setResultStatus("SUCCESS");
tdkTestObj = obj.createTestStep('WIFIAgent_Set');
print "***performing write operation on %s ****" %parameter;
actualresult,expectedResult= SetOperation(tdkTestObj,parameter);
if expectedResult in actualresult:
sleep(10);
tdkTestObj = obj1.createTestStep('ExecuteCmd');
lineCountResult1, lineCountAfterSimu = getTelLogFileTotalLinesCount(tdkTestObj);
if expectedresult in lineCountResult1:
tdkTestObj = obj1.createTestStep('ExecuteCmd');
RPCCmd = "sh %s/tdk_utility.sh parseConfigFile RPC_CMD" %TDK_PATH;
tdkTestObj.addParameter("command", RPCCmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
RPC_CMD = tdkTestObj.getResultDetails().strip();
RPC_CMD = RPC_CMD.replace("\\n", "");
if RPC_CMD:
print "The device needs rpc command";
cmd = RPC_CMD + " 'sed -n -e %s,%sp /rdklogs/logs/WiFilog.txt.0 | grep -i \"WIFI_ATTEMPT_TO_CHANGE_CONFIG_WHEN_FORCE_DISABLED\" ' "%(initialLinesCount,lineCountAfterSimu);
else:
cmd = "sed -n -e %s,%sp /rdklogs/logs/WiFilog.txt.0 | grep -i \"WIFI_ATTEMPT_TO_CHANGE_CONFIG_WHEN_FORCE_DISABLED\"" %(initialLinesCount,lineCountAfterSimu) ;
print "cmd:",cmd;
print "WIFI_ATTEMPT_TO_CHANGE_CONFIG_WHEN_FORCE_DISABLED log message should be present in WiFilog.txt.0";
tdkTestObj.addParameter("command", cmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
if expectedresult in actualresult and "WIFI_ATTEMPT_TO_CHANGE_CONFIG_WHEN_FORCE_DISABLED" in details:
tdkTestObj.setResultStatus("SUCCESS");
print details;
print "[TEST EXECUTION RESULT] :SUCCESS";
print "********************************************";
else:
tdkTestObj.setResultStatus("FAILURE");
print "WIFI_ATTEMPT_TO_CHANGE_CONFIG_WHEN_FORCE_DISABLED didnot populate when trying to set %s in WiFilog.txt.0" %parameter;
print "[TEST EXECUTION RESULT] :FAILURE";
print "*****************************************";
else:
tdkTestObj.setResultStatus("FAILURE");
print "*******Failed get the line count of the log file*****";
else:
tdkTestObj.setResultStatus("FAILURE");
print "%s set was success even with Device.WiFi.X_RDK-CENTRAL_COM_ForceDisable being enabled";
print "*********************************************"
else:
tdkTestObj.setResultStatus("FAILURE");
print "*******Failed get the line count of the log file*****";
#Revertion
tdkTestObj = obj.createTestStep('WIFIAgent_Set');
tdkTestObj.addParameter("paramName","Device.WiFi.X_RDK-CENTRAL_COM_ForceDisable")
tdkTestObj.addParameter("paramValue", default);
tdkTestObj.addParameter("paramType","boolean")
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP : Revert the WiFi Force Disable to previous";
print "EXPECTED RESULT : Should revert Force Disable state to %s" %default;
print "ACTUAL RESULT : %s" %details;
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP : Revert the WiFi Force Disable to previous";
print "EXPECTED RESULT : Should revert Force Disable state to %s" %default;
print "ACTUAL RESULT : %s" %details;
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 2: Disable the WiFi Force Disable";
print "EXPECTED RESULT 2: Should Disable Force Disable state";
print "ACTUAL RESULT 2: %s" %details;
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1: Get the current WiFi Force Disable state";
print "EXPECTED RESULT 1: Should get current WiFi Force Disable state";
print "ACTUAL RESULT 1: current WiFi Force Disable state is %s" %default;
print "[TEST EXECUTION RESULT] : FAILURE";
obj.unloadModule("wifiagent")
obj1.unloadModule("sysutil");
else:
print "Failed to load wifiagent/sysutil module";
obj.setLoadModuleStatus("FAILURE");
obj1.setLoadModuleStatus("FAILURE");
| 52.192593
| 201
| 0.650653
|
8535ec085c4d4f7ac531c84a3ed1a2d750de8cb2
| 844
|
py
|
Python
|
sphinxcontrib_django_urls/__init__.py
|
mkalioby/sphinxcontrib-django-urls
|
89cd2ec41b7e685fbb810f43740e55c7212d4c0b
|
[
"MIT"
] | null | null | null |
sphinxcontrib_django_urls/__init__.py
|
mkalioby/sphinxcontrib-django-urls
|
89cd2ec41b7e685fbb810f43740e55c7212d4c0b
|
[
"MIT"
] | null | null | null |
sphinxcontrib_django_urls/__init__.py
|
mkalioby/sphinxcontrib-django-urls
|
89cd2ec41b7e685fbb810f43740e55c7212d4c0b
|
[
"MIT"
] | null | null | null |
try:
from django.urls import get_resolver
except:
try:
from django.core.urlresolvers import get_resolver
except:
raise Exception("Can't find Django")
__version__ = "0.1"
def setup(app):
app.connect('autodoc-process-docstring', add_django_url)
def add_django_url(app, what, name, obj, options, lines):
if what == 'function':
res = get_resolver()
if obj in res.reverse_dict:
url_struct = res.reverse_dict[obj]
if len(url_struct) > 0:
lines.append("URL path(s):")
for url in url_struct[:-2]:
if type(url) == type([]): continue
lines.append(" * %s\n" % url)
else:
lines.insert(0,"| has NO URL mapping\n")
else:
lines.append("URL path(s): NONE")
| 28.133333
| 60
| 0.550948
|
f3e8568e248bac08e9664765966779b1bbcaeeb9
| 5,292
|
py
|
Python
|
people_counter.py
|
Walabot-Projects/PeopleCounter
|
5674a0ee979eb069032a17c645c4da29d9a53412
|
[
"MIT"
] | 15
|
2016-08-03T15:10:54.000Z
|
2022-03-30T21:22:06.000Z
|
people_counter.py
|
Walabot-Projects/PeopleCounter
|
5674a0ee979eb069032a17c645c4da29d9a53412
|
[
"MIT"
] | null | null | null |
people_counter.py
|
Walabot-Projects/PeopleCounter
|
5674a0ee979eb069032a17c645c4da29d9a53412
|
[
"MIT"
] | 2
|
2017-01-11T04:08:03.000Z
|
2021-11-16T03:54:08.000Z
|
from __future__ import print_function # WalabotAPI works on both Python 2 and 3.
from sys import platform
from os import system
from imp import load_source
from enum import Enum
import WalabotAPI as walabotAPI
walabotAPI.Init()
def PrintTrackerTargets(targets):
system('cls' if platform == 'win32' else 'clear')
if targets:
for i, target in enumerate(targets):
print(('y: {}'.format(target.yPosCm)))
else:
print('No Target Detected')
##################################################
####### People Counter Class and Logic #######
##################################################
# ----------------------------
# Outside | | | Inside
# The | Back | Front | the
# Room | | | Room
# ----------------------------
class Placement(Enum):
Empty = 0 # Target not in the arena
Back = 1 # Target in the back of the arena
Front = 2 # Target in the front of the arena
class State(Enum):
Idle = 0 # Nobody in the arena
Bi = 1 # In the back - coming in
Fi = 2 # In the front - coming in
Bo = 3 # In the back - coming out
Fo = 4 # In the front - coming out
def _get_placement(targets):
if len(targets) == 0:
return Placement.Empty
if targets[0].yPosCm > 0:
return Placement.Front
if targets[0].yPosCm <= 0:
return Placement.Back
class PeopleCounter:
def __init__(self):
self.placement = Placement.Empty
self.state = State.Idle
self.count = 0
self.state_machine = {
State.Idle:
{Placement.Empty: State.Idle,
Placement.Back: State.Bi,
Placement.Front: State.Fo},
State.Bi:
{Placement.Empty: State.Idle,
Placement.Back: State.Bi,
Placement.Front: State.Fi},
State.Fi:
{Placement.Empty: State.Idle, # increment
Placement.Back: State.Bi,
Placement.Front: State.Fi},
State.Fo:
{Placement.Empty: State.Idle,
Placement.Back: State.Bo,
Placement.Front: State.Fo},
State.Bo:
{Placement.Empty: State.Idle, # decrement
Placement.Back: State.Bo,
Placement.Front: State.Fo},
}
def update_state_get_count(self, targets):
self.placement = _get_placement(targets)
prev_state = self.state
self.state = self.state_machine[self.state][self.placement]
if prev_state == State.Bo and self.state == State.Idle:
self._decrement()
elif prev_state == State.Fi and self.state == State.Idle:
self._increment()
return self.count
def _increment(self):
self.count += 1
return State.Idle
def _decrement(self):
self.count = max(self.count - 1, 0)
return State.Idle
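# Added walkthrough (not in the original file) of the state machine above: a
# target moving back -> front -> out of the arena counts one person entering.
#
#   counter = PeopleCounter()
#   class FakeTarget(object):                 # stand-in for a Walabot target
#       def __init__(self, y): self.yPosCm = y
#   counter.update_state_get_count([FakeTarget(-30)])  # Back  -> State.Bi
#   counter.update_state_get_count([FakeTarget(40)])   # Front -> State.Fi
#   counter.update_state_get_count([])                 # Empty -> increment, count == 1
#
# The mirror sequence (front -> back -> empty) passes through State.Fo/State.Bo
# and decrements the count instead.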
def PeopleCounterApp():
# PeopleCounter object
people_counter = PeopleCounter()
# walabotAPI.SetArenaR - input parameters
rArenaMin, rArenaMax, rArenaRes = 5, 120, 5
# walabotAPI.SetArenaPhi - input parameters
phiArenaMin, phiArenaMax, phiArenaRes = -60, 60, 3
# walabotAPI.SetArenaTheta - input parameters
thetaArenaMin, thetaArenaMax, thetaArenaRes = -20, 20, 10
# Configure Walabot database install location (for windows)
walabotAPI.SetSettingsFolder()
# 1) Connect: Establish communication with walabot.
walabotAPI.ConnectAny()
# 2) Configure: Set scan profile and arena
# Set Profile - to Tracker.
walabotAPI.SetProfile(walabotAPI.PROF_TRACKER)
# Set arena by Polar coordinates, with arena resolution
walabotAPI.SetArenaR(rArenaMin, rArenaMax, rArenaRes)
walabotAPI.SetArenaPhi(phiArenaMin, phiArenaMax, phiArenaRes)
walabotAPI.SetArenaTheta(thetaArenaMin, thetaArenaMax, thetaArenaRes)
# Walabot filtering MTI
walabotAPI.SetDynamicImageFilter(walabotAPI.FILTER_TYPE_MTI)
# 3) Start: Start the system in preparation for scanning.
walabotAPI.Start()
try:
num_of_people = 0
while True:
# 4) Trigger: Scan (sense) according to profile and record signals
# to be available for processing and retrieval.
walabotAPI.Trigger()
# 5) Get action: retrieve the last completed triggered recording
targets = walabotAPI.GetTrackerTargets()
# 6) Sort targets by z position (descending)
targets = sorted(targets, key=lambda x: x.zPosCm, reverse=True)
# 7) Update state and get people count
prev_num_of_people = num_of_people
num_of_people = people_counter.update_state_get_count(targets)
if prev_num_of_people != num_of_people:
print('# {} #\n'.format(num_of_people))
# print y-axis of target found
# PrintTrackerTargets(targets)
except KeyboardInterrupt:
pass
finally:
# 8) Stop and Disconnect.
walabotAPI.Stop()
walabotAPI.Disconnect()
walabotAPI.Clean()
print('Terminated successfully!')
if __name__ == '__main__':
PeopleCounterApp()
| 34.141935
| 79
| 0.590703
|
8e439b42dd91ef8113e0e7ced9212d5eb94f1d1b
| 30,433
|
py
|
Python
|
evennia/server/sessionhandler.py
|
zeitkunst/evennia
|
1f254b2542fbefe400c114b3d7029522cdcb37b7
|
[
"BSD-3-Clause"
] | 3
|
2019-08-08T16:58:25.000Z
|
2019-10-12T07:31:36.000Z
|
evennia/server/sessionhandler.py
|
zeitkunst/evennia
|
1f254b2542fbefe400c114b3d7029522cdcb37b7
|
[
"BSD-3-Clause"
] | 9
|
2019-09-06T18:21:59.000Z
|
2022-01-13T03:04:11.000Z
|
evennia/server/sessionhandler.py
|
zeitkunst/evennia
|
1f254b2542fbefe400c114b3d7029522cdcb37b7
|
[
"BSD-3-Clause"
] | 2
|
2019-09-02T08:39:24.000Z
|
2019-09-02T18:39:32.000Z
|
"""
This module defines handlers for storing sessions of users
connecting to the server.
There are two similar but separate stores of sessions:
- ServerSessionHandler - this stores generic game sessions
for the game. These sessions have no knowledge about
how they are connected to the world.
- PortalSessionHandler - this stores sessions created by
twisted protocols. These are dumb connectors that
handle network communication but hold no game info.
"""
import time
from builtins import object
from future.utils import listvalues
from django.conf import settings
from evennia.commands.cmdhandler import CMD_LOGINSTART
from evennia.utils.logger import log_trace
from evennia.utils.utils import (variable_from_module, is_iter,
to_str, to_unicode,
make_iter, delay,
callables_from_module)
from evennia.utils.inlinefuncs import parse_inlinefunc
try:
import cPickle as pickle
except ImportError:
import pickle
_INLINEFUNC_ENABLED = settings.INLINEFUNC_ENABLED
# delayed imports
_AccountDB = None
_ServerSession = None
_ServerConfig = None
_ScriptDB = None
_OOB_HANDLER = None
class DummySession(object):
sessid = 0
DUMMYSESSION = DummySession()
# AMP signals
PCONN = chr(1) # portal session connect
PDISCONN = chr(2) # portal session disconnect
PSYNC = chr(3) # portal session sync
SLOGIN = chr(4) # server session login
SDISCONN = chr(5) # server session disconnect
SDISCONNALL = chr(6) # server session disconnect all
SSHUTD = chr(7) # server shutdown
SSYNC = chr(8) # server session sync
SCONN = chr(11) # server portal connection (for bots)
PCONNSYNC = chr(12) # portal post-syncing session
PDISCONNALL = chr(13) # portal session disconnect all
SRELOAD = chr(14) # server reloading (have portal start a new server)
SSTART = chr(15) # server start (portal must already be running anyway)
PSHUTD = chr(16) # portal (+server) shutdown
SSHUTD = chr(17) # server shutdown
PSTATUS = chr(18) # ping server or portal status
SRESET = chr(19) # server shutdown in reset mode
# i18n
from django.utils.translation import ugettext as _
_SERVERNAME = settings.SERVERNAME
_MULTISESSION_MODE = settings.MULTISESSION_MODE
_IDLE_TIMEOUT = settings.IDLE_TIMEOUT
_DELAY_CMD_LOGINSTART = settings.DELAY_CMD_LOGINSTART
_MAX_SERVER_COMMANDS_PER_SECOND = 100.0
_MAX_SESSION_COMMANDS_PER_SECOND = 5.0
_MODEL_MAP = None
# input handlers
_INPUT_FUNCS = {}
for modname in make_iter(settings.INPUT_FUNC_MODULES):
_INPUT_FUNCS.update(callables_from_module(modname))
def delayed_import():
"""
Helper method for delayed import of all needed entities.
"""
global _ServerSession, _AccountDB, _ServerConfig, _ScriptDB
if not _ServerSession:
# we allow optional arbitrary serversession class for overloading
modulename, classname = settings.SERVER_SESSION_CLASS.rsplit(".", 1)
_ServerSession = variable_from_module(modulename, classname)
if not _AccountDB:
from evennia.accounts.models import AccountDB as _AccountDB
if not _ServerConfig:
from evennia.server.models import ServerConfig as _ServerConfig
if not _ScriptDB:
from evennia.scripts.models import ScriptDB as _ScriptDB
# including once to avoid warnings in Python syntax checkers
assert(_ServerSession)
assert(_AccountDB)
assert(_ServerConfig)
assert(_ScriptDB)
#-----------------------------------------------------------
# SessionHandler base class
#------------------------------------------------------------
class SessionHandler(dict):
"""
This handler holds a stack of sessions.
"""
def __getitem__(self, key):
"Clean out None-sessions automatically."
if None in self:
del self[None]
return super(SessionHandler, self).__getitem__(key)
def get(self, key, default=None):
"Clean out None-sessions automatically."
if None in self:
del self[None]
return super(SessionHandler, self).get(key, default)
def __setitem__(self, key, value):
"Don't assign None sessions"
if key is not None:
super(SessionHandler, self).__setitem__(key, value)
def __contains__(self, key):
"None-keys are not accepted."
return False if key is None else super(SessionHandler, self).__contains__(key)
def get_sessions(self, include_unloggedin=False):
"""
Returns the connected session objects.
Args:
include_unloggedin (bool, optional): Also list Sessions
that have not yet authenticated.
Returns:
sessions (list): A list of `Session` objects.
"""
if include_unloggedin:
return listvalues(self)
else:
return [session for session in self.values() if session.logged_in]
def get_all_sync_data(self):
"""
Create a dictionary of sessdata dicts representing all
sessions in store.
Returns:
syncdata (dict): A dict of sync data.
"""
return dict((sessid, sess.get_sync_data()) for sessid, sess in self.items())
def clean_senddata(self, session, kwargs):
"""
Clean up data for sending across the AMP wire. Also apply INLINEFUNCS.
Args:
session (Session): The relevant session instance.
kwargs (dict) Each keyword represents a
send-instruction, with the keyword itself being the name
of the instruction (like "text"). Suitable values for each
keyword are:
- arg -> [[arg], {}]
- [args] -> [[args], {}]
- {kwargs} -> [[], {kwargs}]
- [args, {kwargs}] -> [[arg], {kwargs}]
- [[args], {kwargs}] -> [[args], {kwargs}]
Returns:
kwargs (dict): A cleaned dictionary of cmdname:[[args],{kwargs}] pairs,
where the keys, args and kwargs have all been converted to
send-safe entities (strings or numbers), and inlinefuncs have been
applied.
"""
options = kwargs.pop("options", None) or {}
raw = options.get("raw", False)
strip_inlinefunc = options.get("strip_inlinefunc", False)
def _validate(data):
"Helper function to convert data to AMP-safe (picketable) values"
if isinstance(data, dict):
newdict = {}
for key, part in data.items():
newdict[key] = _validate(part)
return newdict
elif hasattr(data, "__iter__"):
return [_validate(part) for part in data]
elif isinstance(data, basestring):
# make sure strings are in a valid encoding
try:
data = data and to_str(to_unicode(data), encoding=session.protocol_flags["ENCODING"])
except LookupError:
# wrong encoding set on the session. Set it to a safe one
session.protocol_flags["ENCODING"] = "utf-8"
data = to_str(to_unicode(data), encoding=session.protocol_flags["ENCODING"])
if _INLINEFUNC_ENABLED and not raw and isinstance(self, ServerSessionHandler):
# only parse inlinefuncs on the outgoing path (sessionhandler->)
data = parse_inlinefunc(data, strip=strip_inlinefunc, session=session)
# At this point the object is certainly the right encoding, but may still be a unicode object--
# to_str does not actually force objects to become bytestrings.
# If the unicode object is a subclass of unicode, such as ANSIString, this can cause a problem,
# as special behavior for that class will still be in play. Since we're now transferring raw data,
# we must now force this to be a proper bytestring.
return str(data)
elif hasattr(data, "id") and hasattr(data, "db_date_created") \
and hasattr(data, '__dbclass__'):
# convert database-object to their string representation.
return _validate(unicode(data))
else:
return data
rkwargs = {}
for key, data in kwargs.iteritems():
key = _validate(key)
if not data:
if key == "text":
# we don't allow sending text = None, this must mean
# that the text command is not to be used.
continue
rkwargs[key] = [[], {}]
elif isinstance(data, dict):
rkwargs[key] = [[], _validate(data)]
elif hasattr(data, "__iter__"):
if isinstance(data[-1], dict):
if len(data) == 2:
if hasattr(data[0], "__iter__"):
rkwargs[key] = [_validate(data[0]), _validate(data[1])]
else:
rkwargs[key] = [[_validate(data[0])], _validate(data[1])]
else:
rkwargs[key] = [_validate(data[:-1]), _validate(data[-1])]
else:
rkwargs[key] = [_validate(data), {}]
else:
rkwargs[key] = [[_validate(data)], {}]
rkwargs[key][1]["options"] = options
return rkwargs
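# Added illustration (not part of the original module): given
#   kwargs = {"text": "Hello", "prompt": (["HP: 10"], {"color": "r"})}
# clean_senddata returns roughly
#   {"text":   [["Hello"],  {"options": {...}}],
#    "prompt": [["HP: 10"], {"color": "r", "options": {...}}]}
# i.e. every send-instruction is normalised to the [[args], {kwargs}] form with
# the session's send options attached before crossing the AMP wire.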
#------------------------------------------------------------
# Server-SessionHandler class
#------------------------------------------------------------
class ServerSessionHandler(SessionHandler):
"""
This object holds the stack of sessions active in the game at
any time.
A session registers with the handler in two steps: first by
registering itself with the connect() method. This indicates a
non-authenticated session. Whenever the session is authenticated,
the session together with the related account is sent to the login()
method.
"""
# AMP communication methods
def __init__(self, *args, **kwargs):
"""
Init the handler.
"""
super(ServerSessionHandler, self).__init__(*args, **kwargs)
self.server = None # set at server initialization
self.server_data = {"servername": _SERVERNAME}
def _run_cmd_login(self, session):
"""
Launch the CMD_LOGINSTART command. This is wrapped
for delays.
"""
if not session.logged_in:
self.data_in(session, text=[[CMD_LOGINSTART], {}])
def portal_connect(self, portalsessiondata):
"""
Called by Portal when a new session has connected.
Creates a new, unlogged-in game session.
Args:
portalsessiondata (dict): a dictionary of all property:value
keys defining the session and which is marked to be
synced.
"""
delayed_import()
global _ServerSession, _AccountDB, _ScriptDB
sess = _ServerSession()
sess.sessionhandler = self
sess.load_sync_data(portalsessiondata)
sess.at_sync()
# validate all scripts
_ScriptDB.objects.validate()
self[sess.sessid] = sess
if sess.logged_in and sess.uid:
# Session is already logged in. This can happen in the
# case of auto-authenticating protocols like SSH or
# webclient's session sharing
account = _AccountDB.objects.get_account_from_uid(sess.uid)
if account:
# this will set account.is_connected too
self.login(sess, account, force=True)
return
else:
sess.logged_in = False
sess.uid = None
# show the first login command, may delay slightly to allow
# the handshakes to finish.
delay(_DELAY_CMD_LOGINSTART, self._run_cmd_login, sess)
def portal_session_sync(self, portalsessiondata):
"""
Called by Portal when it wants to update a single session (e.g.
because of all negotiation protocols have finally replied)
Args:
portalsessiondata (dict): a dictionary of all property:value
keys defining the session and which is marked to be
synced.
"""
sessid = portalsessiondata.get("sessid")
session = self.get(sessid)
if session:
# since some of the session properties may have had
# a chance to change already before the portal gets here
# the portal doesn't send all sessiondata but only
# ones which should only be changed from portal (like
# protocol_flags etc)
session.load_sync_data(portalsessiondata)
def portal_sessions_sync(self, portalsessionsdata):
"""
Syncing all session ids of the portal with the ones of the
server. This is instantiated by the portal when reconnecting.
Args:
portalsessionsdata (dict): A dictionary
`{sessid: {property:value},...}` defining each session and
the properties in it which should be synced.
"""
delayed_import()
global _ServerSession, _AccountDB, _ServerConfig, _ScriptDB
for sess in self.values():
# we delete the old session to make sure to catch eventual
# lingering references.
del sess
for sessid, sessdict in portalsessionsdata.items():
sess = _ServerSession()
sess.sessionhandler = self
sess.load_sync_data(sessdict)
if sess.uid:
sess.account = _AccountDB.objects.get_account_from_uid(sess.uid)
self[sessid] = sess
sess.at_sync()
mode = 'reload'
# tell the server hook we synced
self.server.at_post_portal_sync(mode)
# announce the reconnection
self.announce_all(_(" ... Server restarted."))
def portal_disconnect(self, session):
"""
Called from Portal when Portal session closed from the portal
side. There is no message to report in this case.
Args:
session (Session): The Session to disconnect
"""
# disconnect us without calling Portal since
# Portal already knows.
self.disconnect(session, reason="", sync_portal=False)
def portal_disconnect_all(self):
"""
Called from Portal when Portal is closing down. All
Sessions should die. The Portal should not be informed.
"""
# set a watchdog to avoid self.disconnect from deleting
# the session while we are looping over them
self._disconnect_all = True
for session in self.values():
session.disconnect()
del self._disconnect_all
# server-side access methods
def start_bot_session(self, protocol_path, configdict):
"""
This method allows the server-side to force the Portal to
create a new bot session.
Args:
protocol_path (str): The full python path to the bot's
class.
configdict (dict): This dict will be used to configure
the bot (this depends on the bot protocol).
Examples:
start_bot_session("evennia.server.portal.irc.IRCClient",
{"uid":1, "botname":"evbot", "channel":"#evennia",
"network:"irc.freenode.net", "port": 6667})
Notes:
The new session will use the supplied account-bot uid to
initiate an already logged-in connection. The Portal will
treat this as a normal connection and henceforth so will
the Server.
"""
self.server.amp_protocol.send_AdminServer2Portal(DUMMYSESSION, operation=SCONN,
protocol_path=protocol_path, config=configdict)
def portal_restart_server(self):
"""
Called by server when reloading. We tell the portal to start a new server instance.
"""
self.server.amp_protocol.send_AdminServer2Portal(DUMMYSESSION, operation=SRELOAD)
def portal_reset_server(self):
"""
Called by server when reloading. We tell the portal to start a new server instance.
"""
self.server.amp_protocol.send_AdminServer2Portal(DUMMYSESSION, operation=SRESET)
def portal_shutdown(self):
"""
Called by server when it's time to shut down (the portal will shut us down and then shut
itself down)
"""
self.server.amp_protocol.send_AdminServer2Portal(DUMMYSESSION,
operation=PSHUTD)
def login(self, session, account, force=False, testmode=False):
"""
Log in the previously unloggedin session and the account we by
now should know is connected to it. After this point we assume
the session to be logged in one way or another.
Args:
session (Session): The Session to authenticate.
account (Account): The Account identified as associated with this Session.
force (bool): Login also if the session thinks it's already logged in
(this can happen for auto-authenticating protocols)
testmode (bool, optional): This is used by unittesting for
faking login without any AMP being actually active.
"""
if session.logged_in and not force:
# don't log in a session that is already logged in.
return
account.is_connected = True
# sets up and assigns all properties on the session
session.at_login(account)
# account init
account.at_init()
# Check if this is the first time the *account* logs in
if account.db.FIRST_LOGIN:
account.at_first_login()
del account.db.FIRST_LOGIN
account.at_pre_login()
if _MULTISESSION_MODE == 0:
# disconnect all previous sessions.
self.disconnect_duplicate_sessions(session)
nsess = len(self.sessions_from_account(account))
string = "Logged in: {account} {address} ({nsessions} session(s) total)"
string = string.format(account=account, address=session.address, nsessions=nsess)
session.log(string)
session.logged_in = True
# sync the portal to the session
if not testmode:
self.server.amp_protocol.send_AdminServer2Portal(session,
operation=SLOGIN,
sessiondata={"logged_in": True,
"uid": session.uid})
account.at_post_login(session=session)
def disconnect(self, session, reason="", sync_portal=True):
"""
Called from server side to remove session and inform portal
of this fact.
Args:
session (Session): The Session to disconnect.
reason (str, optional): A motivation for the disconnect.
sync_portal (bool, optional): Sync the disconnect to
Portal side. This should be done unless this was
called by self.portal_disconnect().
"""
session = self.get(session.sessid)
if not session:
return
if hasattr(session, "account") and session.account:
# only log accounts logging off
nsess = len(self.sessions_from_account(session.account)) - 1
sreason = " ({})".format(reason) if reason else ""
string = "Logged out: {account} {address} ({nsessions} sessions(s) remaining){reason}"
string = string.format(reason=sreason, account=session.account, address=session.address, nsessions=nsess)
session.log(string)
session.at_disconnect(reason)
sessid = session.sessid
if sessid in self and not hasattr(self, "_disconnect_all"):
del self[sessid]
if sync_portal:
# inform portal that session should be closed.
self.server.amp_protocol.send_AdminServer2Portal(session,
operation=SDISCONN,
reason=reason)
def all_sessions_portal_sync(self):
"""
This is called by the server when it reboots. It syncs all session data
to the portal. Returns a deferred!
"""
sessdata = self.get_all_sync_data()
return self.server.amp_protocol.send_AdminServer2Portal(DUMMYSESSION,
operation=SSYNC,
sessiondata=sessdata)
def session_portal_sync(self, session):
"""
This is called by the server when it wants to sync a single session
with the Portal for whatever reason. Returns a deferred!
"""
sessdata = {session.sessid: session.get_sync_data()}
return self.server.amp_protocol.send_AdminServer2Portal(DUMMYSESSION,
operation=SSYNC,
sessiondata=sessdata,
clean=False)
def session_portal_partial_sync(self, session_data):
"""
Call to make a partial update of the session, such as only a particular property.
Args:
session_data (dict): Store `{sessid: {property:value}, ...}` defining one or
more sessions in detail.
"""
return self.server.amp_protocol.send_AdminServer2Portal(DUMMYSESSION,
operation=SSYNC,
sessiondata=session_data,
clean=False)
def disconnect_all_sessions(self, reason="You have been disconnected."):
"""
Cleanly disconnect all of the connected sessions.
Args:
reason (str, optional): The reason for the disconnection.
"""
for session in self:
del session
# tell portal to disconnect all sessions
self.server.amp_protocol.send_AdminServer2Portal(DUMMYSESSION,
operation=SDISCONNALL,
reason=reason)
def disconnect_duplicate_sessions(self, curr_session,
reason=_("Logged in from elsewhere. Disconnecting.")):
"""
Disconnects any existing sessions with the same user.
args:
curr_session (Session): Disconnect all Sessions matching this one.
reason (str, optional): A motivation for disconnecting.
"""
uid = curr_session.uid
# we can't compare sessions directly since this will compare addresses and
# mean connecting from the same host would not catch duplicates
sid = id(curr_session)
doublet_sessions = [sess for sess in self.values()
if sess.logged_in and
sess.uid == uid and
id(sess) != sid]
for session in doublet_sessions:
self.disconnect(session, reason)
def validate_sessions(self):
"""
Check all currently connected sessions (logged in and not) and
see if any are dead or idle.
"""
tcurr = time.time()
reason = _("Idle timeout exceeded, disconnecting.")
for session in (session for session in self.values()
if session.logged_in and _IDLE_TIMEOUT > 0 and
(tcurr - session.cmd_last) > _IDLE_TIMEOUT):
self.disconnect(session, reason=reason)
def account_count(self):
"""
Get the number of connected accounts (not sessions since a
account may have more than one session depending on settings).
Only logged-in accounts are counted here.
Returns:
naccount (int): Number of connected accounts
"""
return len(set(session.uid for session in self.values() if session.logged_in))
def all_connected_accounts(self):
"""
Get a unique list of connected and logged-in Accounts.
Returns:
accounts (list): All connected Accounts (which may be fewer than the
amount of Sessions due to multi-playing).
"""
return list(set(session.account for session in self.values() if session.logged_in and session.account))
def session_from_sessid(self, sessid):
"""
Get session based on sessid, or None if not found
Args:
sessid (int or list): Session id(s).
Return:
sessions (Session or list): Session(s) found. This
is a list if input was a list.
"""
if is_iter(sessid):
return [self.get(sid) for sid in sessid if sid in self]
return self.get(sessid)
def session_from_account(self, account, sessid):
"""
Given an account and a session id, return the actual session
object.
Args:
account (Account): The Account to get the Session from.
sessid (int or list): Session id(s).
Returns:
sessions (Session or list): Session(s) found.
"""
sessions = [self[sid] for sid in make_iter(sessid)
if sid in self and self[sid].logged_in and account.uid == self[sid].uid]
return sessions[0] if len(sessions) == 1 else sessions
def sessions_from_account(self, account):
"""
Given an account, return all matching sessions.
Args:
account (Account): Account to get sessions from.
Returns:
sessions (list): All Sessions associated with this account.
"""
uid = account.uid
return [session for session in self.values() if session.logged_in and session.uid == uid]
def sessions_from_puppet(self, puppet):
"""
Given a puppeted object, return all controlling sessions.
Args:
puppet (Object): Object puppeted
Returns.
sessions (Session or list): Can be more than one if Object is controlled by
more than one Session (MULTISESSION_MODE > 1).
"""
sessions = puppet.sessid.get()
return sessions[0] if len(sessions) == 1 else sessions
sessions_from_character = sessions_from_puppet
def sessions_from_csessid(self, csessid):
"""
Given a client identification hash (for session types that offer them) return all sessions with
a matching hash.
Args
csessid (str): The session hash
"""
return [session for session in self.values()
if session.csessid and session.csessid == csessid]
def announce_all(self, message):
"""
Send message to all connected sessions
Args:
message (str): Message to send.
"""
for session in self.values():
self.data_out(session, text=message)
def data_out(self, session, **kwargs):
"""
Sending data Server -> Portal
Args:
session (Session): Session to relay to.
text (str, optional): text data to return
Notes:
The outdata will be scrubbed for sending across
the wire here.
"""
# clean output for sending
kwargs = self.clean_senddata(session, kwargs)
# send across AMP
self.server.amp_protocol.send_MsgServer2Portal(session,
**kwargs)
def get_inputfuncs(self):
"""
Get all registered inputfuncs (access function)
Returns:
inputfuncs (dict): A dict of {key:inputfunc,...}
"""
return _INPUT_FUNCS
def data_in(self, session, **kwargs):
"""
We let the data take a "detour" to session.data_in
so the user can override and see it all in one place.
That method is responsible to in turn always call
this class' `sessionhandler.call_inputfunc` with the
(possibly processed) data.
"""
if session:
session.data_in(**kwargs)
def call_inputfuncs(self, session, **kwargs):
"""
Split incoming data into its inputfunc counterparts.
This should be called by the serversession.data_in
as sessionhandler.call_inputfunc(self, **kwargs).
We also intercept OOB communication here.
Args:
sessions (Session): Session.
Kwargs:
kwargs (any): Incoming data from protocol on
the form `{"commandname": ((args), {kwargs}),...}`
"""
# distribute incoming data to the correct receiving methods.
if session:
input_debug = session.protocol_flags.get("INPUTDEBUG", False)
for cmdname, (cmdargs, cmdkwargs) in kwargs.iteritems():
cname = cmdname.strip().lower()
try:
cmdkwargs.pop("options", None)
if cname in _INPUT_FUNCS:
_INPUT_FUNCS[cname](session, *cmdargs, **cmdkwargs)
else:
_INPUT_FUNCS["default"](session, cname, *cmdargs, **cmdkwargs)
except Exception as err:
if input_debug:
session.msg(err)
log_trace()
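# Added illustration (not in the original module; assumes a "text" inputfunc is
# registered via settings.INPUT_FUNC_MODULES): incoming data of the form
#   {"text": (("look",), {})}
# is routed as _INPUT_FUNCS["text"](session, "look"), while an unknown command
# name falls back to _INPUT_FUNCS["default"](session, cname, *args, **kwargs).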
SESSION_HANDLER = ServerSessionHandler()
SESSIONS = SESSION_HANDLER # legacy
| 36.888485
| 117
| 0.583183
|
dbc145cf8d94f0762e4ac540b0c2ee96816aaac4
| 3,946
|
py
|
Python
|
apps/Graph4Rec/env_run/src/datasets/node.py
|
zbmain/PGL
|
dbded6a1543248b0a33c05eb476ddc513401a774
|
[
"Apache-2.0"
] | 1
|
2022-03-25T12:04:51.000Z
|
2022-03-25T12:04:51.000Z
|
apps/Graph4Rec/env_run/src/datasets/node.py
|
zbmain/PGL
|
dbded6a1543248b0a33c05eb476ddc513401a774
|
[
"Apache-2.0"
] | null | null | null |
apps/Graph4Rec/env_run/src/datasets/node.py
|
zbmain/PGL
|
dbded6a1543248b0a33c05eb476ddc513401a774
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.append("../")
import time
import warnings
import numpy as np
from collections import defaultdict
from pgl.utils.logger import log
from pgl.distributed import DistGraphClient, DistGraphServer
from utils.config import prepare_config
class NodeGenerator(object):
def __init__(self, config, graph, **kwargs):
self.config = config
self.graph = graph
self.rank = kwargs.get("rank", 0)
self.nrank = kwargs.get("nrank", 1)
self.gen_mode = kwargs.get("gen_mode", "base_node_generator")
self.batch_node_size = self.config.batch_node_size
self.first_ntype_list = []
for mp in self.config.meta_path.split(";"):
self.first_ntype_list.append(mp.split("2")[0])
def __call__(self, generator=None):
"""
Args:
generator: fake generator, ignore it
"""
node_generator = getattr(self, self.gen_mode)
nodes_count = 0
for nodes in node_generator():
nodes_count += len(nodes[0])
yield nodes
log.info("total [%s] number nodes have been processed in rank [%s]" \
% (nodes_count, self.rank))
def infer_node_generator(self):
ntype_list = self.graph.get_node_types()
node_generators = {}
for ntype in ntype_list:
for batch_nodes in self.graph.node_batch_iter(
batch_size=self.batch_node_size,
shuffle=False,
node_type=ntype,
rank=self.rank,
nrank=self.nrank):
yield (batch_nodes, 0)
def base_node_generator(self):
ntype_list = self.graph.get_node_types()
node_generators = {}
for ntype in ntype_list:
node_generators[ntype] = self.graph.node_batch_iter(
batch_size=self.batch_node_size,
shuffle=True,
node_type=ntype,
rank=self.rank,
nrank=self.nrank)
num_ntype = len(self.first_ntype_list)
finished_node_types_set = set()
cc = 0
batch_count = defaultdict(lambda: 0)
epoch_flag = False
while True:
idx = cc % num_ntype
if idx not in finished_node_types_set:
ntype = self.first_ntype_list[idx]
try:
batch_nodes = next(node_generators[ntype])
batch_count[ntype] += 1
yield (batch_nodes, idx)
except StopIteration as e:
log.info(e)
msg = "nodes of type [%s] finished with [%s] batch iteration in rank [%s]" \
% (ntype, batch_count[ntype], self.rank)
log.info(msg)
finished_node_types_set.add(idx)
msg = ""
for x in list(finished_node_types_set):
msg += " [%s]" % self.first_ntype_list[x]
log.info("%s node types have been finished in rank [%s]." \
% (msg, self.rank))
if len(finished_node_types_set) == num_ntype:
epoch_flag = True
if epoch_flag:
break
cc += 1
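# A minimal usage sketch (illustrative only): `config` and `graph` come from the
# surrounding Graph4Rec pipeline, and the keyword arguments mirror the constructor
# above. Each yielded item is a (batch_nodes, meta_path_index) tuple.
#
#     node_gen = NodeGenerator(config, graph, rank=0, nrank=1,
#                              gen_mode="base_node_generator")
#     for batch_nodes, mp_idx in node_gen():
#         pass  # feed the nodes into the downstream walk/sample generators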
| 34.614035
| 96
| 0.575013
|
a5e08450a3f10d84440b33b93134d43f65902dbd
| 1,312
|
py
|
Python
|
neo-collector/delete/deleteTxUpdateSQL.py
|
i25959341/Happynodes
|
ef6825d17b181c451476d4ef722f2bcfa52e0161
|
[
"MIT"
] | null | null | null |
neo-collector/delete/deleteTxUpdateSQL.py
|
i25959341/Happynodes
|
ef6825d17b181c451476d4ef722f2bcfa52e0161
|
[
"MIT"
] | null | null | null |
neo-collector/delete/deleteTxUpdateSQL.py
|
i25959341/Happynodes
|
ef6825d17b181c451476d4ef722f2bcfa52e0161
|
[
"MIT"
] | null | null | null |
import psycopg2
import time
import os
PGHOST = str(os.environ['PGHOST'])
PGDATABASE = str(os.environ['PGDATABASE'])
PGUSER = str(os.environ['PGUSER'])
PGPASSWORD = str(os.environ['PGPASSWORD'])
CONNECTION_STR = "dbname='{}' user='{}' host='{}' password='{}'".format(PGDATABASE, PGUSER, PGHOST, PGPASSWORD)
DELETE_SCRIPT = """DELETE
FROM unconfirmed_tx
WHERE last_blockheight+10 < (SELECT max(blockheight)
FROM blockheight_history
WHERE blockheight IS NOT NULL)"""
SLEEP_TIME = 60*60
class DeleteTxUpdateSQL:
def __init__(self, connect_str, sql_query, sleeptime):
self.connect_str=connect_str
self.sql_query = sql_query
self.sleeptime = sleeptime
def run(self):
while True:
self.update()
print("Sleeping")
# Run hourly
time.sleep(self.sleeptime)
def update(self):
self.conn = psycopg2.connect(self.connect_str)
self.cursor = self.conn.cursor()
self.cursor.execute(self.sql_query)
self.conn.commit()
self.cursor.close()
self.conn.close()
if __name__ == "__main__":
deleteTxUpdate = DeleteTxUpdateSQL(CONNECTION_STR, DELETE_SCRIPT, SLEEP_TIME)
deleteTxUpdate.run()
| 26.77551
| 111
| 0.624238
|
c860efe1a3155bbb8553cd22208e8ac22d4beb94
| 1,738
|
py
|
Python
|
tools/nonhinting/setprep-fontforge.py
|
graffitiMSX/msxpower-googlefontdirectory
|
1e201a68c8181698c143279734c4677f194855d8
|
[
"Apache-2.0"
] | 1
|
2016-06-05T07:51:16.000Z
|
2016-06-05T07:51:16.000Z
|
tools/nonhinting/setprep-fontforge.py
|
graffitiMSX/msxpower-googlefontdirectory
|
1e201a68c8181698c143279734c4677f194855d8
|
[
"Apache-2.0"
] | null | null | null |
tools/nonhinting/setprep-fontforge.py
|
graffitiMSX/msxpower-googlefontdirectory
|
1e201a68c8181698c143279734c4677f194855d8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# setprep-fontforge.py
#
# Copyright (c) 2011 Dave Crossland <dave@understandinglimited.com>
#
# This program takes a TTF font with no hinting and sets
# its PREP hinting table with magic values that turn on
# 'drop out control' - the magic comes from Raph Levien
# <firstname.lastname@gmail.com> and is:
#
# PUSHW_1
# 511
# SCANCTRL
# PUSHB_1
# 4
# SCANTYPE
#
# This script depends on the FontForge Python library, available
# in most packaging systems and sf.net/projects/fontforge/
#
# Usage:
#
# $ ./setprep-fontforge.py FontIn.ttf [FontOut.ttf]
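#
# For example (hypothetical file names), updating a font in place:
#
#   $ ./setprep-fontforge.py MyFont-Regular.ttf
#
# or writing the modified font to a separate output file:
#
#   $ ./setprep-fontforge.py MyFont-Regular.ttf MyFont-Regular-prep.ttf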
# Import our system library and the FontForge Python library
import sys, fontforge
def getprep(font):
prepAsm = font.getTableData("prep")
prepText = fontforge.unParseTTInstrs(prepAsm)
return prepText
def main(argv):
# Open the font file supplied as the first argument on the command line
font_in = argv[1]
font = fontforge.open(font_in)
# If there is a second font file specified on the command line, output to that
    if len(argv) > 2:
font_out = argv[2]
# Else, update the file
else:
font_out = font_in
# Print the existing PREP table
print "The PREP table is:"
print getprep(font)
# Set PREP to magic prep
prepTextMagic = """PUSHW_1
511
SCANCTRL
PUSHB_1
4
SCANTYPE"""
prepAsmMagic = fontforge.parseTTInstrs(prepTextMagic)
font.setTableData("prep",prepAsmMagic)
# Print the existing PREP table
print "The PREP table is now:"
print getprep(font)
# Generate the new font with no hinting instructions
flags = ('omit-instructions',)
font.generate(font_out, flags = flags)
print "in file", font_out, " - done!"
if __name__ == '__main__':
main(sys.argv)
| 25.188406
| 80
| 0.694476
|
b3dae082194c9b0acea148e3bd7d54549ace5b22
| 649
|
py
|
Python
|
tests/unit/test_common_util.py
|
LeonDLotter/findpapers
|
8346c14eb2eceb4e8216d768bb65bb9b025dea45
|
[
"MIT"
] | 56
|
2020-12-18T19:38:29.000Z
|
2022-03-13T20:48:01.000Z
|
tests/unit/test_common_util.py
|
LeonDLotter/findpapers
|
8346c14eb2eceb4e8216d768bb65bb9b025dea45
|
[
"MIT"
] | 19
|
2021-08-19T13:47:33.000Z
|
2021-12-13T11:49:10.000Z
|
tests/unit/test_common_util.py
|
LeonDLotter/findpapers
|
8346c14eb2eceb4e8216d768bb65bb9b025dea45
|
[
"MIT"
] | 11
|
2020-12-28T10:22:26.000Z
|
2022-03-27T16:26:30.000Z
|
import pytest
from typing import Callable, Any
import findpapers.utils.common_util as util
@pytest.mark.parametrize('string_format, numeric_format', [
('december', '12'),
('jan', '01'),
('February', '02'),
])
def test_get_numeric_month_by_string(string_format: str, numeric_format: str):
assert util.get_numeric_month_by_string(string_format) == numeric_format
@pytest.mark.parametrize('func, result', [
(lambda: None, None),
(lambda: ':)', ':)'),
(lambda: 10/0, None), # forcing a ZeroDivisionError exception
])
def test_try_success(func: Callable, result: Any):
assert util.try_success(func, 2, 1) == result
| 27.041667
| 78
| 0.697997
|
48553176437d8d202c9e4fde4c89cfe7235cd554
| 420
|
py
|
Python
|
core/lib/middleware.py
|
jiangxuewen16/hq-crawler
|
f03ec1e454513307e335943f224f4d927eaf2bbf
|
[
"MIT"
] | 1
|
2021-02-25T08:33:40.000Z
|
2021-02-25T08:33:40.000Z
|
core/lib/middleware.py
|
jiangxuewen16/hq-crawler
|
f03ec1e454513307e335943f224f4d927eaf2bbf
|
[
"MIT"
] | null | null | null |
core/lib/middleware.py
|
jiangxuewen16/hq-crawler
|
f03ec1e454513307e335943f224f4d927eaf2bbf
|
[
"MIT"
] | 2
|
2021-03-08T07:25:16.000Z
|
2021-12-07T15:28:02.000Z
|
from django.utils.deprecation import MiddlewareMixin
from core.lib.view import BaseView
class MiddleSetMethod(MiddlewareMixin):
def process_request(self, request):
BaseView.method = request.method
def process_response(self, request, response):
        return response  # after this middleware runs, the response must be passed on to the next middleware
def process_exception(self, request, exception):
        print('MiddleSetMethod process_exception')
| 26.25
| 52
| 0.757143
|
bae423031203ee5bac47917da82f87f8611acd98
| 1,544
|
py
|
Python
|
var/spack/repos/builtin/packages/libcumlprims/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/libcumlprims/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/libcumlprims/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Libcumlprims(Package):
"""libcuMLPrims library"""
homepage = "https://rapids.ai"
url = "https://anaconda.org/nvidia/libcumlprims/0.15.0/download/linux-64/libcumlprims-0.15.0-cuda11.0_gdbd0d39_0.tar.bz2"
version('0.15.0-cuda11.0_gdbd0d39_0', sha256='0edc55767f06f533fbff7a0fecaf6e6d4f82eec39604b3874a07b5609f79ece8')
version('0.15.0-cuda10.2_gdbd0d39_0', sha256='b7a8740de0d15380829f42fcb078567e73ab7d29b14be073376153bf2d8ec945')
version('0.15.0-cuda10.1_gdbd0d39_0', sha256='f055f904b5ef67995869b0bc648d9fe30839b08e77cb335573bf9f1c816d4d9b')
depends_on('cuda@11.0.0:11.0', when='@0.15.0-cuda11.0_gdbd0d39_0')
depends_on('cuda@10.2.0:10.2', when='@0.15.0-cuda10.2_gdbd0d39_0')
depends_on('cuda@10.1.0:10.1', when='@0.15.0-cuda10.1_gdbd0d39_0')
@property
def headers(self):
headers = find_headers('*', self.prefix.include, recursive=True)
headers.directories = [self.prefix.include,
self.prefix.include.cumlprims]
return headers
def url_for_version(self, version):
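        # For example, version '0.15.0-cuda11.0_gdbd0d39_0' maps to
        # .../libcumlprims/0.15.0/download/linux-64/libcumlprims-0.15.0-cuda11.0_gdbd0d39_0.tar.bz2,
        # since version.up_to(3) yields the bare '0.15.0' release number.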
url = "https://anaconda.org/nvidia/libcumlprims/{0}/download/linux-64/libcumlprims-{1}.tar.bz2"
return url.format(version.up_to(3), version)
def install(self, spec, prefix):
install_tree('.', self.prefix)
| 42.888889
| 130
| 0.713083
|
76bc0a93e139075ac0059a7845cf1c2d41c3f741
| 6,705
|
py
|
Python
|
news_crawler/spiders/cicero.py
|
andreeaiana/renewrs_corpus_extraction
|
b077f94ea80eb2c3aa25477f2ef48f2d6e8825e7
|
[
"MIT"
] | 2
|
2020-11-19T19:14:57.000Z
|
2020-11-24T23:33:08.000Z
|
news_crawler/spiders/cicero.py
|
andreeaiana/renewrs_corpus_extraction
|
b077f94ea80eb2c3aa25477f2ef48f2d6e8825e7
|
[
"MIT"
] | null | null | null |
news_crawler/spiders/cicero.py
|
andreeaiana/renewrs_corpus_extraction
|
b077f94ea80eb2c3aa25477f2ef48f2d6e8825e7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
import dateparser
from news_crawler.spiders import BaseSpider
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from datetime import datetime
sys.path.insert(0, os.path.join(os.getcwd(), "..",))
from news_crawler.items import NewsCrawlerItem
from news_crawler.utils import remove_empty_paragraphs
class CiceroSpider(BaseSpider):
"""Spider for Cicero"""
name = 'cicero'
rotate_user_agent = True
allowed_domains = ['www.cicero.de']
start_urls = ['https://www.cicero.de/']
# Exclude paid and English articles and pages without relevant articles
rules = (
Rule(
LinkExtractor(
allow=(r'www\.cicero\.de\/\w.*'),
deny=(r'www\.cicero\.de\/cicero\-plus',
r'www\.cicero\.de\/newsletter\-anmeldung',
r'www\.cicero\.de\/rss\.xml$',
r'www\.cicero\.de\/comment\/\w.*'
)
),
callback='parse_item',
follow=True
),
)
def parse_item(self, response):
"""
Checks article validity. If valid, it parses it.
"""
# Exclude paid articles
if response.xpath('//div[@class="paywall-text"]').get():
return
# Check date validity
metadata = response.xpath('//div[@class="teaser-small__metadata"]/p/text()').getall()
if not metadata:
return
creation_date = metadata[-1].strip()
if not creation_date:
return
creation_date = creation_date.split('am ')[-1]
creation_date = dateparser.parse(creation_date)
if self.is_out_of_date(creation_date):
return
# Extract the article's paragraphs
paragraphs = [node.xpath('string()').get().strip() for node in response.xpath('//div[@class="field field-name-field-cc-body"]/p')]
paragraphs = remove_empty_paragraphs(paragraphs)
        text = ' '.join(paragraphs)
# Check article's length validity
if not self.has_min_length(text):
return
# Check keywords validity
if not self.has_valid_keywords(text):
return
# Parse the valid article
item = NewsCrawlerItem()
item['news_outlet'] = 'cicero'
item['provenance'] = response.url
item['query_keywords'] = self.get_query_keywords()
# Get creation, modification, and crawling dates
item['creation_date'] = creation_date.strftime('%d.%m.%Y')
item['last_modified'] = creation_date.strftime('%d.%m.%Y')
item['crawl_date'] = datetime.now().strftime('%d.%m.%Y')
# Get authors
metadata = response.xpath('//div[@class="teaser-small__metadata"]/p//text()').getall()
if not metadata:
item['author_person'] = list()
item['author_organization'] = list()
else:
metadata = [s.strip() for s in metadata]
            if len(metadata) > 1:
authors = metadata[1]
# Check if the authors are persons
if len(authors.split()) == 1 or 'CICERO' in authors:
# Check if the author is an organization
author_person = list()
author_organization = [authors]
elif ',' in authors:
# There are more than two persons listed as author
authors = authors.split(', ')
author_person = authors[:-1]
if 'UND' in authors[-1]:
author_person.extend(authors[-1].split(' UND '))
else:
                        author_person.append(authors[-1])
author_organization = list()
elif 'UND' in authors:
# There are just two persons listed as authors
author_person = authors.split(' UND ')
author_organization = list()
else:
author_person = [authors]
author_organization = list()
else:
authors = metadata[0]
author_person = [authors.split('VON ')[-1].split(', ')[0].split('am')[0]]
author_organization = list()
# All words are uppercased, capitalize them instead
item['author_person'] = [author.title() for author in author_person]
item['author_organization'] = [author.title() for author in author_organization]
# Extract keywords
news_keywords = response.xpath('//meta[@name="keywords"]/@content').get()
item['news_keywords'] = news_keywords.split(', ') if news_keywords else list()
# Get title, description, and body of article
title = response.xpath('//meta[@property="og:title"]/@content').get()
description = response.xpath('//meta[@property="og:description"]/@content').get()
# Body as dictionary: key = headline (if available, otherwise empty string), values = list of corresponding paragraphs
body = dict()
if response.xpath('//h3[not(contains(text(), "Kommentare"))]'):
# Extract headlines
headlines = [h3.xpath('string()').get().strip() for h3 in response.xpath('//h3[not(contains(text(), "Kommentare"))]')]
# Extract paragraphs with headlines
text = [node.xpath('string()').get().strip() for node in response.xpath('//div[@class="field field-name-field-cc-body"]/p | //h3[not(contains(text(), "Kommentare"))]')]
# Extract paragraphs between the abstract and the first headline
body[''] = remove_empty_paragraphs(text[:text.index(headlines[0])])
# Extract paragraphs corresponding to each headline, except the last one
for i in range(len(headlines)-1):
body[headlines[i]] = remove_empty_paragraphs(text[text.index(headlines[i])+1:text.index(headlines[i+1])])
# Extract the paragraphs belonging to the last headline
body[headlines[-1]] = remove_empty_paragraphs(text[text.index(headlines[-1])+1:])
else:
# The article has no headlines, just paragraphs
body[''] = paragraphs
item['content'] = {'title': title, 'description': description, 'body':body}
# No recommendations related to the article are available
item['recommendations'] = list()
item['response_body'] = response.body
yield item
| 41.388889
| 180
| 0.564504
|