hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
26c6d3dc79c51eac0baadcc61f39d7c3b322a898 | 2,079 | py | Python | _posts/images/doConvert.py | mianli/mianli.GitHub.io | 6ab193670fb714e2817c64609f8d9e34d3628ca0 | [
"Apache-2.0"
] | null | null | null | _posts/images/doConvert.py | mianli/mianli.GitHub.io | 6ab193670fb714e2817c64609f8d9e34d3628ca0 | [
"Apache-2.0"
] | null | null | null | _posts/images/doConvert.py | mianli/mianli.GitHub.io | 6ab193670fb714e2817c64609f8d9e34d3628ca0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#-*- coding: UTF-8 -*-
import os
import time
import subprocess
import shutil
import sys
os.chdir(sys.path[0])
print(os.getcwd())
cacheFolder = os.getcwd() + "/temp/"
cacheFile = cacheFolder + "temp"
caches = []
generalSize = "640X640"
if(len(sys.argv) > 1) :
wishSize = 640 * int(sys.argv[1])
generalSize = "%dx%d" % (wishSize, wishSize)
print("开始...")
checkTempFileExist()
loadCache()
initRunner()
print("已结束.")
| 25.9875 | 112 | 0.593074 | #!/usr/bin/env python
#-*- coding: UTF-8 -*-
import os
import time
import subprocess
import shutil
import sys
os.chdir(sys.path[0])
print(os.getcwd())
cacheFolder = os.getcwd() + "/temp/"
cacheFile = cacheFolder + "temp"
caches = []
generalSize = "640X640"
if(len(sys.argv) > 1) :
wishSize = 640 * int(sys.argv[1])
generalSize = "%dx%d" % (wishSize, wishSize)
def initRunner():
path = os.getcwd()
os.chdir(path)
files = os.listdir(path)
for file in files:
arr = os.path.splitext(file)
suffix = arr[-1]
convert(file, suffix)
def checkTempFileExist():
if not os.path.exists(cacheFolder):
print("缓存文件生成.")
os.makedirs(cacheFolder)
def loadCache():
if not os.path.exists(cacheFile):
return
fp = open(cacheFile, "r")
for c in fp:
caches.append(c.replace("\n", ""))
def save(cache):
fp = open(cacheFile, "a")
fp.write(cache + "\n")
def replace_file_cache(currentTime, filename, who):
curfile = os.getcwd() + "/" + filename
shutil.move(curfile, cacheFolder + filename)
shutil.move(who, curfile)
save(filename)
def convert(filename, suffix):
if filename in caches:
print("%s已经转换" % filename)
return
currentTime = time.strftime('%Y%m%d', time.localtime(time.time()))
who = ""
if(suffix == ".gif"):
print(filename + "开始转换")
temp = "%s.gif" % currentTime
who = cacheFolder + temp
cmd = "gifsicle %s --colors 256 --resize-fit %s -o %s" % (filename, generalSize, who)
os.system(cmd)
replace_file_cache(currentTime, filename, who)
elif(len(suffix) > 0):
if(suffix == ".png"
or suffix == ".jpg"):
print(filename + "开始转换")
temp = "%s.png" % currentTime
who = cacheFolder + temp
os.system("%s\/convert.sh %s %s %s" % (os.getcwd(), os.getcwd() + "/" + filename, generalSize, who))
replace_file_cache(currentTime, filename, who)
print("开始...")
checkTempFileExist()
loadCache()
initRunner()
print("已结束.")
| 1,530 | 0 | 138 |
ad9b07576281a042d8aad9bfcdd0adced10ac6e5 | 5,537 | py | Python | Back-End/Python/External Libraries/Flask/Flask-Extensions/Flask-Admin/examples_01/flask_admin_/form/fields.py | ASHISHKUMAR2411/Programming-CookBook | 9c60655d64d21985ccb4196360858d98344701f9 | [
"MIT"
] | 25 | 2021-04-28T02:51:26.000Z | 2022-03-24T13:58:04.000Z | Back-End/Python/External Libraries/Flask/Flask-Extensions/Flask-Admin/examples_01/flask_admin_/form/fields.py | ASHISHKUMAR2411/Programming-CookBook | 9c60655d64d21985ccb4196360858d98344701f9 | [
"MIT"
] | 1 | 2022-03-03T23:33:41.000Z | 2022-03-03T23:35:41.000Z | Back-End/Python/External Libraries/Flask/Flask-Extensions/Flask-Admin/examples_01/flask_admin_/form/fields.py | ASHISHKUMAR2411/Programming-CookBook | 9c60655d64d21985ccb4196360858d98344701f9 | [
"MIT"
] | 15 | 2021-05-30T01:35:20.000Z | 2022-03-25T12:38:25.000Z | # Note taken from --> https://gist.github.com/JungeAlexander/6ce0a5213f3af56d7369 & https://stackoverflow.com/questions/714063/importing-modules-from-parent-folder/11158224#11158224
import os, sys, inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import time
import datetime
import json
from wtforms import fields
from _compat import text_type, as_unicode
# from widgets import * as admin_widgets
import form.widgets as admin_widgets
__all__ = ['DateTimeField', 'TimeField', 'Select2Field', 'Select2TagsField', 'JSONField']
| 35.267516 | 182 | 0.579917 | # Note taken from --> https://gist.github.com/JungeAlexander/6ce0a5213f3af56d7369 & https://stackoverflow.com/questions/714063/importing-modules-from-parent-folder/11158224#11158224
import os, sys, inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import time
import datetime
import json
from wtforms import fields
from _compat import text_type, as_unicode
# from widgets import * as admin_widgets
import form.widgets as admin_widgets
__all__ = ['DateTimeField', 'TimeField', 'Select2Field', 'Select2TagsField', 'JSONField']
class DateTimeField(fields.DateTimeField):
widget = admin_widgets.DateTimePickerWidget()
def __init__(self, label=None, validators=None, format=None, **kwargs): # :param format:Format for text to date conversion. Defaults to '%Y-%m-%d %H:%M:%S'
super(DateTimeField, self).__init__(label, validators, **kwargs)
self.format = format or '%Y-%m-%d %H:%M:%S'
class TimeField(fields.Field):
widget = admin_widgets.TimePickerWidget()
def __init__(self, label=None, validators=None, formats=None,
default_format=None, widget_format=None, **kwargs): # :param default_format: Default time format. Defaults to '%H:%M:%S'
super(TimeField, self).__init__(label, validators, **kwargs)
self.formats = formats or ('%H:%M:%S', '%H:%M',
'%I:%M:%S%p', '%I:%M%p',
'%I:%M:%S %p', '%I:%M %p')
self.default_format = default_format or '%H:%M:%S'
def _value(self):
if self.raw_data:
return u' '.join(self.raw_data)
elif self.data is not None:
return self.data.strftime(self.default_format)
else:
return u''
def process_formdata(self, valuelist):
if valuelist:
date_str = u' '.join(valuelist)
if date_str.strip():
for format in self.formats:
try:
timetuple = time.strptime(date_str, format)
self.data = datetime.time(timetuple.tm_hour,
timetuple.tm_min,
timetuple.tm_sec)
return
except ValueError:
pass
raise ValueError(gettext('Invalid Time format.'))
else:
self.data = None
class Select2Field(fields.SelectField):
widget = admin_widgets.Select2Widget()
def __init__(self, label=None, validators=None, coerce=text_type,
choices=None, allow_blank=False, blank_text=None, **kwargs):
super(Select2Field, self).__init__(label, validators, coerce, choices, **kwargs)
self.allow_blank = allow_blank
self.blank_text = blank_text or ' '
def iter_choices(self):
if self.allow_blank:
yield (u'__None', self.blank_text, self.data is None)
for value, label, in self.choices:
yield (value, label, self.coerce(value) == self.data)
def process_data(self, value):
if values is None:
self.data = None
else:
try:
self.data = self.coerce(value)
except (ValueError, TypeError):
self.data = None
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
try:
self.data = self.coerce(valuelist[0])
except ValueError:
raise ValueError(self.gettext(u'Invalid Choice: could not coerce.'))
def pre_validate(self, form):
if self.allow_blank and self.data is None:
return
super(Select2Field, self).pre_validate(form)
class Select2TagsField(fields.StringField):
widget = admin_widgets.Select2TagsWidget()
def __init__(self, label=None, validators=None, save_as_list=False, coerce=text_type, **kwargs):
self.save_as_list = save_as_list
self.coerce = coerce
super(Select2TagsField, self).__init__(label, validators, **kwargs)
def process_formdata(self,valuelist):
if valuelist:
if self.save_as_list:
self.data = [self.coerce(v.strip()) for v in valuelist[0].split(',') if v.strip()]
else:
self.data = self.coerce(valuelist[0])
def _value(self):
if isinstance(self.data, (list, tuple)):
return u','.join(as_unicode(v) for v in self.data)
elif self.data:
return as_unicode(self.data)
else:
return u''
class JSONField(fields.TextAreaField):
def _value(self):
if self.raw_data:
return self.raw_data[0]
elif self.data:
# prevent utf8 characters from being converted to ascii
return as_unicode(json.dumps(self.data, ensure_ascii=False))
else:
return ''
def process_formdata(self, valuelist):
if valuelist:
value = valuelist[0]
if not valie:
self.data = None
return
try:
self.data = json.loads(valuelist[0])
except ValueError:
raise ValueError(self.gettext('Invalid JSON')) | 4,060 | 629 | 173 |
7d855948e033681544c624f9db202049a09935ef | 4,275 | py | Python | tests/test_chi_ssa_23.py | MAYANK25402/city-scrapers | 08f92ec5b68682a8120eee1a13c4a03fe0335b9e | [
"MIT"
] | 255 | 2018-03-06T20:12:03.000Z | 2022-03-05T03:06:45.000Z | tests/test_chi_ssa_23.py | MAYANK25402/city-scrapers | 08f92ec5b68682a8120eee1a13c4a03fe0335b9e | [
"MIT"
] | 514 | 2018-02-02T16:12:50.000Z | 2022-03-21T20:07:35.000Z | tests/test_chi_ssa_23.py | MAYANK25402/city-scrapers | 08f92ec5b68682a8120eee1a13c4a03fe0335b9e | [
"MIT"
] | 342 | 2018-02-03T04:05:37.000Z | 2022-03-18T16:34:58.000Z | from datetime import datetime
from os.path import dirname, join
import pytest
from city_scrapers_core.constants import COMMISSION, PASSED, TENTATIVE
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from scrapy.settings import Settings
from city_scrapers.spiders.chi_ssa_23 import ChiSsa23Spider
test_response = file_response(
join(dirname(__file__), "files", "chi_ssa_23.html"),
url="https://www.lincolnparkchamber.com/clark-street-ssa-administration/",
)
spider = ChiSsa23Spider()
spider.settings = Settings(values={"CITY_SCRAPERS_ARCHIVE": False})
freezer = freeze_time("2020-05-11")
freezer.start()
parsed_items = sorted(
[item for item in spider.parse(test_response)],
key=lambda i: i["start"],
reverse=True,
)
freezer.stop()
@pytest.mark.parametrize("item", parsed_items)
@pytest.mark.parametrize("item", parsed_items)
@pytest.mark.parametrize("item", parsed_items)
@pytest.mark.parametrize("item", parsed_items)
@pytest.mark.parametrize("item", parsed_items)
@pytest.mark.parametrize("item", parsed_items)
| 27.941176 | 78 | 0.633684 | from datetime import datetime
from os.path import dirname, join
import pytest
from city_scrapers_core.constants import COMMISSION, PASSED, TENTATIVE
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from scrapy.settings import Settings
from city_scrapers.spiders.chi_ssa_23 import ChiSsa23Spider
test_response = file_response(
join(dirname(__file__), "files", "chi_ssa_23.html"),
url="https://www.lincolnparkchamber.com/clark-street-ssa-administration/",
)
spider = ChiSsa23Spider()
spider.settings = Settings(values={"CITY_SCRAPERS_ARCHIVE": False})
freezer = freeze_time("2020-05-11")
freezer.start()
parsed_items = sorted(
[item for item in spider.parse(test_response)],
key=lambda i: i["start"],
reverse=True,
)
freezer.stop()
def test_count():
assert len(parsed_items) == 12
@pytest.mark.parametrize("item", parsed_items)
def test_title(item):
assert item["title"] == "Commission"
@pytest.mark.parametrize("item", parsed_items)
def test_description(item):
assert (
item["description"] == "All meetings held Wednesdays at 4:00 p.m. "
"Meetings typically run 90 minute"
"s. Please contact the LPCC to confirm meeting "
"locations (773) 880-5200. "
)
def test_start():
expected_starts = [
datetime(2020, 11, 18, 16, 0),
datetime(2020, 9, 9, 16, 0),
datetime(2020, 7, 8, 16, 0),
datetime(2020, 5, 27, 16, 0),
datetime(2020, 4, 22, 16, 0),
datetime(2020, 4, 3, 10, 30),
datetime(2020, 3, 24, 9, 37),
datetime(2020, 2, 5, 16, 0),
datetime(2019, 11, 13, 16, 0),
datetime(2019, 9, 4, 16, 0),
datetime(2019, 7, 10, 16, 0),
datetime(2019, 5, 15, 16, 0),
]
for i in range(len(parsed_items)):
assert parsed_items[i]["start"] == expected_starts[i]
def test_end():
expected_ends = [
datetime(2020, 11, 18, 17, 30),
datetime(2020, 9, 9, 17, 30),
datetime(2020, 7, 8, 17, 30),
datetime(2020, 5, 27, 17, 30),
datetime(2020, 4, 22, 17, 30),
datetime(2020, 4, 3, 12, 00),
datetime(2020, 3, 24, 11, 7),
datetime(2020, 2, 5, 17, 30),
datetime(2019, 11, 13, 17, 30),
datetime(2019, 9, 4, 17, 30),
datetime(2019, 7, 10, 17, 30),
datetime(2019, 5, 15, 17, 30),
]
for i in range(len(parsed_items)):
assert parsed_items[i]["end"] == expected_ends[i]
@pytest.mark.parametrize("item", parsed_items)
def test_time_notes(item):
assert item["time_notes"] == "Estimated 90 minutes duration"
def test_id():
expected_ids = [
"chi_ssa_23/202011181600/x/commission",
"chi_ssa_23/202009091600/x/commission",
"chi_ssa_23/202007081600/x/commission",
"chi_ssa_23/202005271600/x/commission",
"chi_ssa_23/202004221600/x/commission",
"chi_ssa_23/202004031030/x/commission",
"chi_ssa_23/202003240937/x/commission",
"chi_ssa_23/202002051600/x/commission",
"chi_ssa_23/201911131600/x/commission",
"chi_ssa_23/201909041600/x/commission",
"chi_ssa_23/201907101600/x/commission",
"chi_ssa_23/201905151600/x/commission",
]
for i in range(len(parsed_items)):
assert parsed_items[i]["id"] == expected_ids[i]
def test_status():
expected_status = [
TENTATIVE,
TENTATIVE,
TENTATIVE,
TENTATIVE,
PASSED,
PASSED,
PASSED,
PASSED,
PASSED,
PASSED,
PASSED,
PASSED,
]
for i in range(len(parsed_items)):
assert parsed_items[i]["status"] == expected_status[i]
@pytest.mark.parametrize("item", parsed_items)
def test_location(item):
assert item["location"] == {
"name": "Lincoln Park Chamber of Commerce",
"address": "2468 N. Lincoln Chicago, IL 60614",
}
@pytest.mark.parametrize("item", parsed_items)
def test_source(item):
assert item["source"] == test_response.url
@pytest.mark.parametrize("item", parsed_items)
def test_classification(item):
assert item["classification"] == COMMISSION
def test_all_day():
for i in range(len(parsed_items)):
assert parsed_items[i]["all_day"] is False
| 2,912 | 0 | 270 |
aa6af1b7ac40407373d7abd8d55f9fb09c26ff8a | 604 | py | Python | test/tail.py | ubirch/visitor-counter | 97168d7252376358477c52bd956626596119526d | [
"Apache-2.0"
] | null | null | null | test/tail.py | ubirch/visitor-counter | 97168d7252376358477c52bd956626596119526d | [
"Apache-2.0"
] | null | null | null | test/tail.py | ubirch/visitor-counter | 97168d7252376358477c52bd956626596119526d | [
"Apache-2.0"
] | null | null | null |
import subprocess
filename = "../data/crackdump-01.csv"
read()
| 20.827586 | 43 | 0.541391 |
import subprocess
filename = "../data/crackdump-01.csv"
def filterLine(line):
filteredLine = ""
for c in line:
if(c >= ' ' and c <= '~'):
filteredLine = filteredLine + c
return filteredLine
def readlines():
with open(filename, 'r') as reader:
for line in reader.readlines():
fline = filterLine(line)
if(len(fline)>0):
print(fline)
def read():
try:
with open(filename, 'r') as reader:
while True:
print(reader.readline())
except KeyboardInterrupt:
exit(1)
read()
| 469 | 0 | 69 |
ced0bd786362d194b6e5055700dffd67c232fe8c | 11,263 | py | Python | src/scep/Client/message.py | bikram990/PyScep | bf5ddae43a461c9aecf7f9fce357ba2ad6df19d7 | [
"MIT"
] | 3 | 2021-06-24T11:19:17.000Z | 2021-12-15T02:23:27.000Z | src/scep/Client/message.py | bikram990/PyScep | bf5ddae43a461c9aecf7f9fce357ba2ad6df19d7 | [
"MIT"
] | 1 | 2022-01-03T14:36:52.000Z | 2022-01-09T02:50:03.000Z | src/scep/Client/message.py | bikram990/PyScep | bf5ddae43a461c9aecf7f9fce357ba2ad6df19d7 | [
"MIT"
] | 1 | 2021-06-08T15:46:31.000Z | 2021-06-08T15:46:31.000Z | import logging
from base64 import b64encode
from asn1crypto.cms import CMSAttribute, ContentInfo, IssuerAndSerialNumber
from cryptography.hazmat.primitives.asymmetric import padding
from .asn1 import SCEPCMSAttributeType
from .cryptoutils import digest_for_data, decrypt, digest_function_for_type
from .enums import MessageType, PKIStatus
from .certificate import Certificate
CMSAttribute._fields = [
('type', SCEPCMSAttributeType),
('values', None),
]
logger = logging.getLogger(__name__)
| 42.026119 | 176 | 0.61671 | import logging
from base64 import b64encode
from asn1crypto.cms import CMSAttribute, ContentInfo, IssuerAndSerialNumber
from cryptography.hazmat.primitives.asymmetric import padding
from .asn1 import SCEPCMSAttributeType
from .cryptoutils import digest_for_data, decrypt, digest_function_for_type
from .enums import MessageType, PKIStatus
from .certificate import Certificate
CMSAttribute._fields = [
('type', SCEPCMSAttributeType),
('values', None),
]
logger = logging.getLogger(__name__)
def get_digest_method(name='sha1'):
pass
class SCEPMessage(object):
@classmethod
def parse(cls, raw, signer_cert=None):
msg = cls()
cinfo = ContentInfo.load(raw)
assert cinfo['content_type'].native == 'signed_data'
# 1.2.840.113549.1.7.1
signed_data = cinfo['content']
if len(signed_data['certificates']) > 0:
certs = [Certificate(certificate=cert.chosen) for cert in signed_data['certificates']]
logger.debug('{} certificate(s) attached to signedData'.format(len(certs)))
msg._certificates = certs
else:
certs = None
logger.debug('No certificates attached to SignedData')
# Iterate through signers and verify the signature for each.
# Set convenience attributes at the same time
for signer_info in cinfo['content']['signer_infos']:
# version can be 1 (issuerandserial) or 3 (subjectkeyidentifier)
assert signer_info['version'] != 'v1' # we only support version 1
identifier = signer_info['sid'].chosen
assert isinstance(identifier, IssuerAndSerialNumber) # TODO: also support other signer ids
sig_algo = signer_info['signature_algorithm'].signature_algo
logger.debug('Using signature algorithm: {}'.format(sig_algo))
hash_algo = signer_info['digest_algorithm']['algorithm'].native
logger.debug('Using digest algorithm: {}'.format(hash_algo))
assert sig_algo == 'rsassa_pkcs1v15' # We only support PKCS1v1.5
if certs is not None and len(certs) > 0: # verify content
if signer_cert is None:
if certs is not None:
for c in certs: # find signer cert
if c.serial_number == identifier['serial_number'].native: # TODO: also convert issuer
signer_cert = c
break
# Set the signer for convenience on the instance
msg._signer_info = signer_info
if 'signed_attrs' in signer_info:
assert signed_data['encap_content_info']['content_type'].native == 'data'
assert signer_cert is not None
signed_attrs = signer_info['signed_attrs']
signed_attrs_data = signed_attrs.dump()
signed_attrs_data = b'\x31' + signed_attrs_data[1:]
signer_cert.verify(
signature=signer_info.native['signature'],
padding_type='pkcs',
digest_algorithm=hash_algo,
data=signed_attrs_data
)
# signer_cert.verify(signature=signer_info['signature'].native, padding_type='pkcs', digest_algorithm=hash_algo, data=signer_info['signed_attrs'].dump())
# /*
# * Check that the signerinfo attributes obey the attribute rules which includes
# * the following checks
# * - If any signed attributes exist then there must be a Content Type
# * and Message Digest attribute in the signed attributes.
# * - The countersignature attribute is an optional unsigned attribute only.
# * - Content Type, Message Digest, and Signing time attributes are signed
# * attributes. Only one instance of each is allowed, with each of these
# * attributes containing a single attribute value in its set.
# */
for signed_attr in signed_attrs:
name = SCEPCMSAttributeType.map(signed_attr['type'].dotted)
if name == 'transaction_id':
msg._transaction_id = signed_attr['values'][0].native
elif name == 'message_type':
msg._message_type = MessageType(signed_attr['values'][0].native)
elif name == 'sender_nonce':
msg._sender_nonce = signed_attr['values'][0].native
elif name == 'recipient_nonce':
msg._recipient_nonce = signed_attr['values'][0].native
elif name == 'pki_status':
msg._pki_status = PKIStatus(signed_attr['values'][0].native)
elif name == 'fail_info':
msg._fail_info = signed_attr['values'][0].native
elif name == 'content_type':
if msg._content_type is not None:
raise Exception('found multiple content_type in signed attributes')
msg._content_type = signed_attr['values'][0].native
elif name == 'signing_time':
if msg._signing_time is not None:
raise Exception('found multiple signing_time in signed attributes')
msg._signing_time = signed_attr['values'][0].native
elif name == 'message_digest':
if msg._message_digest is not None:
raise Exception('found multiple message_digest in signed attributes')
msg._message_digest = signed_attr['values'][0].native
elif name == 'algorithm_protection':
msg._algorithm_protection = signed_attr['values'][0].native
assert msg._message_digest is not None
assert msg._content_type is not None
calculated_digest = digest_for_data(algorithm=hash_algo, data=signed_data['encap_content_info']['content'].native)
assert msg._message_digest == calculated_digest
msg._signed_data = cinfo['content']['encap_content_info']['content']
return msg
def __init__(self, message_type=MessageType.CertRep, transaction_id=None, sender_nonce=None,
recipient_nonce=None):
self._content_info = None
self._transaction_id = transaction_id
self._message_type = message_type
self._sender_nonce = sender_nonce
self._recipient_nonce = recipient_nonce
self._pki_status = None
self._signer_info = None
self._signed_data = None
self._certificates = []
self._content_type = None
self._signing_time = None
self._message_digest = None
self._algorithm_protection = None
@property
def certificates(self):
return self._certificates
@property
def transaction_id(self):
return self._transaction_id
@property
def message_type(self):
return self._message_type
@property
def sender_nonce(self):
return self._sender_nonce
@property
def recipient_nonce(self):
return self._recipient_nonce
@property
def pki_status(self):
return self._pki_status
@property
def fail_info(self):
return self._fail_info
@property
def signer(self):
sid = self._signer_info['sid']
if isinstance(sid.chosen, IssuerAndSerialNumber):
issuer = sid.chosen['issuer'].human_friendly
serial = sid.chosen['serial_number'].native
return issuer, serial
@property
def encap_content_info(self):
return ContentInfo.load(self._signed_data.native)
@property
def signed_data(self):
return self._signed_data
@signed_data.setter
def signed_data(self, value):
self._signed_data = value
def get_decrypted_envelope_data(self, certificate, key):
"""Decrypt the encrypted envelope data:
Decrypt encrypted_key using public key of CA
encrypted_key is available at content.recipient_infos[x].encrypted_key
algo is content.recipient_infos[x].key_encryption_algorithm
at the moment this is RSA
"""
encap = self.encap_content_info
ct = encap['content_type'].native
logger.debug('content_type is {}'.format(ct))
recipient_info = encap['content']['recipient_infos'][0]
encryption_algo = recipient_info.chosen['key_encryption_algorithm'].native
encrypted_key = recipient_info.chosen['encrypted_key'].native
supported_algos = ['rsaes_pkcs1v15', 'rsa']
assert encryption_algo['algorithm'] in supported_algos
plain_key = key.decrypt(
ciphertext=encrypted_key,
padding_type='pkcs'
)
# Now we have the plain key, we can decrypt the encrypted data
encrypted_contentinfo = encap['content']['encrypted_content_info']
logger.debug('encrypted content type is {}'.format(encrypted_contentinfo['content_type'].native))
algorithm = encrypted_contentinfo['content_encryption_algorithm'] #: EncryptionAlgorithm
encrypted_content_bytes = encrypted_contentinfo['encrypted_content'].native
logger.debug('key length is {}'.format(algorithm.key_length))
logger.debug('cipher is {}'.format(algorithm.encryption_cipher))
logger.debug('enc mode is {}'.format(algorithm.encryption_mode))
return decrypt(cipher=algorithm.encryption_cipher, mode=algorithm.encryption_mode, key=plain_key, iv=algorithm.encryption_iv, encrypted_content=encrypted_content_bytes)
def debug(self):
logger.debug("SCEP Message")
logger.debug("------------")
logger.debug("{:<20}: {}".format('Transaction ID', self.transaction_id))
logger.debug("{:<20}: {}".format('Message Type', self.message_type))
logger.debug("{:<20}: {}".format('PKI Status', self.pki_status))
if self.sender_nonce is not None:
logger.debug("{:<20}: {}".format('Sender Nonce', b64encode(self.sender_nonce)))
if self.recipient_nonce is not None:
logger.debug("{:<20}: {}".format('Recipient Nonce', b64encode(self.recipient_nonce)))
logger.debug('------------')
logger.debug('Certificates')
logger.debug('------------')
logger.debug('Includes {} certificate(s)'.format(len(self.certificates)))
for c in self.certificates:
logger.debug(c.subject.human_friendly)
logger.debug('Signer(s)')
logger.debug('------------')
x509name, serial = self.signer
logger.debug("{:<20}: {}".format('Issuer X.509 Name', x509name))
# logger.debug("{:<20}: {}".format('Issuer S/N', serial))
logger.debug("{:<20}: {}".format('Signature Algorithm', self._signer_info['signature_algorithm'].signature_algo))
logger.debug("{:<20}: {}".format('Digest Algorithm', self._signer_info['digest_algorithm']['algorithm'].native))
| 8,375 | 2,336 | 46 |
3f9b82a60b325ff4ad25578193bed486a87eb7a4 | 966 | py | Python | test_sort.py | hairizuanbinnoorazman/python-stuff | 4cbaf88494d64f3c84d6d6bb17be71227950df33 | [
"MIT"
] | null | null | null | test_sort.py | hairizuanbinnoorazman/python-stuff | 4cbaf88494d64f3c84d6d6bb17be71227950df33 | [
"MIT"
] | null | null | null | test_sort.py | hairizuanbinnoorazman/python-stuff | 4cbaf88494d64f3c84d6d6bb17be71227950df33 | [
"MIT"
] | null | null | null | import pytest
from sort import *
@pytest.mark.parametrize(
"input,expected",
[
pytest.param(
[4], [4]
),
pytest.param(
[5, 7, 6, 4], [4, 5, 6, 7]
),
],
)
@pytest.mark.parametrize(
"input,expected",
[
pytest.param(
[4], [4]
),
pytest.param(
[5, 7, 6, 4], [4, 5, 6, 7]
),
],
)
@pytest.mark.parametrize(
"input,expected",
[
pytest.param(
[4], [4]
),
pytest.param(
[4, 2], [2, 4]
),
pytest.param(
[5, 7, 6, 4], [4, 5, 6, 7]
),
],
)
| 17.563636 | 38 | 0.473085 | import pytest
from sort import *
@pytest.mark.parametrize(
"input,expected",
[
pytest.param(
[4], [4]
),
pytest.param(
[5, 7, 6, 4], [4, 5, 6, 7]
),
],
)
def test_bubble_sort(input, expected):
answer = bubble_sort(input)
assert answer == expected
@pytest.mark.parametrize(
"input,expected",
[
pytest.param(
[4], [4]
),
pytest.param(
[5, 7, 6, 4], [4, 5, 6, 7]
),
],
)
def test_merge_sort(input, expected):
answer = merge_sort(input)
assert answer == expected
@pytest.mark.parametrize(
"input,expected",
[
pytest.param(
[4], [4]
),
pytest.param(
[4, 2], [2, 4]
),
pytest.param(
[5, 7, 6, 4], [4, 5, 6, 7]
),
],
)
def test_quick_sort(input, expected):
answer = quick_sort(input)
assert answer == expected
| 233 | 0 | 66 |
0c7b6fdf670903eeac00b2bdebeaac77c27ff620 | 1,874 | py | Python | src/scs_host/sys/host_serial.py | south-coast-science/scs_host_cpc | 08b4a28c022936462b60823cca136ba6746eac57 | [
"MIT"
] | null | null | null | src/scs_host/sys/host_serial.py | south-coast-science/scs_host_cpc | 08b4a28c022936462b60823cca136ba6746eac57 | [
"MIT"
] | null | null | null | src/scs_host/sys/host_serial.py | south-coast-science/scs_host_cpc | 08b4a28c022936462b60823cca136ba6746eac57 | [
"MIT"
] | null | null | null | """
Created on 26 Dec 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
https://learn.adafruit.com/setting-up-io-python-library-on-beaglebone-black/port
"""
import serial
import time
from scs_core.sys.serial import Serial
from scs_host.lock.lock import Lock
# --------------------------------------------------------------------------------------------------------------------
class HostSerial(Serial):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, device_path, baud_rate, hard_handshake=False):
"""
Constructor
"""
super().__init__(device_path, baud_rate, hard_handshake)
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
@property
# ----------------------------------------------------------------------------------------------------------------
@property
| 27.15942 | 118 | 0.419424 | """
Created on 26 Dec 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
https://learn.adafruit.com/setting-up-io-python-library-on-beaglebone-black/port
"""
import serial
import time
from scs_core.sys.serial import Serial
from scs_host.lock.lock import Lock
# --------------------------------------------------------------------------------------------------------------------
class HostSerial(Serial):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, device_path, baud_rate, hard_handshake=False):
"""
Constructor
"""
super().__init__(device_path, baud_rate, hard_handshake)
# ----------------------------------------------------------------------------------------------------------------
def open(self, lock_timeout, comms_timeout):
# lock...
Lock.acquire(self.__lock_name, lock_timeout)
# port...
self._ser = serial.Serial(port=self._device_identifier, baudrate=self._baud_rate, timeout=comms_timeout)
time.sleep(0.5) # as GE910 - 0.3
def close(self):
try:
# port...
if self._ser:
self._ser.close()
self._ser = None
finally:
# lock...
Lock.release(self.__lock_name)
# ----------------------------------------------------------------------------------------------------------------
@property
def device_identifier(self):
return self._device_identifier
# ----------------------------------------------------------------------------------------------------------------
@property
def __lock_name(self):
return self.__class__.__name__ + "-" + str(self._device_identifier).replace("/", "_")
| 619 | 0 | 106 |
421d48f13d55918c30bd53643b1115a143584398 | 1,324 | py | Python | utils_nlp/eval/evaluate_summarization.py | Anita1017/nlp-recipes | d4358193184cc0c80df04142f6e9773c47d2b0a4 | [
"MIT"
] | 4,407 | 2019-10-29T21:35:19.000Z | 2022-03-31T13:56:37.000Z | utils_nlp/eval/evaluate_summarization.py | shubham9g17/nlp-recipes | a5cd2303187239799ae0b1597a7c16eb99a97108 | [
"MIT"
] | 134 | 2019-10-30T23:38:59.000Z | 2022-03-01T11:42:53.000Z | utils_nlp/eval/evaluate_summarization.py | shubham9g17/nlp-recipes | a5cd2303187239799ae0b1597a7c16eb99a97108 | [
"MIT"
] | 726 | 2019-10-31T15:21:52.000Z | 2022-03-31T10:18:22.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
from random import random, seed
from bertsum.others.utils import test_rouge
def get_rouge(predictions, targets, temp_dir, random_seed=42):
    """
    function to get the rouge metric for the prediction and the reference.

    Args:
        predictions (list of strings): Predictions to be compared.
        targets (list of strings): References.
        temp_dir (str): Path where temporary folders are created to host the
            files generated by the ROUGE application.
        random_seed (int, optional): Random seed. Defaults to 42.

    Return:
        dictionary: rouge metric
    """

    # BUGFIX: this copy called _write_list_to_file without defining it, which
    # raised NameError at runtime. The helper is restored here as a nested
    # function (one item per line, as test_rouge expects plain-text files).
    def _write_list_to_file(list_items, filename):
        with open(filename, "w") as filehandle:
            for item in list_items:
                filehandle.write("%s\n" % item)

    seed(random_seed)
    # Randomised filename suffix avoids clashes between concurrent evaluations
    # sharing the same temp_dir.
    random_number = random()
    os.makedirs(temp_dir, exist_ok=True)
    candidate_path = os.path.join(temp_dir, "candidate" + str(random_number))
    gold_path = os.path.join(temp_dir, "gold" + str(random_number))
    _write_list_to_file(predictions, candidate_path)
    _write_list_to_file(targets, gold_path)
    rouge = test_rouge(temp_dir, candidate_path, gold_path)
    return rouge
| 32.292683 | 82 | 0.688822 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
from random import random, seed
from bertsum.others.utils import test_rouge
def get_rouge(predictions, targets, temp_dir, random_seed=42):
    """
    Compute the ROUGE metric for predictions against reference summaries.

    Args:
        predictions (list of strings): Predictions to be compared.
        targets (list of strings): References.
        temp_dir (str): Path where temporary folders are created to host the
            files generated by the ROUGE application.
        random_seed (int, optional): Random seed. Defaults to 42.

    Return:
        dictionary: rouge metric
    """

    def _dump_lines(list_items, filename):
        # One item per line; test_rouge reads plain-text files.
        with open(filename, "w") as filehandle:
            for item in list_items:
                filehandle.write("%s\n" % item)

    seed(random_seed)
    # Seeded-random suffix keeps concurrent runs in the same temp_dir apart.
    suffix = str(random())
    os.makedirs(temp_dir, exist_ok=True)
    candidate_path = os.path.join(temp_dir, "candidate" + suffix)
    gold_path = os.path.join(temp_dir, "gold" + suffix)
    for lines, path in ((predictions, candidate_path), (targets, gold_path)):
        _dump_lines(lines, path)
    return test_rouge(temp_dir, candidate_path, gold_path)
| 211 | 0 | 27 |
a8422046d80155739467dee424ad8228f58634f6 | 1,045 | py | Python | BS01-flask-bootstrap-table-demo/app/__init__.py | AngelLiang/Flask-Demos | cf0a74885b873cb2583b3870ccdf3508d3af602e | [
"MIT"
] | 3 | 2020-06-17T05:44:48.000Z | 2021-09-11T02:49:38.000Z | BS01-flask-bootstrap-table-demo/app/__init__.py | AngelLiang/Flask-Demos | cf0a74885b873cb2583b3870ccdf3508d3af602e | [
"MIT"
] | 3 | 2021-06-08T20:57:03.000Z | 2022-02-23T14:54:59.000Z | BS01-flask-bootstrap-table-demo/app/__init__.py | AngelLiang/Flask-Demos | cf0a74885b873cb2583b3870ccdf3508d3af602e | [
"MIT"
] | 6 | 2020-06-17T05:44:56.000Z | 2022-03-29T12:53:05.000Z | from flask import Flask
from .extensions import db
from .models import Tree
app = Flask(__name__)
db.init_app(app)
# Also attach the app to the db extension object — presumably so db can be
# used outside an app context (older Flask-SQLAlchemy pattern); confirm.
db.app = app

# Create dummy secret key so we can use sessions
app.config['SECRET_KEY'] = '123456790'

# Create in-memory database
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://data.sqlite'

# Import views for their route-registration side effects.
from .views import * # noqa
# BUGFIX: in this copy the decorator below was left dangling with no function
# under it (a SyntaxError). The seeding function and its initdata() helper are
# restored from the upstream version of this module.
def initdata(count=10):
    """
    Drop and recreate the schema, then seed a Trunk -> Branch -> Leaf -> Item
    hierarchy: `count` branches, 5 leaves per branch, 3 items per leaf.

    :param count: number of branches to attach to the trunk (default 10)
    """
    db.drop_all()
    db.create_all()
    trunk = Tree(name="Trunk")
    db.session.add(trunk)
    for i in range(count):
        branch = Tree()
        branch.name = "Branch " + str(i+1)
        branch.parent = trunk
        db.session.add(branch)
        for j in range(5):
            leaf = Tree()
            leaf.name = "Leaf " + str(j+1)
            leaf.parent = branch
            db.session.add(leaf)
            for k in range(3):
                item = Tree()
                item.name = "Item " + str(k+1)
                item.parent = leaf
                db.session.add(item)
    db.session.commit()


@app.before_first_request
def init_data():
    """Seed the database before the first request is handled."""
    initdata()
| 23.222222 | 64 | 0.578947 | from flask import Flask
from .extensions import db
from .models import Tree
app = Flask(__name__)
db.init_app(app)
db.app = app
# Create dummy secrey key so we can use sessions
app.config['SECRET_KEY'] = '123456790'
# Create in-memory database
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://data.sqlite'
from .views import * # noqa
def initdata(count=10):
    """
    Rebuild the schema and seed a Trunk -> Branch -> Leaf -> Item hierarchy:
    `count` branches under one trunk, 5 leaves per branch, 3 items per leaf.

    :param count: number of branches to create (default 10)
    """

    def _add_node(label, parent):
        # Create one Tree row, link it to its parent and stage it for commit.
        node = Tree()
        node.name = label
        node.parent = parent
        db.session.add(node)
        return node

    db.drop_all()
    db.create_all()
    trunk = Tree(name="Trunk")
    db.session.add(trunk)
    for b in range(1, count + 1):
        branch = _add_node("Branch " + str(b), trunk)
        for lf in range(1, 6):
            leaf = _add_node("Leaf " + str(lf), branch)
            for it in range(1, 4):
                _add_node("Item " + str(it), leaf)
    db.session.commit()
@app.before_first_request
def init_data():
    """Seed the database via initdata() before the first request is handled."""
    initdata()
| 629 | 0 | 45 |
32feddd1d879cfa016bcef15524fa0b4df1a3d41 | 406 | py | Python | eeyore/stats/cov.py | papamarkou/eeyore | 4cd9b5a619cd095035aa93f348d1c937629aa8a3 | [
"MIT"
] | 6 | 2020-04-22T18:56:46.000Z | 2021-09-09T15:57:48.000Z | eeyore/stats/cov.py | papamarkou/eeyore | 4cd9b5a619cd095035aa93f348d1c937629aa8a3 | [
"MIT"
] | 19 | 2019-11-14T21:22:21.000Z | 2020-10-31T16:18:36.000Z | eeyore/stats/cov.py | scidom/eeyore | 4cd9b5a619cd095035aa93f348d1c937629aa8a3 | [
"MIT"
] | null | null | null | import torch
# https://discuss.pytorch.org/t/covariance-and-gradient-support/16217
| 25.375 | 69 | 0.583744 | import torch
# https://discuss.pytorch.org/t/covariance-and-gradient-support/16217
def cov(x, rowvar=False):
    """
    Sample covariance (Bessel-corrected, divisor n-1) of the variables in ``x``.

    :param x: 1-D or 2-D tensor of observations; a 1-D input is treated as a
              single variable.
    :param rowvar: when True each row is a variable; when False (default) each
                   column is a variable.
    :return: covariance matrix, squeezed (a single variable yields a scalar).
    :raises ValueError: if ``x`` has more than two dimensions.
    """
    if x.dim() > 2:
        raise ValueError('x has more than 2 dimensions')
    obs = x.view(1, -1) if x.dim() < 2 else x
    # Arrange data as (variables, observations); a single row never needs the
    # transpose regardless of rowvar.
    if not rowvar and obs.size(0) != 1:
        obs = obs.t()
    deviations = obs - torch.mean(obs, dim=1, keepdim=True)
    return deviations.matmul(deviations.t()).squeeze() / (obs.size(1) - 1)
| 299 | 0 | 23 |
3078f6f7a835d165214af69930e334c0ce683c2e | 93 | py | Python | followers/apps.py | IanSeng/CMPUT404_PROJECT | 80acd2c57de4b091e0e66ad9f5f2df17801bf09e | [
"W3C-20150513"
] | 25 | 2020-08-30T19:28:01.000Z | 2022-02-18T19:18:14.000Z | followers/apps.py | IanSeng/CMPUT404_PROJECT | 80acd2c57de4b091e0e66ad9f5f2df17801bf09e | [
"W3C-20150513"
] | 81 | 2021-02-14T02:35:52.000Z | 2021-04-10T21:14:27.000Z | followers/apps.py | IanSeng/CMPUT404_PROJECT | 80acd2c57de4b091e0e66ad9f5f2df17801bf09e | [
"W3C-20150513"
] | 27 | 2020-09-06T08:00:49.000Z | 2022-02-01T06:15:08.000Z | from django.apps import AppConfig
| 15.5 | 33 | 0.763441 | from django.apps import AppConfig
class FollowersConfig(AppConfig):
name = 'followers'
| 0 | 35 | 23 |
82faabdefdf53899c72da40e04e2176c3d052adf | 212 | py | Python | WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/compileall/compileall_path.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/compileall/compileall_path.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/compileall/compileall_path.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | # Copyright (c) 2009 Doug Hellmann All rights reserved.
#
"""
"""
# end_pymotw_header
import compileall
import sys
# Replace the module search path in place. "notthere" does not exist and is
# presumably included to show how compile_path() behaves for a missing entry —
# confirm against the PyMOTW example text.
sys.path[:] = ["examples", "notthere"]
print("sys.path =", sys.path)
# Byte-compile the modules found along the (modified) sys.path.
compileall.compile_path()
| 16.307692 | 55 | 0.70283 | # Copyright (c) 2009 Doug Hellmann All rights reserved.
#
"""
"""
# end_pymotw_header
import compileall
import sys
# Replace the module search path in place. "notthere" does not exist and is
# presumably included to show how compile_path() behaves for a missing entry —
# confirm against the PyMOTW example text.
sys.path[:] = ["examples", "notthere"]
print("sys.path =", sys.path)
# Byte-compile the modules found along the (modified) sys.path.
compileall.compile_path()
| 0 | 0 | 0 |
4b6a26436e528da9c6f39cd1f33f242f904440ad | 1,424 | py | Python | main.py | DwaraknathT/Diffusion-Models | dea059cbd7745aad1c535c0ee06fb15db0e3dd59 | [
"MIT"
] | 2 | 2022-03-18T18:46:31.000Z | 2022-03-23T08:36:06.000Z | main.py | DwaraknathT/Diffusion-Models | dea059cbd7745aad1c535c0ee06fb15db0e3dd59 | [
"MIT"
] | null | null | null | main.py | DwaraknathT/Diffusion-Models | dea059cbd7745aad1c535c0ee06fb15db0e3dd59 | [
"MIT"
] | null | null | null | import yaml
import argparse
from datasets import get_dataset
from diffusion.trainers import get_trainer
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(
description="Training Config", add_help=False
)
parser.add_argument(
"-c",
"--config",
default="",
type=str,
metavar="FILE",
help="YAML config file specifying default arguments",
)
# BUGFIX: this copy invoked _parse_args() without defining it (NameError at
# runtime). The two-pass parser helper is restored from the upstream module.
def _parse_args():
    """
    Parse CLI arguments in two passes: first --config alone, then the rest
    with defaults overridden by values from the YAML config file.

    Returns:
        tuple: (parsed argparse.Namespace, YAML text dump of the namespace)
    """
    # Pass 1: do we have a config file to parse?
    args_config, remaining = config_parser.parse_known_args()
    if args_config.config:
        with open(args_config.config, "r") as f:
            cfg = yaml.safe_load(f)
        parser.set_defaults(**cfg)
    # Pass 2: config-file values act as defaults, explicit CLI flags still win.
    args = parser.parse_args(remaining)
    # Cache the args as a text string to save them in the output dir later.
    args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
    return args, args_text


if __name__ == "__main__":
    args, args_text = _parse_args()
    print(args_text)
    # Get Dataset
    trainloader, testloader = get_dataset(args)
    # Get trainer and train
    trainer = get_trainer(args)
    trainer.train(trainloader, testloader)
| 30.297872 | 93 | 0.711376 | import yaml
import argparse
from datasets import get_dataset
from diffusion.trainers import get_trainer
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(
description="Training Config", add_help=False
)
parser.add_argument(
"-c",
"--config",
default="",
type=str,
metavar="FILE",
help="YAML config file specifying default arguments",
)
def _parse_args():
    """
    Two-stage argument parsing: --config is read first, and any values found
    in that YAML file become defaults for the main parser, so explicit CLI
    flags still take precedence.

    Returns:
        tuple: (argparse.Namespace, YAML text dump of the parsed arguments)
    """
    cfg_ns, leftover = config_parser.parse_known_args()
    if cfg_ns.config:
        # Feed the YAML values in as parser defaults rather than parsed values.
        with open(cfg_ns.config, "r") as fh:
            parser.set_defaults(**yaml.safe_load(fh))
    parsed = parser.parse_args(leftover)
    # Serialised form is kept so callers can persist the effective config.
    return parsed, yaml.safe_dump(parsed.__dict__, default_flow_style=False)
# Script entry point: merge YAML-config defaults with CLI flags, then train.
if __name__ == "__main__":
    args, args_text = _parse_args()
    # Echo the effective configuration so it is captured in run logs.
    print(args_text)
    # Get Dataset
    trainloader, testloader = get_dataset(args)
    # Get trainer and train
    trainer = get_trainer(args)
    trainer.train(trainloader, testloader)
| 599 | 0 | 23 |
ab981234bd684eb515c2af0b043fcf592ef55044 | 15,143 | py | Python | pysnmp/CIENA-CES-ACL-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/CIENA-CES-ACL-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/CIENA-CES-ACL-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module CIENA-CES-ACL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CIENA-CES-ACL-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:31:34 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
cienaCesConfig, = mibBuilder.importSymbols("CIENA-SMI", "cienaCesConfig")
CienaGlobalState, = mibBuilder.importSymbols("CIENA-TC", "CienaGlobalState")
InetAddress, InetAddressType, InetAddressPrefixLength = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType", "InetAddressPrefixLength")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ModuleIdentity, ObjectIdentity, Unsigned32, Counter64, IpAddress, iso, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, TimeTicks, MibIdentifier, Counter32, Gauge32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "ObjectIdentity", "Unsigned32", "Counter64", "IpAddress", "iso", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "TimeTicks", "MibIdentifier", "Counter32", "Gauge32", "NotificationType")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity for the Ciena CES ACL MIB under the enterprise arc
# 1.3.6.1.4.1.1271 (Ciena). NOTE: this file is machine-generated by pysmi
# (see header) — avoid hand edits; regenerate from the ASN.1 source instead.
cienaCesAclMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25))
cienaCesAclMIB.setRevisions(('2012-11-21 00:00', '2012-05-01 00:00',))
# Descriptive texts are only attached when the MIB builder loads them.
if mibBuilder.loadTexts: cienaCesAclMIB.setLastUpdated('201211210000Z')
if mibBuilder.loadTexts: cienaCesAclMIB.setOrganization('Ciena, Inc')
# OID sub-tree anchors: objects (…25.1), global scalars (…25.1.1),
# rule tables (…25.1.2) and the conformance branch (…25.3).
cienaCesAclMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1))
cienaCesAclGlobal = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1))
cienaCesAclRules = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2))
cienaCesAclMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 3))
cienaCesAclMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 3, 1))
cienaCesAclMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 3, 2))
cienaCesAclAdminState = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 1), CienaGlobalState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclAdminState.setStatus('current')
cienaCesAclCacheHit = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclCacheHit.setStatus('current')
cienaCesAclNoHit = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclNoHit.setStatus('current')
cienaCesAclBadPort = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclBadPort.setStatus('current')
cienaCesAclBadDscp = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclBadDscp.setStatus('current')
cienaCesAclOperState = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 6), CienaGlobalState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclOperState.setStatus('current')
cienaCesAclInUseEntries = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclInUseEntries.setStatus('current')
cienaCesAclMaxEntries = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclMaxEntries.setStatus('current')
cienaCesAclBadProtocol = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclBadProtocol.setStatus('current')
cienaCesAclTable = MibTable((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1), )
if mibBuilder.loadTexts: cienaCesAclTable.setStatus('deprecated')
cienaCesAclEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1), ).setIndexNames((0, "CIENA-CES-ACL-MIB", "cienaCesAclEntryInetAddrType"), (0, "CIENA-CES-ACL-MIB", "cienaCesAclEntryInetAddr"), (0, "CIENA-CES-ACL-MIB", "cienaCesAclEntryInetPrefixLength"))
if mibBuilder.loadTexts: cienaCesAclEntry.setStatus('deprecated')
cienaCesAclEntryInetAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 1), InetAddressType())
if mibBuilder.loadTexts: cienaCesAclEntryInetAddrType.setStatus('deprecated')
cienaCesAclEntryInetAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 2), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryInetAddr.setStatus('deprecated')
cienaCesAclEntryInetPrefixLength = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 3), InetAddressPrefixLength())
if mibBuilder.loadTexts: cienaCesAclEntryInetPrefixLength.setStatus('deprecated')
cienaCesAclEntryHits = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryHits.setStatus('deprecated')
cienaCesAclEntryBadPort = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryBadPort.setStatus('deprecated')
cienaCesAclEntryDscpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryDscpMask.setStatus('deprecated')
cienaCesAclEntryBadDscp = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryBadDscp.setStatus('deprecated')
cienaCesAclEntryPortBitMask = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryPortBitMask.setStatus('deprecated')
cienaCesAclEntryNotifInetAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 9), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryNotifInetAddrType.setStatus('deprecated')
cienaCesAclEntryNotifInetAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 10), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryNotifInetAddr.setStatus('deprecated')
cienaCesAclEntryNotifInetPrefixLength = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 11), InetAddressPrefixLength()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryNotifInetPrefixLength.setStatus('deprecated')
cienaCesExtAclTable = MibTable((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2), )
if mibBuilder.loadTexts: cienaCesExtAclTable.setStatus('current')
cienaCesExtAclEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1), ).setIndexNames((0, "CIENA-CES-ACL-MIB", "cienaCesExtAclEntrySrcInetAddrType"), (0, "CIENA-CES-ACL-MIB", "cienaCesExtAclEntrySrcInetAddr"), (0, "CIENA-CES-ACL-MIB", "cienaCesExtAclEntrySrcInetPrefixLen"), (0, "CIENA-CES-ACL-MIB", "cienaCesExtAclEntryDstInetAddrType"), (0, "CIENA-CES-ACL-MIB", "cienaCesExtAclEntryDstInetAddr"), (0, "CIENA-CES-ACL-MIB", "cienaCesExtAclEntryDstInetPrefixLen"))
if mibBuilder.loadTexts: cienaCesExtAclEntry.setStatus('current')
cienaCesExtAclEntrySrcInetAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 1), InetAddressType())
if mibBuilder.loadTexts: cienaCesExtAclEntrySrcInetAddrType.setStatus('current')
cienaCesExtAclEntrySrcInetAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 2), InetAddress().subtype(subtypeSpec=ValueSizeConstraint(16, 16)).setFixedLength(16))
if mibBuilder.loadTexts: cienaCesExtAclEntrySrcInetAddr.setStatus('current')
cienaCesExtAclEntrySrcInetPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 3), InetAddressPrefixLength())
if mibBuilder.loadTexts: cienaCesExtAclEntrySrcInetPrefixLen.setStatus('current')
cienaCesExtAclEntryDstInetAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 4), InetAddressType())
if mibBuilder.loadTexts: cienaCesExtAclEntryDstInetAddrType.setStatus('current')
cienaCesExtAclEntryDstInetAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 5), InetAddress().subtype(subtypeSpec=ValueSizeConstraint(16, 16)).setFixedLength(16))
if mibBuilder.loadTexts: cienaCesExtAclEntryDstInetAddr.setStatus('current')
cienaCesExtAclEntryDstInetPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 6), InetAddressPrefixLength())
if mibBuilder.loadTexts: cienaCesExtAclEntryDstInetPrefixLen.setStatus('current')
cienaCesExtAclEntryNotifSrcInetAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 7), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryNotifSrcInetAddrType.setStatus('current')
cienaCesExtAclEntryNotifSrcInetAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 8), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryNotifSrcInetAddr.setStatus('current')
cienaCesExtAclEntryNotifSrcInetPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 9), InetAddressPrefixLength()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryNotifSrcInetPrefixLen.setStatus('current')
cienaCesExtAclEntryNotifDstInetAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 10), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryNotifDstInetAddrType.setStatus('current')
cienaCesExtAclEntryNotifDstInetAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 11), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryNotifDstInetAddr.setStatus('current')
cienaCesExtAclEntryNotifDstInetPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 12), InetAddressPrefixLength()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryNotifDstInetPrefixLen.setStatus('current')
cienaCesExtAclEntryHits = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryHits.setStatus('current')
cienaCesExtAclEntryBadPort = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryBadPort.setStatus('current')
cienaCesExtAclEntryDscpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 15), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryDscpMask.setStatus('current')
cienaCesExtAclEntryBadDscp = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryBadDscp.setStatus('current')
cienaCesExtAclEntryPortBitMask = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 17), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryPortBitMask.setStatus('current')
cienaCesExtAclEntryProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 18), Bits().clone(namedValues=NamedValues(("icmp", 0), ("tcp", 1), ("udp", 2), ("all", 15)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryProtocol.setStatus('current')
cienaCesExtAclEntryBadProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryBadProtocol.setStatus('current')
mibBuilder.exportSymbols("CIENA-CES-ACL-MIB", cienaCesAclNoHit=cienaCesAclNoHit, PYSNMP_MODULE_ID=cienaCesAclMIB, cienaCesExtAclEntryPortBitMask=cienaCesExtAclEntryPortBitMask, cienaCesExtAclEntryDstInetAddrType=cienaCesExtAclEntryDstInetAddrType, cienaCesAclEntryBadPort=cienaCesAclEntryBadPort, cienaCesAclInUseEntries=cienaCesAclInUseEntries, cienaCesExtAclEntrySrcInetAddr=cienaCesExtAclEntrySrcInetAddr, cienaCesExtAclEntryNotifDstInetAddrType=cienaCesExtAclEntryNotifDstInetAddrType, cienaCesExtAclEntryHits=cienaCesExtAclEntryHits, cienaCesAclTable=cienaCesAclTable, cienaCesAclBadProtocol=cienaCesAclBadProtocol, cienaCesAclEntry=cienaCesAclEntry, cienaCesExtAclEntryBadDscp=cienaCesExtAclEntryBadDscp, cienaCesExtAclEntryDstInetAddr=cienaCesExtAclEntryDstInetAddr, cienaCesAclEntryHits=cienaCesAclEntryHits, cienaCesExtAclEntryProtocol=cienaCesExtAclEntryProtocol, cienaCesAclMIBConformance=cienaCesAclMIBConformance, cienaCesAclEntryInetPrefixLength=cienaCesAclEntryInetPrefixLength, cienaCesAclMIBCompliances=cienaCesAclMIBCompliances, cienaCesAclEntryNotifInetAddr=cienaCesAclEntryNotifInetAddr, cienaCesExtAclEntryNotifSrcInetPrefixLen=cienaCesExtAclEntryNotifSrcInetPrefixLen, cienaCesExtAclEntryBadProtocol=cienaCesExtAclEntryBadProtocol, cienaCesAclEntryBadDscp=cienaCesAclEntryBadDscp, cienaCesAclMIBObjects=cienaCesAclMIBObjects, cienaCesAclOperState=cienaCesAclOperState, cienaCesExtAclTable=cienaCesExtAclTable, cienaCesAclEntryNotifInetPrefixLength=cienaCesAclEntryNotifInetPrefixLength, cienaCesAclEntryInetAddr=cienaCesAclEntryInetAddr, cienaCesExtAclEntryNotifSrcInetAddr=cienaCesExtAclEntryNotifSrcInetAddr, cienaCesAclMIBGroups=cienaCesAclMIBGroups, cienaCesAclGlobal=cienaCesAclGlobal, cienaCesAclEntryInetAddrType=cienaCesAclEntryInetAddrType, cienaCesExtAclEntryNotifDstInetAddr=cienaCesExtAclEntryNotifDstInetAddr, cienaCesAclEntryPortBitMask=cienaCesAclEntryPortBitMask, cienaCesExtAclEntryDstInetPrefixLen=cienaCesExtAclEntryDstInetPrefixLen, 
cienaCesExtAclEntryNotifSrcInetAddrType=cienaCesExtAclEntryNotifSrcInetAddrType, cienaCesExtAclEntryBadPort=cienaCesExtAclEntryBadPort, cienaCesExtAclEntrySrcInetAddrType=cienaCesExtAclEntrySrcInetAddrType, cienaCesExtAclEntryDscpMask=cienaCesExtAclEntryDscpMask, cienaCesAclRules=cienaCesAclRules, cienaCesAclEntryDscpMask=cienaCesAclEntryDscpMask, cienaCesAclEntryNotifInetAddrType=cienaCesAclEntryNotifInetAddrType, cienaCesAclMIB=cienaCesAclMIB, cienaCesAclCacheHit=cienaCesAclCacheHit, cienaCesAclBadPort=cienaCesAclBadPort, cienaCesExtAclEntry=cienaCesExtAclEntry, cienaCesExtAclEntrySrcInetPrefixLen=cienaCesExtAclEntrySrcInetPrefixLen, cienaCesAclAdminState=cienaCesAclAdminState, cienaCesExtAclEntryNotifDstInetPrefixLen=cienaCesExtAclEntryNotifDstInetPrefixLen, cienaCesAclBadDscp=cienaCesAclBadDscp, cienaCesAclMaxEntries=cienaCesAclMaxEntries)
| 132.833333 | 2,830 | 0.783861 | #
# PySNMP MIB module CIENA-CES-ACL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CIENA-CES-ACL-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:31:34 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
cienaCesConfig, = mibBuilder.importSymbols("CIENA-SMI", "cienaCesConfig")
CienaGlobalState, = mibBuilder.importSymbols("CIENA-TC", "CienaGlobalState")
InetAddress, InetAddressType, InetAddressPrefixLength = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType", "InetAddressPrefixLength")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ModuleIdentity, ObjectIdentity, Unsigned32, Counter64, IpAddress, iso, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, TimeTicks, MibIdentifier, Counter32, Gauge32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "ObjectIdentity", "Unsigned32", "Counter64", "IpAddress", "iso", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "TimeTicks", "MibIdentifier", "Counter32", "Gauge32", "NotificationType")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
cienaCesAclMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25))
cienaCesAclMIB.setRevisions(('2012-11-21 00:00', '2012-05-01 00:00',))
if mibBuilder.loadTexts: cienaCesAclMIB.setLastUpdated('201211210000Z')
if mibBuilder.loadTexts: cienaCesAclMIB.setOrganization('Ciena, Inc')
cienaCesAclMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1))
cienaCesAclGlobal = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1))
cienaCesAclRules = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2))
cienaCesAclMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 3))
cienaCesAclMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 3, 1))
cienaCesAclMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 3, 2))
cienaCesAclAdminState = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 1), CienaGlobalState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclAdminState.setStatus('current')
cienaCesAclCacheHit = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclCacheHit.setStatus('current')
cienaCesAclNoHit = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclNoHit.setStatus('current')
cienaCesAclBadPort = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclBadPort.setStatus('current')
cienaCesAclBadDscp = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclBadDscp.setStatus('current')
cienaCesAclOperState = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 6), CienaGlobalState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclOperState.setStatus('current')
cienaCesAclInUseEntries = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclInUseEntries.setStatus('current')
cienaCesAclMaxEntries = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclMaxEntries.setStatus('current')
cienaCesAclBadProtocol = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclBadProtocol.setStatus('current')
cienaCesAclTable = MibTable((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1), )
if mibBuilder.loadTexts: cienaCesAclTable.setStatus('deprecated')
cienaCesAclEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1), ).setIndexNames((0, "CIENA-CES-ACL-MIB", "cienaCesAclEntryInetAddrType"), (0, "CIENA-CES-ACL-MIB", "cienaCesAclEntryInetAddr"), (0, "CIENA-CES-ACL-MIB", "cienaCesAclEntryInetPrefixLength"))
if mibBuilder.loadTexts: cienaCesAclEntry.setStatus('deprecated')
cienaCesAclEntryInetAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 1), InetAddressType())
if mibBuilder.loadTexts: cienaCesAclEntryInetAddrType.setStatus('deprecated')
cienaCesAclEntryInetAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 2), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryInetAddr.setStatus('deprecated')
cienaCesAclEntryInetPrefixLength = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 3), InetAddressPrefixLength())
if mibBuilder.loadTexts: cienaCesAclEntryInetPrefixLength.setStatus('deprecated')
cienaCesAclEntryHits = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryHits.setStatus('deprecated')
cienaCesAclEntryBadPort = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryBadPort.setStatus('deprecated')
cienaCesAclEntryDscpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryDscpMask.setStatus('deprecated')
cienaCesAclEntryBadDscp = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryBadDscp.setStatus('deprecated')
cienaCesAclEntryPortBitMask = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryPortBitMask.setStatus('deprecated')
cienaCesAclEntryNotifInetAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 9), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryNotifInetAddrType.setStatus('deprecated')
cienaCesAclEntryNotifInetAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 10), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryNotifInetAddr.setStatus('deprecated')
cienaCesAclEntryNotifInetPrefixLength = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 1, 1, 11), InetAddressPrefixLength()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclEntryNotifInetPrefixLength.setStatus('deprecated')
cienaCesExtAclTable = MibTable((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2), )
if mibBuilder.loadTexts: cienaCesExtAclTable.setStatus('current')
cienaCesExtAclEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1), ).setIndexNames((0, "CIENA-CES-ACL-MIB", "cienaCesExtAclEntrySrcInetAddrType"), (0, "CIENA-CES-ACL-MIB", "cienaCesExtAclEntrySrcInetAddr"), (0, "CIENA-CES-ACL-MIB", "cienaCesExtAclEntrySrcInetPrefixLen"), (0, "CIENA-CES-ACL-MIB", "cienaCesExtAclEntryDstInetAddrType"), (0, "CIENA-CES-ACL-MIB", "cienaCesExtAclEntryDstInetAddr"), (0, "CIENA-CES-ACL-MIB", "cienaCesExtAclEntryDstInetPrefixLen"))
if mibBuilder.loadTexts: cienaCesExtAclEntry.setStatus('current')
cienaCesExtAclEntrySrcInetAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 1), InetAddressType())
if mibBuilder.loadTexts: cienaCesExtAclEntrySrcInetAddrType.setStatus('current')
cienaCesExtAclEntrySrcInetAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 2), InetAddress().subtype(subtypeSpec=ValueSizeConstraint(16, 16)).setFixedLength(16))
if mibBuilder.loadTexts: cienaCesExtAclEntrySrcInetAddr.setStatus('current')
cienaCesExtAclEntrySrcInetPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 3), InetAddressPrefixLength())
if mibBuilder.loadTexts: cienaCesExtAclEntrySrcInetPrefixLen.setStatus('current')
cienaCesExtAclEntryDstInetAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 4), InetAddressType())
if mibBuilder.loadTexts: cienaCesExtAclEntryDstInetAddrType.setStatus('current')
cienaCesExtAclEntryDstInetAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 5), InetAddress().subtype(subtypeSpec=ValueSizeConstraint(16, 16)).setFixedLength(16))
if mibBuilder.loadTexts: cienaCesExtAclEntryDstInetAddr.setStatus('current')
cienaCesExtAclEntryDstInetPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 6), InetAddressPrefixLength())
if mibBuilder.loadTexts: cienaCesExtAclEntryDstInetPrefixLen.setStatus('current')
cienaCesExtAclEntryNotifSrcInetAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 7), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryNotifSrcInetAddrType.setStatus('current')
cienaCesExtAclEntryNotifSrcInetAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 8), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryNotifSrcInetAddr.setStatus('current')
cienaCesExtAclEntryNotifSrcInetPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 9), InetAddressPrefixLength()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryNotifSrcInetPrefixLen.setStatus('current')
cienaCesExtAclEntryNotifDstInetAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 10), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryNotifDstInetAddrType.setStatus('current')
cienaCesExtAclEntryNotifDstInetAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 11), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryNotifDstInetAddr.setStatus('current')
cienaCesExtAclEntryNotifDstInetPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 12), InetAddressPrefixLength()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryNotifDstInetPrefixLen.setStatus('current')
cienaCesExtAclEntryHits = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryHits.setStatus('current')
cienaCesExtAclEntryBadPort = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryBadPort.setStatus('current')
cienaCesExtAclEntryDscpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 15), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryDscpMask.setStatus('current')
cienaCesExtAclEntryBadDscp = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryBadDscp.setStatus('current')
cienaCesExtAclEntryPortBitMask = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 17), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryPortBitMask.setStatus('current')
cienaCesExtAclEntryProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 18), Bits().clone(namedValues=NamedValues(("icmp", 0), ("tcp", 1), ("udp", 2), ("all", 15)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryProtocol.setStatus('current')
cienaCesExtAclEntryBadProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 25, 1, 2, 2, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesExtAclEntryBadProtocol.setStatus('current')
mibBuilder.exportSymbols("CIENA-CES-ACL-MIB", cienaCesAclNoHit=cienaCesAclNoHit, PYSNMP_MODULE_ID=cienaCesAclMIB, cienaCesExtAclEntryPortBitMask=cienaCesExtAclEntryPortBitMask, cienaCesExtAclEntryDstInetAddrType=cienaCesExtAclEntryDstInetAddrType, cienaCesAclEntryBadPort=cienaCesAclEntryBadPort, cienaCesAclInUseEntries=cienaCesAclInUseEntries, cienaCesExtAclEntrySrcInetAddr=cienaCesExtAclEntrySrcInetAddr, cienaCesExtAclEntryNotifDstInetAddrType=cienaCesExtAclEntryNotifDstInetAddrType, cienaCesExtAclEntryHits=cienaCesExtAclEntryHits, cienaCesAclTable=cienaCesAclTable, cienaCesAclBadProtocol=cienaCesAclBadProtocol, cienaCesAclEntry=cienaCesAclEntry, cienaCesExtAclEntryBadDscp=cienaCesExtAclEntryBadDscp, cienaCesExtAclEntryDstInetAddr=cienaCesExtAclEntryDstInetAddr, cienaCesAclEntryHits=cienaCesAclEntryHits, cienaCesExtAclEntryProtocol=cienaCesExtAclEntryProtocol, cienaCesAclMIBConformance=cienaCesAclMIBConformance, cienaCesAclEntryInetPrefixLength=cienaCesAclEntryInetPrefixLength, cienaCesAclMIBCompliances=cienaCesAclMIBCompliances, cienaCesAclEntryNotifInetAddr=cienaCesAclEntryNotifInetAddr, cienaCesExtAclEntryNotifSrcInetPrefixLen=cienaCesExtAclEntryNotifSrcInetPrefixLen, cienaCesExtAclEntryBadProtocol=cienaCesExtAclEntryBadProtocol, cienaCesAclEntryBadDscp=cienaCesAclEntryBadDscp, cienaCesAclMIBObjects=cienaCesAclMIBObjects, cienaCesAclOperState=cienaCesAclOperState, cienaCesExtAclTable=cienaCesExtAclTable, cienaCesAclEntryNotifInetPrefixLength=cienaCesAclEntryNotifInetPrefixLength, cienaCesAclEntryInetAddr=cienaCesAclEntryInetAddr, cienaCesExtAclEntryNotifSrcInetAddr=cienaCesExtAclEntryNotifSrcInetAddr, cienaCesAclMIBGroups=cienaCesAclMIBGroups, cienaCesAclGlobal=cienaCesAclGlobal, cienaCesAclEntryInetAddrType=cienaCesAclEntryInetAddrType, cienaCesExtAclEntryNotifDstInetAddr=cienaCesExtAclEntryNotifDstInetAddr, cienaCesAclEntryPortBitMask=cienaCesAclEntryPortBitMask, cienaCesExtAclEntryDstInetPrefixLen=cienaCesExtAclEntryDstInetPrefixLen, 
cienaCesExtAclEntryNotifSrcInetAddrType=cienaCesExtAclEntryNotifSrcInetAddrType, cienaCesExtAclEntryBadPort=cienaCesExtAclEntryBadPort, cienaCesExtAclEntrySrcInetAddrType=cienaCesExtAclEntrySrcInetAddrType, cienaCesExtAclEntryDscpMask=cienaCesExtAclEntryDscpMask, cienaCesAclRules=cienaCesAclRules, cienaCesAclEntryDscpMask=cienaCesAclEntryDscpMask, cienaCesAclEntryNotifInetAddrType=cienaCesAclEntryNotifInetAddrType, cienaCesAclMIB=cienaCesAclMIB, cienaCesAclCacheHit=cienaCesAclCacheHit, cienaCesAclBadPort=cienaCesAclBadPort, cienaCesExtAclEntry=cienaCesExtAclEntry, cienaCesExtAclEntrySrcInetPrefixLen=cienaCesExtAclEntrySrcInetPrefixLen, cienaCesAclAdminState=cienaCesAclAdminState, cienaCesExtAclEntryNotifDstInetPrefixLen=cienaCesExtAclEntryNotifDstInetPrefixLen, cienaCesAclBadDscp=cienaCesAclBadDscp, cienaCesAclMaxEntries=cienaCesAclMaxEntries)
| 0 | 0 | 0 |
c903af9ef97633a516d28b47dfa7db93f03ab9a0 | 1,218 | py | Python | src/terial/classifier/opensurfaces/resize.py | keunhong/photoshape | 6e795512e059bc5a6bdac748fda961f66d51c6f6 | [
"PostgreSQL"
] | 81 | 2018-10-10T06:55:41.000Z | 2022-03-01T04:18:23.000Z | src/terial/classifier/opensurfaces/resize.py | keunhong/photoshape | 6e795512e059bc5a6bdac748fda961f66d51c6f6 | [
"PostgreSQL"
] | 17 | 2018-10-22T04:50:59.000Z | 2022-02-12T00:29:11.000Z | src/terial/classifier/opensurfaces/resize.py | keunhong/photoshape | 6e795512e059bc5a6bdac748fda961f66d51c6f6 | [
"PostgreSQL"
] | 16 | 2018-11-20T06:57:32.000Z | 2021-12-24T07:09:37.000Z | import argparse
from functools import partial
from multiprocessing import Pool
from pathlib import Path
from PIL import Image
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument(dest='base_dir', type=Path)
parser.add_argument(dest='out_dir', type=Path)
args = parser.parse_args()
if __name__ == '__main__':
main()
| 27.066667 | 64 | 0.65353 | import argparse
from functools import partial
from multiprocessing import Pool
from pathlib import Path
from PIL import Image
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument(dest='base_dir', type=Path)
parser.add_argument(dest='out_dir', type=Path)
args = parser.parse_args()
def resize_and_save(photo_path, base_dir, out_dir):
    """Resize one photo to its label image's size and save it into *out_dir*.

    Skips photos whose output already exists, so an interrupted job can be
    resumed.

    :param photo_path: path to the source ``.jpg`` photo
    :param base_dir: dataset root containing the ``photos-labels`` directory
    :param out_dir: destination directory for the resized photo
    """
    photo_id = photo_path.stem
    # Bug fix: honor the out_dir parameter instead of silently reading the
    # module-level args.out_dir (the parameter was previously ignored).
    out_path = out_dir / photo_path.name
    if out_path.exists():
        return
    label_path = base_dir / 'photos-labels' / f'{photo_id}.png'
    photo_im = Image.open(str(photo_path))
    label_im = Image.open(str(label_path))
    # The photo must align pixel-for-pixel with its label map.
    photo_im = photo_im.resize(label_im.size)
    photo_im.save(out_path)
def main():
    """Resize every ``*.jpg`` under ``args.base_dir/photos`` into ``args.out_dir``."""
    photos_dir = args.base_dir / 'photos'
    args.out_dir.mkdir(exist_ok=True, parents=True)
    photo_paths = list(photos_dir.glob('*.jpg'))
    pbar = tqdm(photo_paths)
    worker = partial(resize_and_save,
                     base_dir=args.base_dir,
                     out_dir=args.out_dir)
    # Use the pool as a context manager so worker processes are always
    # terminated and joined (the original leaked the pool on exceptions).
    with Pool(processes=4) as pool:
        for _ in pool.imap_unordered(worker, photo_paths):
            pbar.update(1)
    pbar.close()
if __name__ == '__main__':
main()
| 822 | 0 | 46 |
7185ca21d2152e85a78bb4b577624ecfce010843 | 1,153 | py | Python | helpers/getPrice.py | v1s1t0r999/Novell | 4499e5c0634b12dfbd5b5e8ba671b579b7ac5be9 | [
"MIT"
] | 8 | 2021-08-13T03:00:34.000Z | 2021-08-22T05:08:03.000Z | helpers/getPrice.py | v1s1t0r999/Novell | 4499e5c0634b12dfbd5b5e8ba671b579b7ac5be9 | [
"MIT"
] | 9 | 2021-08-15T20:27:59.000Z | 2021-09-06T20:22:36.000Z | helpers/getPrice.py | v1s1t0r999/Novell | 4499e5c0634b12dfbd5b5e8ba671b579b7ac5be9 | [
"MIT"
] | 9 | 2021-08-14T16:43:04.000Z | 2021-09-07T19:14:33.000Z | import requests
from helpers.logHelper import logger
symbolNamePairs = {
"BITCOIN": "BTC",
"ETHEREUM": "ETH",
"DOGECOIN": "DOGE",
}
setting = settings()
| 25.622222 | 83 | 0.616652 | import requests
from helpers.logHelper import logger
symbolNamePairs = {
"BITCOIN": "BTC",
"ETHEREUM": "ETH",
"DOGECOIN": "DOGE",
}
class settings:
    """Configuration holder for the Binance REST API."""

    def __init__(self):
        # Base URL that every API path is appended to.
        self.endpoint = "https://api.binance.com"
setting = settings()
def request(method, path, params=None):
    """Issue an HTTP request against the Binance API and return the JSON body.

    :param method: HTTP verb, e.g. "GET"
    :param path: API path appended to ``setting.endpoint``
    :param params: optional dict of query parameters
    :return: decoded JSON payload, or ``None`` (implicitly) when any
        exception is raised — callers must handle a ``None`` result
    """
    try:
        resp = requests.request(method, setting.endpoint + path, params=params)
        data = resp.json()
        return data
    except Exception as e:
        # Broad catch keeps the caller alive on network/JSON errors; the
        # failure is only logged.
        # NOTE(review): no timeout is passed, so a stalled connection can
        # hang indefinitely — consider requests.request(..., timeout=...).
        logger.warning(f"Exception caught in requests function: {e}")
def getPrice(symbol):
    """Look up the current USDT price of *symbol* on Binance.

    Full coin names listed in ``symbolNamePairs`` (e.g. "bitcoin") are
    translated to their exchange ticker first.  Returns
    ``[price_string, ticker]`` on success; on any failure the error is
    logged and ``None`` is returned.
    """
    ticker = symbol.upper()
    # Translate full coin names ("BITCOIN") to exchange tickers ("BTC").
    ticker = symbolNamePairs.get(ticker, ticker)
    try:
        data = request("GET", "/api/v3/ticker/price", {"symbol": ticker + "USDT"})
        return [str(data["price"]), ticker]
    except Exception as e:
        logger.warning(f"Exception caught in getPrice function: {e}")
def getCost(symbol, amount):
    """Return ``[total_cost, ticker]`` for buying *amount* units of *symbol*."""
    price_str, ticker = getPrice(symbol.upper())
    return [float(price_str) * amount, ticker]
| 845 | -6 | 127 |
aff4de3fc4484ab22adaf2ed2a6838f63f81a8ff | 5,721 | py | Python | vmpy/streams.py | sladkovm/Velometria.py | 22c97723f3b5ba5342a6178535f48cc426daac2f | [
"MIT"
] | 2 | 2016-09-04T09:26:03.000Z | 2017-07-27T05:52:06.000Z | vmpy/streams.py | sladkovm/Velometria.py | 22c97723f3b5ba5342a6178535f48cc426daac2f | [
"MIT"
] | 4 | 2016-08-03T17:54:12.000Z | 2016-08-09T20:11:45.000Z | vmpy/streams.py | sladkovm/Velometria_py | 22c97723f3b5ba5342a6178535f48cc426daac2f | [
"MIT"
] | null | null | null | """Operation on Streams that leave the shape of the stream unchanged"""
import numpy as np
import pandas as pd
from vmpy.utils import cast_array_to_original_type
# FTP based 7-zones with left bind edge set to -0.001
POWER_ZONES_THRESHOLD = [-0.001, 0.55, 0.75, 0.9, 1.05, 1.2, 1.5, 10.0]
POWER_ZONES_THRESHOLD_DESC = ["Active Recovery", "Endurance", "Tempo",
"Threshold", "VO2Max", "Anaerobic", "Neuromuscular",]
POWER_ZONES_THRESHOLD_ZNAME = ["Z1", "Z2", "Z3", "Z4", "Z5", "Z6", "Z7"]
# LTHR based 5-zones with left bind edge set to -0.001
HEART_RATE_ZONES = [-0.001, 0.68, 0.83, 0.94, 1.05, 10.0]
HEART_RATE_ZONES_DESC = ["Active recovery", "Endurance", "Tempo", "Threshold", "VO2Max",]
HEART_RATE_ZONES_ZNAME = ["Z1", "Z2", "Z3", "Z4", "Z5"]
def compute_zones(arg, **kwargs):
    """Map a power or heart-rate stream onto training zones.

    Exactly one of the keyword arguments *zones*, *ftp* or *lthr* must be
    supplied:

    - ``zones``: explicit bin edges (left edge below the minimum value,
      right edge above the maximum)
    - ``ftp``: scales the FTP-based 7-zone thresholds
    - ``lthr``: scales the LTHR-based 5-zone thresholds

    ``labels`` may override the default integer zone labels
    (1..number-of-zones).

    :param arg: array-like stream of watts or bpm
    :return: zone per sample, cast back to ``type(arg)``
    :raises ValueError: when none of *zones*, *ftp*, *lthr* is given
    """
    series = pd.Series(arg)
    if kwargs.get('zones', None):
        edges = kwargs.get('zones')
    elif kwargs.get('ftp', None):
        edges = np.asarray(POWER_ZONES_THRESHOLD) * kwargs.get('ftp')
    elif kwargs.get('lthr', None):
        edges = np.asarray(HEART_RATE_ZONES) * kwargs.get('lthr')
    else:
        raise ValueError
    zone_labels = kwargs.get('labels', list(range(1, len(edges))))
    # One label per bin: n edges define n-1 bins.
    assert len(edges) == (len(zone_labels) + 1)
    binned = pd.cut(series, bins=edges, labels=zone_labels)
    return cast_array_to_original_type(binned, type(arg))
def wpk(power, weight):
    """Power-to-weight ratio (watts per kilogram).

    :param power: list, ndarray or Series of power values
    :param weight: weight in kilograms
    :return: same container type as *power*
    """
    ratio = pd.Series(power, dtype=float) / weight
    return cast_array_to_original_type(ratio, type(power))
def mask_fill(arg, mask=None, value=0.0, **kwargs):
    """Overwrite the samples where *mask* is False with *value*.

    :param arg: array-like stream
    :param mask: array-like of bools; ``None`` (default) disables masking
        and returns *arg* untouched
    :param value: replacement for masked-out samples, default 0.0
    :return: same container type as *arg* (``np.array`` copies the input
        before writing)
    """
    if mask is None:
        return arg
    filled = np.array(arg)
    keep = np.array(mask, dtype=bool)
    filled[~keep] = value
    return cast_array_to_original_type(filled, type(arg))
def median_filter(arg, window=31, threshold=1, value=None, **kwargs):
    """Replace outliers detected by a rolling median filter.

    A sample is an outlier when its absolute deviation from the rolling
    median exceeds ``1.4826 * threshold`` times the rolling median absolute
    deviation (MAD).  The factor 1.4826 makes the MAD an unbiased estimate
    of the standard deviation for Gaussian data, so *threshold* acts as an
    approximate sigma multiplier.

    Parameters
    ----------
    arg : array-like
    window : int, optional
        Size of the rolling window including the sample itself, default=31.
    threshold : number, optional
        Outlier sensitivity in approximate sigmas, default=1.
    value : float, optional
        Replacement value for outliers.  Any falsy value (``None``, ``0``)
        means outliers are replaced by the rolling median instead.

    Returns
    -------
    Same container type as *arg*.
    """
    y = pd.Series(arg)
    rolling_median = y.rolling(window, min_periods=1).median()
    difference = np.abs(y - rolling_median)
    median_abs_deviation = difference.rolling(window, min_periods=1).median()
    outlier_idx = difference > 1.4826 * threshold * median_abs_deviation
    if value:
        y[outlier_idx] = value
    else:
        y[outlier_idx] = rolling_median[outlier_idx]
    # Bug fix: Series.as_matrix() was deprecated in pandas 0.23 and removed
    # in pandas 1.0; to_numpy() returns the same ndarray on modern pandas.
    y = y.to_numpy()
    y = cast_array_to_original_type(y, type(arg))
    return y
def rolling_mean(arg, window=10, mask=None, value=0.0, **kwargs):
    """Smooth a stream with a rolling mean.

    When *mask* is given, masked-out samples are first replaced by *value*
    via :func:`mask_fill`.  The keyword ``type`` selects the averaging:
    ``"ewma"`` uses an exponentially-weighted mean with span *window*;
    anything else (default ``"uniform"``) uses a plain rolling mean.

    :param arg: array-like stream
    :param window: window size / EWMA span in samples, default 10
    :param mask: optional array-like of bools
    :param value: replacement for masked-out samples, default 0.0
    :return: same container type as *arg*
    """
    if mask is not None:
        arg = mask_fill(arg, mask, value, **kwargs)
    series = pd.Series(arg)
    if kwargs.get('type', 'uniform') == 'ewma':
        smoothed = series.ewm(span=window, min_periods=1).mean().values
    else:
        smoothed = series.rolling(window, min_periods=1).mean().values
    return cast_array_to_original_type(smoothed, type(arg))
| 28.044118 | 103 | 0.650586 | """Operation on Streams that leave the shape of the stream unchanged"""
import numpy as np
import pandas as pd
from vmpy.utils import cast_array_to_original_type
# FTP based 7-zones with left bind edge set to -0.001
POWER_ZONES_THRESHOLD = [-0.001, 0.55, 0.75, 0.9, 1.05, 1.2, 1.5, 10.0]
POWER_ZONES_THRESHOLD_DESC = ["Active Recovery", "Endurance", "Tempo",
"Threshold", "VO2Max", "Anaerobic", "Neuromuscular",]
POWER_ZONES_THRESHOLD_ZNAME = ["Z1", "Z2", "Z3", "Z4", "Z5", "Z6", "Z7"]
# LTHR based 5-zones with left bind edge set to -0.001
HEART_RATE_ZONES = [-0.001, 0.68, 0.83, 0.94, 1.05, 10.0]
HEART_RATE_ZONES_DESC = ["Active recovery", "Endurance", "Tempo", "Threshold", "VO2Max",]
HEART_RATE_ZONES_ZNAME = ["Z1", "Z2", "Z3", "Z4", "Z5"]
def compute_zones(arg, **kwargs):
"""Convert stream into respective zones stream
Watts streams can be converted either into ftp based 7-zones or into custom zones
HR streams can be converted either in lthr based 5-zones or into custom zones
One of three *ftp*, *lthr* or *zone* keyword parameters must be provided
Parameters
----------
arg : array-like
ftp : number, optional
Value for FTP, will be used for 7-zones calculation
lthr: number, optional
Value for LTHR, will be used for 5-zones calculation
zones: list, optional
List of custom defined zones with left edge set to -1 and right edge to 10000
Returns
-------
array-like of int, the same type as arg
"""
arg_s = pd.Series(arg)
if kwargs.get('zones', None):
abs_zones = kwargs.get('zones')
elif kwargs.get('ftp', None):
abs_zones = np.asarray(POWER_ZONES_THRESHOLD) * kwargs.get('ftp')
elif kwargs.get('lthr', None):
abs_zones = np.asarray(HEART_RATE_ZONES) * kwargs.get('lthr')
else:
raise ValueError
labels = kwargs.get('labels', list(range(1, len(abs_zones))))
assert len(abs_zones) == (len(labels) + 1)
y = pd.cut(arg_s, bins=abs_zones, labels=labels)
y = cast_array_to_original_type(y, type(arg))
return y
def wpk(power, weight):
"""Watts per kilo
Parameters
----------
power : list, ndarray, series
weight : number
Returns
-------
array-like
"""
rv = pd.Series(power, dtype=float)/ weight
rv = cast_array_to_original_type(rv, type(power))
return rv
def mask_fill(arg, mask=None, value=0.0, **kwargs):
"""Replace masked values
Parameters
----------
arg : array-like
mask : array-like of bools, optional
Default value is None, which means no masking will be applied
value : number, optional
Value to use for replacement, default=0.0
Returns
-------
y: type of input argument
In case the arg is an ndarray all operations will be performed on the original array.
To preserve original array pass a copy to the function
"""
if mask is None:
return arg
y = np.array(arg)
mask = np.array(mask, dtype=bool)
y[~mask] = value
rv = cast_array_to_original_type(y, type(arg))
return rv
def median_filter(arg, window=31, threshold=1, value=None, **kwargs):
"""Outlier replacement using median filter
Detect outliers using median filter and replace with rolling median or specified value
Parameters
----------
arg : array-like
window : int, optional
Size of window (including the sample; default=31 is equal to 15 on either side of value)
threshold : number, optional
default=3 and corresponds to 2xSigma
value : float, optional
Value to be used for replacement, default=None, which means replacement by rolling median value
Returns
-------
y: type of input argument
In case the arg is an ndarray all operations will be performed on the original array.
To preserve original array pass a copy to the function
"""
y = pd.Series(arg)
rolling_median = y.rolling(window, min_periods=1).median()
difference = np.abs(y - rolling_median)
median_abs_deviation = difference.rolling(window, min_periods=1).median()
outlier_idx = difference > 1.4826 * threshold * median_abs_deviation
""" The factor 1.4826 makes the MAD scale estimate
an unbiased estimate of the standard deviation for Gaussian data.
"""
if value:
y[outlier_idx] = value
else:
y[outlier_idx] = rolling_median[outlier_idx]
y = y.as_matrix()
y = cast_array_to_original_type(y, type(arg))
return y
def rolling_mean(arg, window=10, mask=None, value=0.0, **kwargs):
"""Compute rolling mean
Compute *uniform* or *ewma* rolling mean of the stream. In-process masking with replacement is
controlled by optional keyword parameters
Parameters
----------
arg : array-like
window : int
Size of the moving window in sec, default=10
mask : array-like of boolean, optional
Default value is None, which means no masking will be applied
value : number, optional
Value to use for replacement, default=0.0
type : {"uniform", "emwa"}, optional
Type of averaging, default="uniform"
Returns
-------
y: type of input argument
The moving array will indicate which samples to set to zero before
applying rolling mean.
"""
if mask is not None:
arg = mask_fill(arg, mask, value, **kwargs)
y = pd.Series(arg)
if kwargs.get('type', 'uniform') == 'ewma':
y = y.ewm(span=window, min_periods=1).mean().values
else:
y = y.rolling(window, min_periods=1).mean().values
y = cast_array_to_original_type(y, type(arg))
return y
| 0 | 0 | 0 |
9ba8c51d46d3e3446c3291a3adefab5e9344eac3 | 1,577 | py | Python | arividam/djangocms_news/cms_plugins.py | c4sc/arividam | b728322d59ec48d6811ed7a709157a594e5653d4 | [
"MIT"
] | 3 | 2016-05-26T06:03:11.000Z | 2016-07-09T07:12:22.000Z | arividam/djangocms_news/cms_plugins.py | c4sc/arividam | b728322d59ec48d6811ed7a709157a594e5653d4 | [
"MIT"
] | 33 | 2016-05-26T05:33:00.000Z | 2017-12-06T12:08:17.000Z | arividam/djangocms_news/cms_plugins.py | c4sc/arividam | b728322d59ec48d6811ed7a709157a594e5653d4 | [
"MIT"
] | null | null | null | from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.pluginmodel import CMSPlugin
from django.utils.translation import ugettext_lazy as _
from cms.models import Page
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from arividam.utils import get_page_by_slug
from .models import PromotedNews
import logging
logger = logging.getLogger(__name__)
plugin_pool.register_plugin(NewsPlugin)
plugin_pool.register_plugin(FeaturedNewsPlugin)
| 32.183673 | 93 | 0.701966 | from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.pluginmodel import CMSPlugin
from django.utils.translation import ugettext_lazy as _
from cms.models import Page
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from arividam.utils import get_page_by_slug
from .models import PromotedNews
import logging
logger = logging.getLogger(__name__)
class NewsPlugin(CMSPluginBase):
    """django CMS plugin rendering the three newest children of the 'news' page."""
    model = CMSPlugin  # no custom configuration model
    name = _("News")
    render_template = "djangocms_news/plugin.html"
    cache = False  # always re-render so fresh news shows up immediately
    def render(self, context, instance, placeholder):
        """Add the three most recent news pages to the template context."""
        news = get_page_by_slug('news')
        children = news.children.order_by("-publication_date")[:3]
        # Flatten each child page into a dict; 'content' is the page's
        # first placeholder rendered against the current context.
        pages = [{'title': child.get_title(settings.LANGUAGE_CODE),
                  'content': child.get_placeholders()[0].render(context, None),
                  'id': child.pk
                  } for child in children]
        context.update({
            'news': pages
        })
        return context
class FeaturedNewsPlugin(CMSPluginBase):
    """django CMS plugin rendering up to five promoted news entries for the current site."""
    model = CMSPlugin  # no custom configuration model
    name = _("Featured News")
    render_template = "djangocms_news/featured.html"
    cache = False  # always re-render so promotions show up immediately
    def render(self, context, instance, placeholder):
        """Add the five newest PromotedNews of the request's site to the context."""
        site = get_current_site(context['request'])
        news = PromotedNews.objects.filter(site=site).order_by("-page__publication_date")[:5]
        context.update({
            "news": news
        })
        return context
plugin_pool.register_plugin(NewsPlugin)
plugin_pool.register_plugin(FeaturedNewsPlugin)
| 688 | 319 | 46 |
b4817a06e5f0a3ae68d8d96eca0ab9f4ed6b270c | 1,212 | py | Python | util/update.py | suchak1/hyperdrive | 8bc78af179de8d2b26968683d3248840f7470d4c | [
"MIT"
] | 20 | 2020-11-03T10:20:32.000Z | 2022-03-01T13:28:39.000Z | util/update.py | suchak1/hyperdrive | 8bc78af179de8d2b26968683d3248840f7470d4c | [
"MIT"
] | 70 | 2020-11-05T08:06:57.000Z | 2022-03-31T11:20:59.000Z | util/update.py | suchak1/hyperdrive | 8bc78af179de8d2b26968683d3248840f7470d4c | [
"MIT"
] | 5 | 2021-04-07T05:26:40.000Z | 2022-02-25T15:26:02.000Z | import os
import re
import requests
import subprocess
# Bump every pinned dependency in requirements.txt to its latest PyPI release.
filename = 'requirements.txt'
new_packages = []
with open(filename, 'r') as file:
    # Lines look like "package == 1.2.3".
    pattern = '(.*) == (.*)'
    packages = re.findall(pattern, file.read())
    for package, version in packages:
        response = requests.get(f'https://pypi.org/pypi/{package}/json')
        keys = response.json()['releases'].keys()
        # Keep only purely numeric dotted versions (skips rc/beta/post tags).
        releases = [key for key in keys if key.replace('.', '').isdigit()]
        # Sort component-wise numerically so "1.10" > "1.9"; pop() = newest.
        latest = sorted(
            releases,
            key=lambda release: [
                int(number) for number in release.split('.')
            ]).pop()
        if latest != version:
            print(f'Upgrading {package} ({version} => {latest})')
            # CI images expose `python`; local machines use `python3`.
            CI = os.environ.get('CI')
            python = 'python' if CI else 'python3'
            cmd = f'{python} -m pip install {package}=={latest}'
            code = subprocess.run(cmd, shell=True).returncode
            if code:
                # Abort immediately with pip's exit code on a failed install.
                exit(code)
            version = latest
        new_packages.append((package, version))
with open(filename, 'w') as file:
    # Rewrite the file with the (possibly upgraded) pins.
    for package, version in new_packages:
        file.write(f'{package} == {version}\n')
| 33.666667 | 75 | 0.546205 | import os
import re
import requests
import subprocess
filename = 'requirements.txt'
new_packages = []
with open(filename, 'r') as file:
pattern = '(.*) == (.*)'
packages = re.findall(pattern, file.read())
for package, version in packages:
response = requests.get(f'https://pypi.org/pypi/{package}/json')
keys = response.json()['releases'].keys()
releases = [key for key in keys if key.replace('.', '').isdigit()]
latest = sorted(
releases,
key=lambda release: [
int(number) for number in release.split('.')
]).pop()
if latest != version:
print(f'Upgrading {package} ({version} => {latest})')
CI = os.environ.get('CI')
python = 'python' if CI else 'python3'
cmd = f'{python} -m pip install {package}=={latest}'
code = subprocess.run(cmd, shell=True).returncode
if code:
exit(code)
version = latest
new_packages.append((package, version))
with open(filename, 'w') as file:
for package, version in new_packages:
file.write(f'{package} == {version}\n')
| 0 | 0 | 0 |
b7ba00d67a5f1b17de35a1d6768295104c0874fb | 222 | py | Python | subarrrayDivision.py | sanjaykaswan/HackerRank | 23cebf02bfacea50d5982ce889b76025312c5c61 | [
"MIT"
] | null | null | null | subarrrayDivision.py | sanjaykaswan/HackerRank | 23cebf02bfacea50d5982ce889b76025312c5c61 | [
"MIT"
] | null | null | null | subarrrayDivision.py | sanjaykaswan/HackerRank | 23cebf02bfacea50d5982ce889b76025312c5c61 | [
"MIT"
] | 1 | 2020-10-05T11:55:48.000Z | 2020-10-05T11:55:48.000Z | n = int(input())
num = list(map(int , input().split()))
d,m = map(int , input().split())
c= 0
# Count contiguous windows of length m whose sum equals d
# (HackerRank "Subarray Division"); n is read from stdin above.
for i in range(0,n-m+1):
    d_ = 0
    # Sum the window num[i:i+m].
    for j in range(0,m):
        d_ += num[i+j]
    if d_ == d:
        c += 1
print(c) | 14.8 | 38 | 0.463964 | n = int(input())
num = list(map(int , input().split()))
d,m = map(int , input().split())
c= 0
for i in range(0,n-m+1):
d_ = 0
for j in range(0,m):
d_ += num[i+j]
if d_ == d:
c += 1
print(c) | 0 | 0 | 0 |
58a7e0d66eb0c7383e5fc0b4dcbd11f6f7eace49 | 4,222 | py | Python | tests/wrappers/test_multioutput.py | bpkwee/metrics | 3aba057ad9ff87183aaaf5988b8ccfdab81b2095 | [
"Apache-2.0"
] | null | null | null | tests/wrappers/test_multioutput.py | bpkwee/metrics | 3aba057ad9ff87183aaaf5988b8ccfdab81b2095 | [
"Apache-2.0"
] | null | null | null | tests/wrappers/test_multioutput.py | bpkwee/metrics | 3aba057ad9ff87183aaaf5988b8ccfdab81b2095 | [
"Apache-2.0"
] | null | null | null | from collections import namedtuple
from functools import partial
import pytest
import torch
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score as sk_r2score
from tests.helpers import seed_all
from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, NUM_CLASSES, MetricTester
from torchmetrics import Metric
from torchmetrics.classification import Accuracy
from torchmetrics.regression import R2Score
from torchmetrics.wrappers.multioutput import MultioutputWrapper
seed_all(42)
class _MultioutputMetric(Metric):
    """Test class that allows passing base metric as a class rather than its instantiation to the wrapper."""

    def __init__(
        self,
        base_metric_class,
        num_outputs: int = 1,
        **kwargs,
    ) -> None:
        # Bug fix: this copy of the class used self.metric in every method
        # but never assigned it, so every call raised AttributeError.
        super().__init__(**kwargs)
        self.metric = MultioutputWrapper(
            base_metric_class(**kwargs),
            num_outputs=num_outputs,
        )

    def _update(self, preds: torch.Tensor, target: torch.Tensor) -> None:
        """Update the each pair of outputs and predictions."""
        return self.metric.update(preds, target)

    def _compute(self) -> torch.Tensor:
        """Compute the R2 score between each pair of outputs and predictions."""
        return self.metric.compute()

    @torch.jit.unused
    def forward(self, *args, **kwargs):
        """Run forward on the underlying metric."""
        return self.metric(*args, **kwargs)

    def reset(self) -> None:
        """Reset the underlying metric state."""
        self.metric.reset()
num_targets = 2  # number of output columns exercised by all cases below
Input = namedtuple("Input", ["preds", "target"])
# Random regression fixtures: one float per (batch, sample, target).
_multi_target_regression_inputs = Input(
    preds=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
    target=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
)
# Random classification fixtures: per-class scores and integer labels
# for each target column.
_multi_target_classification_inputs = Input(
    preds=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, num_targets),
    target=torch.randint(NUM_CLASSES, (NUM_BATCHES, BATCH_SIZE, num_targets)),
)
def _multi_target_sk_r2score(preds, target, adjusted=0, multioutput="raw_values"):
    """Reference multi-output R2 score computed with sklearn.

    Applies the adjusted-R2 correction when *adjusted* (the number of
    regressors) is non-zero.
    """
    flat_preds = preds.view(-1, num_targets).numpy()
    flat_target = target.view(-1, num_targets).numpy()
    score = sk_r2score(flat_target, flat_preds, multioutput=multioutput)
    if adjusted != 0:
        n_samples = flat_preds.shape[0]
        score = 1 - (1 - score) * (n_samples - 1) / (n_samples - adjusted - 1)
    return score
def _multi_target_sk_accuracy(preds, target, num_outputs):
    """Reference accuracy per output column, computed with sklearn."""
    return [
        accuracy_score(torch.argmax(preds[:, :, i], dim=1), target[:, i])
        for i in range(num_outputs)
    ]
@pytest.mark.parametrize(
    "base_metric_class, compare_metric, preds, target, num_outputs, metric_kwargs",
    [
        # Regression case: multi-output R2Score vs the sklearn reference.
        (
            R2Score,
            _multi_target_sk_r2score,
            _multi_target_regression_inputs.preds,
            _multi_target_regression_inputs.target,
            num_targets,
            {},
        ),
        # Classification case: per-output Accuracy vs the sklearn reference.
        (
            Accuracy,
            partial(_multi_target_sk_accuracy, num_outputs=2),
            _multi_target_classification_inputs.preds,
            _multi_target_classification_inputs.target,
            num_targets,
            dict(num_classes=NUM_CLASSES),
        ),
    ],
)
class TestMultioutputWrapper(MetricTester):
    """Test the MultioutputWrapper class with regression and classification inner metrics."""
    @pytest.mark.parametrize("ddp", [True, False])
    @pytest.mark.parametrize("dist_sync_on_step", [True, False])
    def test_multioutput_wrapper(
        self, base_metric_class, compare_metric, preds, target, num_outputs, metric_kwargs, ddp, dist_sync_on_step
    ):
        """Test that the multioutput wrapper properly slices and computes outputs along the output dimension for
        both classification and regression metrics."""
        # MetricTester drives the metric across batches/processes and
        # compares against the sklearn reference implementation.
        self.run_class_metric_test(
            ddp,
            preds,
            target,
            _MultioutputMetric,
            compare_metric,
            dist_sync_on_step,
            metric_args=dict(num_outputs=num_outputs, base_metric_class=base_metric_class, **metric_kwargs),
        )
| 34.048387 | 114 | 0.681431 | from collections import namedtuple
from functools import partial
import pytest
import torch
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score as sk_r2score
from tests.helpers import seed_all
from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, NUM_CLASSES, MetricTester
from torchmetrics import Metric
from torchmetrics.classification import Accuracy
from torchmetrics.regression import R2Score
from torchmetrics.wrappers.multioutput import MultioutputWrapper
seed_all(42)
class _MultioutputMetric(Metric):
"""Test class that allows passing base metric as a class rather than its instantiation to the wrapper."""
def __init__(
self,
base_metric_class,
num_outputs: int = 1,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.metric = MultioutputWrapper(
base_metric_class(**kwargs),
num_outputs=num_outputs,
)
def _update(self, preds: torch.Tensor, target: torch.Tensor) -> None:
"""Update the each pair of outputs and predictions."""
return self.metric.update(preds, target)
def _compute(self) -> torch.Tensor:
"""Compute the R2 score between each pair of outputs and predictions."""
return self.metric.compute()
@torch.jit.unused
def forward(self, *args, **kwargs):
"""Run forward on the underlying metric."""
return self.metric(*args, **kwargs)
def reset(self) -> None:
"""Reset the underlying metric state."""
self.metric.reset()
num_targets = 2
Input = namedtuple("Input", ["preds", "target"])
_multi_target_regression_inputs = Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
)
_multi_target_classification_inputs = Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, num_targets),
target=torch.randint(NUM_CLASSES, (NUM_BATCHES, BATCH_SIZE, num_targets)),
)
def _multi_target_sk_r2score(preds, target, adjusted=0, multioutput="raw_values"):
"""Compute R2 score over multiple outputs."""
sk_preds = preds.view(-1, num_targets).numpy()
sk_target = target.view(-1, num_targets).numpy()
r2_score = sk_r2score(sk_target, sk_preds, multioutput=multioutput)
if adjusted != 0:
r2_score = 1 - (1 - r2_score) * (sk_preds.shape[0] - 1) / (sk_preds.shape[0] - adjusted - 1)
return r2_score
def _multi_target_sk_accuracy(preds, target, num_outputs):
"""Compute accuracy over multiple outputs."""
accs = []
for i in range(num_outputs):
accs.append(accuracy_score(torch.argmax(preds[:, :, i], dim=1), target[:, i]))
return accs
@pytest.mark.parametrize(
"base_metric_class, compare_metric, preds, target, num_outputs, metric_kwargs",
[
(
R2Score,
_multi_target_sk_r2score,
_multi_target_regression_inputs.preds,
_multi_target_regression_inputs.target,
num_targets,
{},
),
(
Accuracy,
partial(_multi_target_sk_accuracy, num_outputs=2),
_multi_target_classification_inputs.preds,
_multi_target_classification_inputs.target,
num_targets,
dict(num_classes=NUM_CLASSES),
),
],
)
class TestMultioutputWrapper(MetricTester):
"""Test the MultioutputWrapper class with regression and classification inner metrics."""
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("dist_sync_on_step", [True, False])
def test_multioutput_wrapper(
self, base_metric_class, compare_metric, preds, target, num_outputs, metric_kwargs, ddp, dist_sync_on_step
):
"""Test that the multioutput wrapper properly slices and computes outputs along the output dimension for
both classification and regression metrics."""
self.run_class_metric_test(
ddp,
preds,
target,
_MultioutputMetric,
compare_metric,
dist_sync_on_step,
metric_args=dict(num_outputs=num_outputs, base_metric_class=base_metric_class, **metric_kwargs),
)
| 261 | 0 | 27 |
691178639c6d8f94470bbd96184c210f322e6490 | 9,225 | py | Python | oldp/apps/references/models.py | ImgBotApp/oldp | 575dc6f711dde3470d910e21c9440ee9b79a69ed | [
"MIT"
] | 3 | 2020-06-27T08:19:35.000Z | 2020-12-27T17:46:02.000Z | oldp/apps/references/models.py | ImgBotApp/oldp | 575dc6f711dde3470d910e21c9440ee9b79a69ed | [
"MIT"
] | null | null | null | oldp/apps/references/models.py | ImgBotApp/oldp | 575dc6f711dde3470d910e21c9440ee9b79a69ed | [
"MIT"
] | null | null | null | import hashlib
import json
import logging
import re
import uuid
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from oldp.apps.cases.models import Case
from oldp.apps.laws.models import Law
logger = logging.getLogger(__name__)
class ReferenceMarker(models.Model):
"""
Abstract class for reference markers, i.e. the actual reference within a text "§§ 12-14 BGB".
Marker has a position (start, end, line), unique identifier (uuid, randomly generated), text of the marker as in
the text, list of references (can be law, case, ...). Implementations of abstract class (LawReferenceMarker, ...)
have the corresponding source object (LawReferenceMarker: referenced_by = a law object).
"""
text = models.CharField(max_length=250) # Text of marker
uuid = models.CharField(max_length=36)
start = models.IntegerField(default=0)
end = models.IntegerField(default=0)
line = models.CharField(blank=True, max_length=200)
referenced_by = None
referenced_by_type = None
references = []
@staticmethod
@staticmethod
def make_markers_clickable(value):
"""
TODO Replace ref marker number with db id
"""
return re.sub(r'\[ref=([-a-z0-9]+)\](.*?)\[\/ref\]', r'<a href="#refs" onclick="clickRefMarker(this);" data-ref-uuid="\1" class="ref">\2</a>', value)
class LawReferenceMarker(ReferenceMarker):
"""
A reference marker in a law content object.
"""
referenced_by_type = Law
referenced_by = models.ForeignKey(Law, on_delete=models.CASCADE)
@receiver(pre_save, sender=LawReferenceMarker)
class CaseReferenceMarker(ReferenceMarker):
"""
A reference marker in a case content object.
"""
referenced_by_type = Case
referenced_by = models.ForeignKey(Case, on_delete=models.CASCADE)
@receiver(pre_save, sender=CaseReferenceMarker)
class Reference(models.Model):
"""
A reference connecting two content objects (1:1 relation). The object that is referenced is either "law", "case"
or ... (reference target). The referencing object (the object which text contains the reference) can be derived
via marker.
Abstract class: Depending on the referencing object (its marker) the corresponding implementation is used.
If the referenced object is not defined, the reference is "not assigned" (is_assigned method)
"""
law = models.ForeignKey(Law, null=True, on_delete=models.SET_NULL)
case = models.ForeignKey(Case, null=True, on_delete=models.SET_NULL)
to = models.CharField(max_length=250) # to as string, if case or law cannot be assigned (ref id)
to_hash = models.CharField(max_length=100, null=True)
marker = None
count = None
def get_url(self):
"""
Returns Url to law or case item (if exist) otherwise return search Url.
:return:
"""
if self.law is not None:
return self.law.get_url()
elif self.case is not None:
return self.case.get_url()
else:
return '/search/?q=%s' % self.marker.text
class LawReference(Reference):
"""
A reference from a law to any content object (law, case, ...)
"""
marker = models.ForeignKey(LawReferenceMarker, on_delete=models.CASCADE)
@receiver(pre_save, sender=LawReference)
class CaseReference(Reference):
"""
A reference from a case to any content object (law, case, ...)
"""
marker = models.ForeignKey(CaseReferenceMarker, on_delete=models.CASCADE)
@receiver(pre_save, sender=CaseReference)
# @receiver(pre_save, sender=Reference)
# def json_dumps_reference(sender, instance, *args, **kwargs):
# if not isinstance(instance.to, str):
# instance.to = json.dumps(instance.to)
# @receiver(post_init, sender=LawReference)
# def json_loads_reference(sender, instance, *args, **kwargs):
# print(instance.ids)
# exit(0)
# if instance.ids is not None and isinstance(instance.ids, str):
# instance.ids = json.loads(instance.ids)
| 30.04886 | 157 | 0.617344 | import hashlib
import json
import logging
import re
import uuid
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from oldp.apps.cases.models import Case
from oldp.apps.laws.models import Law
logger = logging.getLogger(__name__)
class ReferenceMarker(models.Model):
"""
Abstract class for reference markers, i.e. the actual reference within a text "§§ 12-14 BGB".
Marker has a position (start, end, line), unique identifier (uuid, randomly generated), text of the marker as in
the text, list of references (can be law, case, ...). Implementations of abstract class (LawReferenceMarker, ...)
have the corresponding source object (LawReferenceMarker: referenced_by = a law object).
"""
text = models.CharField(max_length=250) # Text of marker
uuid = models.CharField(max_length=36)
start = models.IntegerField(default=0)
end = models.IntegerField(default=0)
line = models.CharField(blank=True, max_length=200)
referenced_by = None
referenced_by_type = None
references = []
class Meta:
abstract = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# TODO Handle ids with signals?
def get_referenced_by(self):
raise NotImplementedError()
def replace_content(self, content, marker_offset, key):
marker_close = '[/ref]'
start = self.start + marker_offset
end = self.end + marker_offset
# marker_open = '[ref=%i]' % key
# Instead of key use uuid
marker_open = '[ref=%s]' % self.uuid
marker_offset += len(marker_open) + len(marker_close)
# double replacements
content = content[:start] \
+ marker_open \
+ content[start:end] \
+ marker_close \
+ content[end:]
return content, marker_offset
def set_uuid(self):
self.uuid = uuid.uuid4()
def set_references(self, ids_list):
# TODO Save references to db
# TODO Assign items after complete data is saved in db
# print('Save ref ids: %s' % ids_list)
# print('TODO needs to save ref markers first')
# exit(1)
if self.__class__.__name__ == 'LawReferenceMarker':
reference_type = LawReference
elif self.__class__.__name__ == 'CaseReferenceMarker':
reference_type = CaseReference
else:
raise ValueError('Cannot determine reference_type: %s' % self.__class__.__name__)
self.references = []
# Transform to list if is JSON string
if isinstance(ids_list, str):
ids_list = json.loads(ids_list)
for ref_id in ids_list:
ref_id = json.dumps(ref_id)
self.references.append(reference_type(to=ref_id, marker=self))
self.ids = ids_list
def save_references(self):
if self.references:
for ref in self.references:
ref.save()
logger.debug('Saved: %s' % ref)
# exit(1)
else:
logger.debug('No references to save')
def get_references(self):
# TODO Get references from db
if isinstance(self.ids, str):
self.ids = json.loads(self.ids)
return self.ids
def from_ref(self, ref, by):
self.ids = ref.ids
self.line = ref.line
self.start = ref.start
self.end = ref.end
self.text = ref.text
self.uuid = ref.uuid
self.referenced_by = by
# self.set_references(self.ids)
return self
def __repr__(self):
return self.__str__()
def __str__(self):
return 'RefMarker(ids=%s, line=%s, pos=%i-%i, by=%s)' % ('self.ids', self.line, self.start, self.end, self.referenced_by)
@staticmethod
def remove_markers(value):
return re.sub(r'\[ref=([-a-z0-9]+)\](.*?)\[\/ref\]', r'\2', value)
@staticmethod
def make_markers_clickable(value):
"""
TODO Replace ref marker number with db id
"""
return re.sub(r'\[ref=([-a-z0-9]+)\](.*?)\[\/ref\]', r'<a href="#refs" onclick="clickRefMarker(this);" data-ref-uuid="\1" class="ref">\2</a>', value)
class LawReferenceMarker(ReferenceMarker):
"""
A reference marker in a law content object.
"""
referenced_by_type = Law
referenced_by = models.ForeignKey(Law, on_delete=models.CASCADE)
def get_referenced_by(self) -> Law:
return self.referenced_by
@receiver(pre_save, sender=LawReferenceMarker)
def json_dumps_reference(sender, instance, *args, **kwargs):
if isinstance(instance.ids, list):
# Save ids as JSON
instance.ids = json.dumps(instance.ids)
class CaseReferenceMarker(ReferenceMarker):
"""
A reference marker in a case content object.
"""
referenced_by_type = Case
referenced_by = models.ForeignKey(Case, on_delete=models.CASCADE)
def get_referenced_by(self) -> Case:
return self.referenced_by
@receiver(pre_save, sender=CaseReferenceMarker)
def json_dumps_reference(sender, instance, *args, **kwargs):
if isinstance(instance.ids, list):
# Save ids as JSON
instance.ids = json.dumps(instance.ids)
class Reference(models.Model):
"""
A reference connecting two content objects (1:1 relation). The object that is referenced is either "law", "case"
or ... (reference target). The referencing object (the object which text contains the reference) can be derived
via marker.
Abstract class: Depending on the referencing object (its marker) the corresponding implementation is used.
If the referenced object is not defined, the reference is "not assigned" (is_assigned method)
"""
law = models.ForeignKey(Law, null=True, on_delete=models.SET_NULL)
case = models.ForeignKey(Case, null=True, on_delete=models.SET_NULL)
to = models.CharField(max_length=250) # to as string, if case or law cannot be assigned (ref id)
to_hash = models.CharField(max_length=100, null=True)
marker = None
count = None
class Meta:
abstract = True
def get_url(self):
"""
Returns Url to law or case item (if exist) otherwise return search Url.
:return:
"""
if self.law is not None:
return self.law.get_url()
elif self.case is not None:
return self.case.get_url()
else:
return '/search/?q=%s' % self.marker.text
def get_target(self):
if self.law is not None:
return self.law
elif self.case is not None:
return self.case
else:
return None
def get_title(self):
if self.law is not None:
return self.law.get_title()
elif self.case is not None:
return self.case.get_title()
else:
to = json.loads(self.to)
to['sect'] = str(to['sect'])
if to['type'] == 'law' and 'book' in to and 'sect' in to:
print(to)
if to['book'] == 'gg':
sect_prefix = 'Art.'
elif 'anlage' in to['sect']:
sect_prefix = ''
else:
sect_prefix = '§'
to['sect'] = to['sect'].replace('anlage-', 'Anlage ')
return sect_prefix + ' ' + to['sect'] + ' ' + to['book'].upper()
else:
return self.marker.text
def is_assigned(self):
return self.law is not None or self.case is not None
def set_to_hash(self):
m = hashlib.md5()
m.update(self.to.encode('utf-8'))
self.to_hash = m.hexdigest()
def __repr__(self):
return self.__str__()
def __str__(self):
if self.count:
return 'Reference(count=%i, to=%s, hash=%s)' % (self.count, self.to, self.to_hash)
else:
# return self.__dict__
return 'Reference(%s, target=%s, marker=%s)' % (self.to, self.get_target(), self.marker)
class LawReference(Reference):
"""
A reference from a law to any content object (law, case, ...)
"""
marker = models.ForeignKey(LawReferenceMarker, on_delete=models.CASCADE)
@receiver(pre_save, sender=LawReference)
def pre_save_law_reference(sender, instance, *args, **kwargs):
instance.set_to_hash()
class CaseReference(Reference):
"""
A reference from a case to any content object (law, case, ...)
"""
marker = models.ForeignKey(CaseReferenceMarker, on_delete=models.CASCADE)
@receiver(pre_save, sender=CaseReference)
def pre_save_case_reference(sender, instance, *args, **kwargs):
instance.set_to_hash()
# @receiver(pre_save, sender=Reference)
# def json_dumps_reference(sender, instance, *args, **kwargs):
# if not isinstance(instance.to, str):
# instance.to = json.dumps(instance.to)
# @receiver(post_init, sender=LawReference)
# def json_loads_reference(sender, instance, *args, **kwargs):
# print(instance.ids)
# exit(0)
# if instance.ids is not None and isinstance(instance.ids, str):
# instance.ids = json.loads(instance.ids)
| 4,473 | 28 | 654 |
1b56f073718e33f60f9ca4b06328c7693d89ada9 | 136 | py | Python | apt-flash.py | apt-flash/apt-flash | 9ea6ebf016988f51407005b7e3d234b4807612d4 | [
"MIT"
] | null | null | null | apt-flash.py | apt-flash/apt-flash | 9ea6ebf016988f51407005b7e3d234b4807612d4 | [
"MIT"
] | null | null | null | apt-flash.py | apt-flash/apt-flash | 9ea6ebf016988f51407005b7e3d234b4807612d4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
if os.geteuid() != 0:
exit('This script requires root privileges.\nPlease try again with sudo.')
| 19.428571 | 78 | 0.698529 | #!/usr/bin/env python3
import os
if os.geteuid() != 0:
exit('This script requires root privileges.\nPlease try again with sudo.')
| 0 | 0 | 0 |
c71155e531dae239fa04c8b65a3d46aaeb1df3cc | 2,962 | py | Python | avg_embedding_means.py | lidalei/Real-Time-Voice-Cloning | 25d39c31b96d6c5b1783b36d9c09bb2a450ddaee | [
"MIT"
] | 1 | 2019-11-07T14:07:23.000Z | 2019-11-07T14:07:23.000Z | avg_embedding_means.py | lidalei/Real-Time-Voice-Cloning | 25d39c31b96d6c5b1783b36d9c09bb2a450ddaee | [
"MIT"
] | null | null | null | avg_embedding_means.py | lidalei/Real-Time-Voice-Cloning | 25d39c31b96d6c5b1783b36d9c09bb2a450ddaee | [
"MIT"
] | null | null | null | import argparse
from pathlib import Path
import typing
import numpy as np
import scipy.spatial.distance
from encoder.inference import Model as EncoderModel
from synthesizer.inference import Synthesizer
_NUM_ENROLLMENTS = 3
_NUM_VERIFICATIONS = 5
_WAV_FODLER = Path('/Users/dalei/Downloads/VCTK-Corpus/wav48')
_TXT_FODLER = Path('/Users/dalei/Downloads/VCTK-Corpus/txt')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
args, _ = parser.parse_known_args()
run(args)
| 38.973684 | 118 | 0.667792 | import argparse
from pathlib import Path
import typing
import numpy as np
import scipy.spatial.distance
from encoder.inference import Model as EncoderModel
from synthesizer.inference import Synthesizer
_NUM_ENROLLMENTS = 3
_NUM_VERIFICATIONS = 5
_WAV_FODLER = Path('/Users/dalei/Downloads/VCTK-Corpus/wav48')
_TXT_FODLER = Path('/Users/dalei/Downloads/VCTK-Corpus/txt')
def run(args: argparse.Namespace):
# Load encoder model
encoder = EncoderModel()
encoder.load(Path('encoder/saved_models/pretrained.pt'))
# [p304, p305, ...]
speaker_dirs = [f.parts[-1] for f in _WAV_FODLER.glob("*") if f.is_dir()]
if len(speaker_dirs) == 0:
raise Exception("No speakers found. Make sure you are pointing to the directory")
# 'p304' -> [001.wav, 002.wav, ...]
speaker_utterances = dict() # type: typing.Dict[str, typing.List[str]]
for d in speaker_dirs:
speaker_utterances[d] = [w.parts[-1] for w in _WAV_FODLER.joinpath(d).glob('*.wav')]
speaker_embeddings = dict() # type: typing.Dict[str, np.ndarray]
no_use_speaker_embeddings = dict() # type: typing.Dict[str, np.ndarray]
for d in speaker_utterances:
utterances = speaker_utterances[d]
enrollments = utterances[:_NUM_ENROLLMENTS]
print(f'speaker: {d}, enrollments: {enrollments}')
audios = [_WAV_FODLER.joinpath(d, u) for u in enrollments]
speaker_embeddings[d] = encoder.embed_speaker(audios, using_partials=True)
no_use_speaker_embeddings[d] = encoder.embed_speaker(audios, using_partials=False)
# Different speaker
for d in speaker_utterances:
utterances = speaker_utterances[d]
# Repeat 5 times
for utterance in np.random.choice(utterances, size=_NUM_VERIFICATIONS, replace=False): # type: str
txt = _TXT_FODLER.joinpath(d, utterance).with_suffix('.txt')
text = txt.read_text()
# using partials
utterance_embedding = encoder.embed_utterance(
_WAV_FODLER.joinpath(d, utterance),
source_sr=Synthesizer.sample_rate,
using_partials=True,
)
cosine_similarity = 1.0 - scipy.spatial.distance.cosine(speaker_embeddings[d], utterance_embedding)
print(f'use: speaker: {d}, utterance: {utterance}, text: {text}, sim: {cosine_similarity}')
# not using partials
utterance_embedding = encoder.embed_utterance(
_WAV_FODLER.joinpath(d, utterance),
source_sr=Synthesizer.sample_rate,
using_partials=False,
)
cosine_similarity = 1.0 - scipy.spatial.distance.cosine(no_use_speaker_embeddings[d], utterance_embedding)
print(f'no_use: speaker: {d}, utterance: {utterance}, text: {text}, sim: {cosine_similarity}')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
args, _ = parser.parse_known_args()
run(args)
| 2,440 | 0 | 23 |
6a36a28b6799120be05f798a21f49d9d39daf42b | 1,813 | py | Python | dist-packages/cupshelpers/__init__.py | Jianwei-Wang/python2.7_lib | 911b8e81512e5ac5f13e669ab46f7693ed897378 | [
"PSF-2.0"
] | null | null | null | dist-packages/cupshelpers/__init__.py | Jianwei-Wang/python2.7_lib | 911b8e81512e5ac5f13e669ab46f7693ed897378 | [
"PSF-2.0"
] | null | null | null | dist-packages/cupshelpers/__init__.py | Jianwei-Wang/python2.7_lib | 911b8e81512e5ac5f13e669ab46f7693ed897378 | [
"PSF-2.0"
] | null | null | null | ## system-config-printer
## Copyright (C) 2008, 2011 Red Hat, Inc.
## Authors:
## Tim Waugh <twaugh@redhat.com>
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
__all__ = ['set_debugprint_fn',
'Device', 'Printer', 'activateNewPrinter',
'copyPPDOptions', 'getDevices', 'getPrinters',
'missingPackagesAndExecutables', 'missingExecutables',
'parseDeviceID',
'setPPDPageSize',
'ppds',
'openprinting']
_debugprint_fn = _no_debug
def set_debugprint_fn (debugprint):
"""
Set debugging hook.
@param debugprint: function to print debug output
@type debugprint: fn (str) -> None
"""
global _debugprint_fn
_debugprint_fn = debugprint
from cupshelpers import \
Device, \
Printer, \
activateNewPrinter, \
copyPPDOptions, \
getDevices, \
getPrinters, \
missingPackagesAndExecutables, \
missingExecutables, \
parseDeviceID, \
setPPDPageSize
import ppds
import openprinting
| 29.721311 | 82 | 0.669608 | ## system-config-printer
## Copyright (C) 2008, 2011 Red Hat, Inc.
## Authors:
## Tim Waugh <twaugh@redhat.com>
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
__all__ = ['set_debugprint_fn',
'Device', 'Printer', 'activateNewPrinter',
'copyPPDOptions', 'getDevices', 'getPrinters',
'missingPackagesAndExecutables', 'missingExecutables',
'parseDeviceID',
'setPPDPageSize',
'ppds',
'openprinting']
def _no_debug (x):
return
_debugprint_fn = _no_debug
def _debugprint (x):
_debugprint_fn (x)
def set_debugprint_fn (debugprint):
"""
Set debugging hook.
@param debugprint: function to print debug output
@type debugprint: fn (str) -> None
"""
global _debugprint_fn
_debugprint_fn = debugprint
from cupshelpers import \
Device, \
Printer, \
activateNewPrinter, \
copyPPDOptions, \
getDevices, \
getPrinters, \
missingPackagesAndExecutables, \
missingExecutables, \
parseDeviceID, \
setPPDPageSize
import ppds
import openprinting
| 30 | 0 | 45 |
8ac61c5c2a642c55b26c0a95260c48d9f0ec3980 | 9,600 | py | Python | kinto/tests/core/test_cache.py | swhgoon/kinto | 10001d44bb08e4fbc74da31a41a4eaa461e0fd7f | [
"Apache-2.0"
] | null | null | null | kinto/tests/core/test_cache.py | swhgoon/kinto | 10001d44bb08e4fbc74da31a41a4eaa461e0fd7f | [
"Apache-2.0"
] | null | null | null | kinto/tests/core/test_cache.py | swhgoon/kinto | 10001d44bb08e4fbc74da31a41a4eaa461e0fd7f | [
"Apache-2.0"
] | 1 | 2020-07-15T04:27:08.000Z | 2020-07-15T04:27:08.000Z | import mock
import time
import redis
from pyramid import testing
from kinto.core.utils import sqlalchemy
from kinto.core.storage import exceptions
from kinto.core.cache import (CacheBase, postgresql as postgresql_backend,
redis as redis_backend, memory as memory_backend,
heartbeat)
from .support import unittest, skip_if_no_postgresql
@skip_if_no_postgresql
| 32.542373 | 79 | 0.638646 | import mock
import time
import redis
from pyramid import testing
from kinto.core.utils import sqlalchemy
from kinto.core.storage import exceptions
from kinto.core.cache import (CacheBase, postgresql as postgresql_backend,
redis as redis_backend, memory as memory_backend,
heartbeat)
from .support import unittest, skip_if_no_postgresql
class CacheBaseTest(unittest.TestCase):
def setUp(self):
self.cache = CacheBase(cache_prefix='')
def test_mandatory_overrides(self):
calls = [
(self.cache.initialize_schema,),
(self.cache.flush,),
(self.cache.ttl, ''),
(self.cache.expire, '', ''),
(self.cache.get, ''),
(self.cache.set, '', ''),
(self.cache.delete, ''),
]
for call in calls:
self.assertRaises(NotImplementedError, *call)
class BaseTestCache(object):
backend = None
settings = {}
def setUp(self):
super(BaseTestCache, self).setUp()
self.cache = self.backend.load_from_config(self._get_config())
self.cache.initialize_schema()
self.request = None
self.client_error_patcher = None
def _get_config(self, settings=None):
"""Mock Pyramid config object.
"""
if settings is None:
settings = self.settings
config = testing.setUp()
config.add_settings(settings)
return config
def tearDown(self):
mock.patch.stopall()
super(BaseTestCache, self).tearDown()
self.cache.flush()
def get_backend_prefix(self, prefix):
settings_prefix = self.settings.copy()
settings_prefix['cache_prefix'] = prefix
config_prefix = self._get_config(settings=settings_prefix)
# initiating cache backend with prefix:
backend_prefix = self.backend.load_from_config(config_prefix)
return backend_prefix
def test_backend_error_is_raised_anywhere(self):
self.client_error_patcher.start()
calls = [
(self.cache.flush,),
(self.cache.ttl, ''),
(self.cache.expire, '', 0),
(self.cache.get, ''),
(self.cache.set, '', ''),
(self.cache.delete, ''),
]
for call in calls:
self.assertRaises(exceptions.BackendError, *call)
def test_ping_returns_false_if_unavailable(self):
self.client_error_patcher.start()
ping = heartbeat(self.cache)
self.assertFalse(ping(self.request))
with mock.patch('kinto.core.cache.random.random', return_value=0.6):
self.assertFalse(ping(self.request))
with mock.patch('kinto.core.cache.random.random', return_value=0.4):
self.assertFalse(ping(self.request))
def test_ping_returns_true_if_available(self):
ping = heartbeat(self.cache)
with mock.patch('kinto.core.cache.random.random', return_value=0.6):
self.assertTrue(ping(self.request))
with mock.patch('kinto.core.cache.random.random', return_value=0.4):
self.assertTrue(ping(self.request))
def test_ping_logs_error_if_unavailable(self):
self.client_error_patcher.start()
ping = heartbeat(self.cache)
with mock.patch('kinto.core.cache.logger.exception') as exc_handler:
self.assertFalse(ping(self.request))
self.assertTrue(exc_handler.called)
def test_set_adds_the_record(self):
stored = 'toto'
self.cache.set('foobar', stored)
retrieved = self.cache.get('foobar')
self.assertEquals(retrieved, stored)
def test_values_remains_python_dict(self):
def setget(k, v):
self.cache.set(k, v)
return (self.cache.get(k), v)
self.assertEqual(*setget('foobar', 3))
self.assertEqual(*setget('foobar', ['a']))
self.assertEqual(*setget('foobar', {'b': [1, 2]}))
self.assertEqual(*setget('foobar', 3.14))
def test_delete_removes_the_record(self):
self.cache.set('foobar', 'toto')
self.cache.delete('foobar')
retrieved = self.cache.get('foobar')
self.assertIsNone(retrieved)
def test_delete_does_not_fail_if_record_is_unknown(self):
self.cache.delete('foobar')
def test_expire_expires_the_value(self):
self.cache.set('foobar', 'toto')
self.cache.expire('foobar', 0.01)
time.sleep(0.02)
retrieved = self.cache.get('foobar')
self.assertIsNone(retrieved)
def test_set_with_ttl_expires_the_value(self):
self.cache.set('foobar', 'toto', 0.01)
time.sleep(0.02)
retrieved = self.cache.get('foobar')
self.assertIsNone(retrieved)
def test_ttl_return_the_time_to_live(self):
self.cache.set('foobar', 'toto')
self.cache.expire('foobar', 10)
ttl = self.cache.ttl('foobar')
self.assertGreater(ttl, 0)
self.assertLessEqual(ttl, 10)
def test_ttl_return_none_if_unknown(self):
ttl = self.cache.ttl('unknown')
self.assertTrue(ttl < 0)
def test_cache_prefix_is_set(self):
backend_prefix = self.get_backend_prefix(prefix='prefix_')
# Set the value
backend_prefix.set('key', 'foo')
# Validate that it was set with the prefix.
obtained = self.cache.get('prefix_key')
self.assertEqual(obtained, 'foo')
def test_cache_when_prefix_is_not_set(self):
backend_prefix = self.get_backend_prefix(prefix='')
# Set a value
backend_prefix.set('key', 'foo')
# Validate that it was set with no prefix
obtained = self.cache.get('key')
self.assertEqual(obtained, 'foo')
def test_prefix_value_use_to_get_data(self):
backend_prefix = self.get_backend_prefix(prefix='prefix_')
# Set the value with the prefix
self.cache.set('prefix_key', 'foo')
# Validate that the prefix was added
obtained = backend_prefix.get('key')
self.assertEqual(obtained, 'foo')
def test_prefix_value_use_to_delete_data(self):
backend_prefix = self.get_backend_prefix(prefix='prefix_')
# Set the value
self.cache.set('prefix_key', 'foo')
# Delete the value
backend_prefix.delete('key')
# Validate that the value was deleted
obtained = self.cache.get('prefix_key')
self.assertEqual(obtained, None)
def test_prefix_value_used_with_ttl(self):
backend_prefix = self.get_backend_prefix(prefix='prefix_')
self.cache.set('prefix_key', 'foo', 10)
# Validate that the ttl add the prefix to the key.
obtained = backend_prefix.ttl('key')
self.assertLessEqual(obtained, 10)
self.assertGreater(obtained, 9)
def test_prefix_value_used_with_expire(self):
backend_prefix = self.get_backend_prefix(prefix='prefix_')
self.cache.set('prefix_foobar', 'toto', 10)
# expiring the ttl of key
backend_prefix.expire('foobar', 0)
# Make sure the TTL was set accordingly.
ttl = self.cache.ttl('prefix_foobar')
self.assertLessEqual(ttl, 0)
# The record should have expired
retrieved = self.cache.get('prefix_foobar')
self.assertIsNone(retrieved)
class MemoryCacheTest(BaseTestCache, unittest.TestCase):
backend = memory_backend
settings = {
'cache_prefix': ''
}
def get_backend_prefix(self, prefix):
backend_prefix = BaseTestCache.get_backend_prefix(self, prefix)
# Share the store between both client for tests.
backend_prefix._ttl = self.cache._ttl
backend_prefix._store = self.cache._store
return backend_prefix
def test_backend_error_is_raised_anywhere(self):
pass
def test_ping_returns_false_if_unavailable(self):
pass
def test_ping_logs_error_if_unavailable(self):
pass
class RedisCacheTest(BaseTestCache, unittest.TestCase):
backend = redis_backend
settings = {
'cache_url': '',
'cache_pool_size': 10,
'cache_prefix': ''
}
def setUp(self):
super(RedisCacheTest, self).setUp()
self.client_error_patcher = mock.patch.object(
self.cache._client,
'execute_command',
side_effect=redis.RedisError)
def test_config_is_taken_in_account(self):
config = testing.setUp(settings=self.settings)
config.add_settings({'cache_url': 'redis://:secret@peer.loc:4444/7'})
backend = self.backend.load_from_config(config)
self.assertDictEqual(
backend.settings,
{'host': 'peer.loc', 'password': 'secret', 'db': 7, 'port': 4444})
def test_timeout_is_passed_to_redis_client(self):
config = testing.setUp(settings=self.settings)
config.add_settings({'cache_pool_timeout': '1.5'})
backend = self.backend.load_from_config(config)
self.assertEqual(backend._client.connection_pool.timeout, 1.5)
@skip_if_no_postgresql
class PostgreSQLCacheTest(BaseTestCache, unittest.TestCase):
    """Shared cache test suite run against the PostgreSQL backend.

    The decorator name suggests the whole class is skipped when no
    PostgreSQL server is reachable — confirm against the fixture.
    """
    backend = postgresql_backend
    settings = {
        'cache_pool_size': 10,
        'cache_url': 'postgres://postgres:postgres@localhost:5432/testdb',
        'cache_prefix': ''
    }

    def setUp(self):
        super(PostgreSQLCacheTest, self).setUp()
        # Patcher used by the base suite to simulate a failing database:
        # once started, creating a session raises SQLAlchemyError.
        self.client_error_patcher = mock.patch.object(
            self.cache.client,
            'session_factory',
            side_effect=sqlalchemy.exc.SQLAlchemyError)
| 7,397 | 1,608 | 167 |
6afe9bb9c57f5f20486a9a35bab9902e2d952b02 | 3,910 | py | Python | tests/analysis/test_is_linear.py | alinavalinav/finn | e443a5859066a410a63c08dcfec4a90527ca24be | [
"BSD-3-Clause"
] | 1 | 2021-01-29T14:39:48.000Z | 2021-01-29T14:39:48.000Z | tests/analysis/test_is_linear.py | alinavalinav/finn | e443a5859066a410a63c08dcfec4a90527ca24be | [
"BSD-3-Clause"
] | null | null | null | tests/analysis/test_is_linear.py | alinavalinav/finn | e443a5859066a410a63c08dcfec4a90527ca24be | [
"BSD-3-Clause"
] | 1 | 2022-03-07T02:57:55.000Z | 2022-03-07T02:57:55.000Z | # Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import onnx.helper as oh
from onnx import TensorProto
import finn.analysis.topology as ta
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.infer_shapes import InferShapes
| 45.465116 | 80 | 0.693606 | # Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import onnx.helper as oh
from onnx import TensorProto
import finn.analysis.topology as ta
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.infer_shapes import InferShapes
def test_is_linear_linear():
    """A straight Add -> Mul chain must be reported as linear."""
    inp = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
    out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2])
    internals = [
        oh.make_tensor_value_info(name, TensorProto.FLOAT, [2])
        for name in ("add_param", "mul_param")
    ]
    nodes = [
        oh.make_node("Add", ["top_in", "add_param"], ["middle"]),
        oh.make_node("Mul", ["middle", "mul_param"], ["top_out"]),
    ]
    graph = oh.make_graph(
        name="test", inputs=[inp], outputs=[out], value_info=internals, nodes=nodes
    )
    model = ModelWrapper(oh.make_model(graph))
    model = model.transform(InferShapes())
    result = model.analysis(ta.is_linear)
    assert result["is_linear"] is True
def test_is_linear_forked_node_output():
    """A tensor consumed by two nodes (a fork) makes the graph non-linear."""
    inp = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
    out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2])
    internals = [
        oh.make_tensor_value_info(name, TensorProto.FLOAT, [2])
        for name in ("add_param", "mul0_param", "mul1_param", "mul0_res", "mul1_res")
    ]
    nodes = [
        # "middle" fans out into both Mul nodes, creating the fork.
        oh.make_node("Add", ["top_in", "add_param"], ["middle"]),
        oh.make_node("Mul", ["middle", "mul0_param"], ["mul0_res"]),
        oh.make_node("Mul", ["middle", "mul1_param"], ["mul1_res"]),
        oh.make_node("Add", ["mul0_res", "mul1_res"], ["top_out"]),
    ]
    graph = oh.make_graph(
        name="test", inputs=[inp], outputs=[out], value_info=internals, nodes=nodes
    )
    model = ModelWrapper(oh.make_model(graph))
    model = model.transform(InferShapes())
    result = model.analysis(ta.is_linear)
    assert result["is_linear"] is False
| 2,148 | 0 | 46 |
edf87f46fcc29e95f2f84b8cfba5416c27e3878e | 1,746 | py | Python | uwstyle/webbrowser/driver.py | TakamiChie/UWStyleMethods | 7635852ff902988843dbf17ddc29ea5f8350e6bf | [
"MIT"
] | 2 | 2019-01-24T00:13:46.000Z | 2020-09-30T22:59:32.000Z | uwstyle/webbrowser/driver.py | TakamiChie/UWStyleMethods | 7635852ff902988843dbf17ddc29ea5f8350e6bf | [
"MIT"
] | 5 | 2019-01-27T17:47:55.000Z | 2019-02-05T01:58:17.000Z | uwstyle/webbrowser/driver.py | TakamiChie/UWStyleMethods | 7635852ff902988843dbf17ddc29ea5f8350e6bf | [
"MIT"
] | null | null | null | from selenium import webdriver as seledriver
class WebDriver(object):
    """
    The base class for controlling the browser in the webbrowser class.
    Selenium Webdriver wrapper class.
    """
    def __init__(self, options=None):
        """
        Initialize the wrapper.

        Parameters
        ----
        options: str|list
            arguments of webdriver
        """
        self._webdriver = None
        self.options = []
        if options is not None:
            self.add_options(options)

    def add_options(self, value):
        """
        Add command-line options for the browser.

        Parameters
        ----
        value: str|list
            a single argument string, or a list of argument strings

        Raises
        ----
        ValueError
            if value is neither a string nor a list of strings
        """
        if isinstance(value, str):
            self.options.append(value)
        elif isinstance(value, list):
            # BUG FIX: the original condition was `type(value == list)`,
            # which evaluates `type(bool)` and is always truthy, so any
            # non-str value (e.g. an int) fell into this branch and
            # crashed with TypeError instead of the documented ValueError.
            for item in value:
                if not isinstance(item, str):
                    raise ValueError("Invalid Value")
                self.options.append(item)
        else:
            raise ValueError("Invalid Value")

    def get_browser(self):
        """
        get browser object

        Returns
        ----
        driver: selenium.webdriver
            browser's driver object
        """
        # Subclasses must create and return the concrete driver.
        raise NotImplementedError
class ChromeDriver(WebDriver):
    """
    Google Chrome's driver.

    Requires the ``chromedriver_binary`` package
    (``pip install chromedriver_binary``).

    This class does not currently support using Chrome with an existing
    profile. The option does not specify User-data-dir because
    "Selenium.common.exceptions.webdriverexception" occurs.
    """
    def get_browser(self):
        """
        get browser object

        Returns
        ----
        driver: selenium.webdriver
            browser's driver object
        """
        # Importing chromedriver_binary makes the bundled chromedriver
        # executable discoverable by Selenium.
        import chromedriver_binary
        chrome_opts = seledriver.ChromeOptions()
        for arg in self.options:
            chrome_opts.add_argument(arg)
        return seledriver.Chrome(options=chrome_opts)
class WebDriver(object):
    """
    The base class for controlling the browser in the webbrowser class.
    Selenium Webdriver wrapper class.
    """
    def __init__(self, options=None):
        """
        Initialize the wrapper.

        Parameters
        ----
        options: str|list
            arguments of webdriver
        """
        self._webdriver = None
        self.options = []
        if options is not None:
            self.add_options(options)

    def add_options(self, value):
        """
        Add command-line options for the browser.

        Parameters
        ----
        value: str|list
            a single argument string, or a list of argument strings

        Raises
        ----
        ValueError
            if value is neither a string nor a list of strings
        """
        if isinstance(value, str):
            self.options.append(value)
        elif isinstance(value, list):
            # BUG FIX: the original condition was `type(value == list)`,
            # which evaluates `type(bool)` and is always truthy, so any
            # non-str value (e.g. an int) fell into this branch and
            # crashed with TypeError instead of the documented ValueError.
            for item in value:
                if not isinstance(item, str):
                    raise ValueError("Invalid Value")
                self.options.append(item)
        else:
            raise ValueError("Invalid Value")

    def get_browser(self):
        """
        get browser object

        Returns
        ----
        driver: selenium.webdriver
            browser's driver object
        """
        # Subclasses must create and return the concrete driver.
        raise NotImplementedError
class ChromeDriver(WebDriver):
    """
    Google Chrome's driver.

    Requires the ``chromedriver_binary`` package
    (``pip install chromedriver_binary``).

    This class does not currently support using Chrome with an existing
    profile. The option does not specify User-data-dir because
    "Selenium.common.exceptions.webdriverexception" occurs.
    """
    # NOTE: the redundant ``__init__`` that only forwarded ``options`` to
    # ``super().__init__(options)`` was removed — the inherited constructor
    # is identical, so callers are unaffected.

    def get_browser(self):
        """
        get browser object

        Returns
        ----
        driver: selenium.webdriver
            browser's driver object
        """
        # Importing chromedriver_binary makes the bundled chromedriver
        # executable discoverable by Selenium.
        import chromedriver_binary
        options = seledriver.ChromeOptions()
        for o in self.options:
            options.add_argument(o)
        return seledriver.Chrome(options=options)
eb369cd06803c8f52ecb564772f3688e3b56e158 | 3,074 | py | Python | examples/data_feed/feeder.py | B3K7/mygeotab-python | ef0064543b6d859044e815c629a0f7998e479247 | [
"Apache-2.0"
] | 40 | 2015-08-20T16:13:52.000Z | 2022-01-07T13:30:27.000Z | examples/data_feed/feeder.py | B3K7/mygeotab-python | ef0064543b6d859044e815c629a0f7998e479247 | [
"Apache-2.0"
] | 227 | 2015-08-20T17:41:07.000Z | 2022-01-15T01:57:26.000Z | examples/data_feed/feeder.py | B3K7/mygeotab-python | ef0064543b6d859044e815c629a0f7998e479247 | [
"Apache-2.0"
] | 17 | 2016-05-12T16:06:32.000Z | 2022-01-10T19:03:40.000Z | # -*- coding: utf-8 -*-
from collections import defaultdict
import click
from mygeotab import API, dates
from mygeotab.ext import feed
@click.command(help="A console data feeder example")
@click.argument("database", nargs=1, required=True)
@click.option("--user", "-u", prompt=True, help="A MyGeotab username")
@click.option("--password", "-p", prompt=True, hide_input=True, help="A MyGeotab password")
@click.option("--server", default=None, help="The server (default is my.geotab.com)")
@click.option(
"--interval",
"-i",
type=click.IntRange(5, 300),
default=60,
help="The data feed interval in seconds (default is 60 seconds)",
)
if __name__ == "__main__":
main()
| 32.702128 | 99 | 0.599545 | # -*- coding: utf-8 -*-
from collections import defaultdict
import click
from mygeotab import API, dates
from mygeotab.ext import feed
class ExceptionDataFeedListener(feed.DataFeedListener):
    """Data feed listener that resolves and prints ExceptionEvent records."""

    def __init__(self, api):
        """
        A simple Data Feed listener for Exception Event data

        :param api: The MyGeotab API object
        """
        self.api = api
        # Per-entity-type cache of fetched sub-entities: outer key is the
        # lower-cased type name, inner key is the entity id.
        self._cache = defaultdict(dict)
        # NOTE(review): ``super(feed.DataFeedListener, self)`` starts the MRO
        # search *above* DataFeedListener, skipping its __init__ — the usual
        # form is ``super(ExceptionDataFeedListener, self)``; confirm intent.
        super(feed.DataFeedListener, self).__init__()

    def _populate_sub_entity(self, entity, type_name):
        """
        Simple API-backed cache for populating MyGeotab entities

        :param entity: The entity to populate a sub-entity for
        :param type_name: The type of the sub-entity to populate
        """
        key = type_name.lower()
        if isinstance(entity[key], str):
            # If the expected sub-entity is a string, it's an unknown ID
            entity[key] = dict(id=entity[key])
            return
        cache = self._cache[key]
        subentity = cache.get(entity[key]["id"])
        if not subentity:
            # Cache miss: fetch the full record from the API.
            # NOTE(review): the fetched record is never written back into
            # ``cache``, so every lookup misses — confirm whether the cache
            # was meant to be populated here.
            subentities = self.api.get(type_name, id=entity[key]["id"], results_limit=1)
            if len(subentities) > 0:
                subentity = subentities[0]
            entity[key] = subentity
        else:
            entity[key] = subentity

    def on_data(self, data):
        """
        The function called when new data has arrived.

        :param data: The list of data records received.
        """
        for d in data:
            self._populate_sub_entity(d, "Device")
            self._populate_sub_entity(d, "Rule")
            date = dates.localize_datetime(d["activeFrom"])
            click.echo(
                "[{date}] {device} ({rule})".format(
                    date=date,
                    device=d["device"].get("name", "**Unknown Vehicle"),
                    rule=d["rule"].get("name", "**Unknown Rule"),
                )
            )

    def on_error(self, error):
        """
        The function called when an error has occurred.

        :rtype: bool
        :param error: The error message to display.
        :return: If True, keep listening. If False, stop the data feed.
        """
        click.secho(error, fg="red")
        return True
@click.command(help="A console data feeder example")
@click.argument("database", nargs=1, required=True)
@click.option("--user", "-u", prompt=True, help="A MyGeotab username")
@click.option("--password", "-p", prompt=True, hide_input=True, help="A MyGeotab password")
@click.option("--server", default=None, help="The server (default is my.geotab.com)")
@click.option(
    "--interval",
    "-i",
    type=click.IntRange(5, 300),
    default=60,
    help="The data feed interval in seconds (default is 60 seconds)",
)
def main(database, user=None, password=None, server=None, interval=60):
    """Authenticate against MyGeotab and stream ExceptionEvent records to the console."""
    # Authenticate eagerly so credential problems surface before the feed starts.
    api = API(database=database, username=user, password=password, server=server)
    api.authenticate()
    feed.DataFeed(api, ExceptionDataFeedListener(api), "ExceptionEvent", interval=interval).start()


if __name__ == "__main__":
    main()
| 255 | 2,075 | 45 |
436776e2a237f4bbde5b9774d3036d52e8647ff0 | 13,211 | py | Python | offline/orchestrator.py | JoshBClemons/gesture_recognition | d1ddc6d086bf93b36a430fbcae0af14b9c584e92 | [
"MIT"
] | null | null | null | offline/orchestrator.py | JoshBClemons/gesture_recognition | d1ddc6d086bf93b36a430fbcae0af14b9c584e92 | [
"MIT"
] | null | null | null | offline/orchestrator.py | JoshBClemons/gesture_recognition | d1ddc6d086bf93b36a430fbcae0af14b9c584e92 | [
"MIT"
] | null | null | null | from config import Config
import psycopg2
from psycopg2.extras import Json, DictCursor
import pdb
import pandas as pd
import os
import time
import cv2
from gesture_recognition import featurizer
def orchestrator():
    """Pull frames with confident, accurate predictions from the database and use them to train a new model.

    Pipeline: select correctly-predicted frames above a confidence
    threshold, augment them with rotated/mirrored variants, build
    gesture-balanced train/validation/test splits, train and store a new
    model, record which instances trained it, then evaluate and re-rank
    all stored models by F1 score.

    NOTE(review): SQL statements below are assembled with f-strings. The
    interpolated values are module-internal constants here, but this should
    be migrated to parameterized queries before any value can come from
    user input.
    """
    # define database names
    db_frames = 'frames'
    db_model_scores = 'model_scores'  # NOTE(review): unused in this function
    db_users = 'users'  # NOTE(review): unused in this function
    db_conf_preds = 'confident_preds'  # NOTE(review): unused in this function
    # define feature names (column names in the frames table)
    instance = 'instance'
    user_id = 'user_id'
    root_dir = 'root_dir'
    pred_gest = 'pred_gest'
    true_gest = 'true_gest'
    pred_conf = 'pred_conf'
    processed_path = 'processed_path'
    # select all high-scoring predictions. These will be used to train new models.
    conn = psycopg2.connect(host=Config.DB_HOST, database=Config.DB_NAME, user=Config.DB_USER, password=Config.DB_PASS)
    cur = conn.cursor(cursor_factory=DictCursor)
    confidence_threshold = 0 # way too low; used for testing
    features = f'{instance}, {user_id}, {true_gest}, {pred_conf}, {root_dir}, {processed_path}'
    query = 'SELECT ' + features + f' FROM {db_frames} WHERE {pred_conf} > {confidence_threshold} AND {pred_gest} = {true_gest}'
    cur.execute(query)
    conn.commit()
    rows = cur.fetchall()
    # make dataframe for high-scoring predictions that includes rotated images. Rotated images will enhance training results.
    columns = [feature.strip() for feature in features.split(",")]
    df = pd.DataFrame(rows, columns=columns)
    df = df.drop(pred_conf, axis=1)
    df = df[df.notnull()]
    # exit if no frames in database
    if df.empty:
        print(f'[ERROR] No accurately predicted frames with prediction confidence > {confidence_threshold} in {db_frames}.')
        cur.close()
    else:
        print(f'[INFO] Confident predictions pulled from {db_frames} table.')
        # generate rotated images, save files to file storage system, append paths to dataframe
        processed_path = 'processed_path'
        flipped_path = 'flipped_path'
        mirrored_path = 'mirrored_path'
        mirrored_flipped_path = 'mirrored_flipped_path'
        rotated_image_path_feats = [flipped_path, mirrored_path, mirrored_flipped_path]
        for feat in rotated_image_path_feats:
            df[feat] = None
        df_feats = [instance, user_id, root_dir]
        start_time = time.time()
        for i in range(len(df)):
            orig_path = df[processed_path][i]
            frame_orig = cv2.imread(orig_path)
            # threshold back to a strict binary (0/255) mask before rotating
            (_, frame_orig) = cv2.threshold(frame_orig, 127, 255, cv2.THRESH_BINARY)
            row_orig = df.iloc[i]
            rotate_dict = featurizer.rotate(frame_orig, row_orig, df_feats)
            rotate_keys = list(rotate_dict.keys())
            root_dir_path = row_orig[root_dir]
            rotated_dir = os.path.join(root_dir_path, 'rotated')
            if os.path.isdir(rotated_dir) == False:
                print('[INFO] Creating directory for rotated images.')
                os.mkdir(rotated_dir)
            user_id_num = str(row_orig[user_id])
            user_dir = os.path.join(rotated_dir, str(user_id_num))
            if os.path.isdir(user_dir) == False:
                print(f'[INFO] Creating directory for rotated images from user {user_id_num}.')
                os.mkdir(user_dir)
            for key in rotate_keys:
                frame = rotate_dict[key]['frame']
                path = rotate_dict[key]['path']
                cv2.imwrite(path, frame)
                try:
                    column = key + '_path'
                    df[column][i] = path
                except:
                    print('[ERROR] Unable to save rotated image path to database or dataframe')
        print(f'[INFO] Processing rotated images took {time.time() - start_time} seconds')
        # drop user_id and root_dir from data frame
        df = df.drop([user_id, root_dir], axis=1)
        df = df.rename(columns={'true_gest': 'gesture'})
        # add table of confident predictions to database
        from sqlalchemy import create_engine
        engine = create_engine("postgresql://{user}:{pw}@{host}/{name}".format(host=Config.DB_HOST, user=Config.DB_USER, pw=Config.DB_PASS, name=Config.DB_NAME))
        table = 'conf_preds'
        df.to_sql(table, con=engine, if_exists='replace', index=False) # would be better to append existing table conf_preds but current design processes all images from database rather than just new ones. Will update in the future.
        print(f'[INFO] Table of confident predictions updated.')
        # check if sufficient number of each gesture present in table of confident predictions. If not, exit since a new model cannot be trained
        from objects import gestures_map # may place gestures_map on database. stored models should be saved with gestures_map they correspond with. example: train new model with additional gestures
        gestures_list = list(gestures_map.values())
        df_gestures_list = list(df['gesture'].unique())
        differing_gestures = [gesture for gesture in gestures_list if gesture not in df_gestures_list]
        if differing_gestures != []:
            print(f'[ERROR] Not enough confident predictions have been made for {differing_gestures}. Unable to split data.')
            return
        # generate new table with image paths transposed for convenient model training
        df_conf_preds = pd.DataFrame()
        for i in range(len(df)):
            row = df.iloc[i]
            instance_val = row[instance]
            gesture_val = row['gesture']
            # append row for each file path. the predicted and true gestures of each file are the same
            df_conf_preds = df_conf_preds.append([[instance_val + '_og', gesture_val, row[processed_path]]], ignore_index=True)
            df_conf_preds = df_conf_preds.append([[instance_val + '_f', gesture_val, row[flipped_path]]], ignore_index=True)
            df_conf_preds = df_conf_preds.append([[instance_val + '_m', gesture_val, row[mirrored_path]]], ignore_index=True)
            df_conf_preds = df_conf_preds.append([[instance_val + '_mf', gesture_val, row[mirrored_flipped_path]]], ignore_index=True)
        df_conf_preds = df_conf_preds.rename(columns={0: instance, 1: 'gesture', 2: 'path'})
        # form y_data from confident predictions dataframe
        from keras.utils import to_categorical
        y_data = df_conf_preds['gesture']
        for cat in list(gestures_map.keys()):
            gesture_name = gestures_map[cat]
            y_data = y_data.replace(gesture_name, cat)
        y_data = to_categorical(y_data, num_classes=len(gestures_map.keys()))
        y_data = pd.DataFrame(y_data)
        # reduce table size to count of least occurring gesture
        import random
        driving_count = -1
        for i in y_data.columns:
            gesture_count = len(y_data[y_data[i] == 1][i])
            if gesture_count < driving_count or driving_count == -1:
                driving_count = gesture_count
        indices = []
        for i in y_data.columns:
            gesture_indices = list(y_data[y_data[i] == 1][i].index);
            sample_indices = random.sample(gesture_indices, driving_count);
            indices.extend(sample_indices)
        y_data = y_data.iloc[indices]
        # form x_data from confident predictions dataframe. Size of x_data driven by least occuring gesture
        x_data = df_conf_preds['path'].iloc[indices]
        # split data into training (72%), validation (8%), and testing (20%) sets
        test_size = 0.2
        if len(x_data) < len(gestures_list)/test_size:
            print(f'[ERROR] Not enough confident predictions have been made. Unable to split data.')
            return
        from sklearn.model_selection import train_test_split
        x_train_paths, x_test_paths, y_train, y_test = train_test_split(x_data, y_data, test_size=0.2, stratify=y_data)
        x_train_paths, x_val_paths, y_train, y_val = train_test_split(x_train_paths, y_train, test_size=0.1, stratify=y_train)
        print(f'[INFO] Prepared training data. Building model...')
        # build model
        from .builder import build_and_save_model
        from objects import gestures_map # ideally, the gesture map should be capable of dynamically impacting the training cycle. However, I am assuming the set of predicted gestures will not change
        model_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'models')
        [model_path, training_date] = build_and_save_model(x_train_paths, x_val_paths, y_train, y_val, model_dir) # wait until data collection infrastructure in place to train new models
        # determine model_name based on entries in database
        query = 'SELECT model_name from models'
        cur.execute(query)
        conn.commit()
        rows = cur.fetchall()
        if rows == []:
            model_name = 'model_0'
        else:
            name_start = 'model_'
            last_num = int(rows[-1][0].split(name_start)[1])
            model_name = name_start + str(last_num+1)
        # push model to database
        gestures_map = Json(gestures_map)
        model = open(model_path, 'rb').read()
        model_blob = psycopg2.Binary(model)
        table = 'models'
        query = f"INSERT INTO {table}(model_name, training_date, gestures_map, model, model_path) VALUES('{model_name}', '{training_date}', {gestures_map}, {model_blob}, '{model_path}')"
        cur.execute(query)
        conn.commit()
        print(f'[INFO] New model stored in database.')
        # make dataframe containing all instances used to train new model
        new_instances = df_conf_preds.loc[list(x_train_paths.index)]['instance'].sort_index()
        df_new_instances = pd.DataFrame(new_instances)
        df_new_instances[model_name] = 1
        # update table that contains which frame instances were used to train which model(s)
        # In the long run, this table may be helpful for determining which training images correspond with accurate models
        # There is likely a cleaner way to accomplish this
        table = 'model_train_data_map'
        query = f"SELECT {instance} FROM {table}"
        cur.execute(query)
        conn.commit()
        sql_instances = cur.fetchall()
        if sql_instances != []:
            df_sql_instances = pd.DataFrame(sql_instances).rename(columns={0: "instance"})
            df_new_instances = df_new_instances.merge(df_sql_instances, how='outer', on='instance')
        # push temporary table to database that contains all training instances with instances used to train new model indicated with "1"
        new_instance = 'new_instance'
        df_new_instances = df_new_instances.rename(columns={instance: new_instance})
        new_table = 'new_table'
        df_new_instances.to_sql(new_table, con=engine, if_exists='replace', index=False)
        engine.dispose()
        # on database, merge newly created temporary table with original one
        temp_table = 'temp_table'
        query = f"""
        DROP TABLE IF EXISTS {temp_table};
        SELECT * INTO {temp_table} FROM {new_table} LEFT JOIN {table} ON {instance}={new_instance};
        DROP TABLE IF EXISTS {new_table};
        ALTER TABLE {temp_table} DROP COLUMN {instance};
        ALTER TABLE {temp_table} RENAME COLUMN {new_instance} to {instance};
        DROP TABLE IF EXISTS {table};
        ALTER TABLE {temp_table} RENAME TO {table}
        """
        cur.execute(query)
        conn.commit()
        print(f'[INFO] Model / training data mapping table updated.')
        # evaluate model performance and compare with performance of other models
        from . import evaluator
        table = 'model_scores'
        query = f"SELECT * FROM {table}"
        cur.execute(query)
        conn.commit()
        sql_model_scores = pd.DataFrame(cur.fetchall())
        # close database connection
        cur.close()
        # evaluate new model and append scores to model score table
        [f1, eval_date, eval_time, y_true, y_pred] = evaluator.evaluate_model(model_path, x_test_paths, y_test)
        rank = 1
        model_results = [model_name, f1, rank, eval_date, eval_time, y_true, y_pred]
        sql_model_scores = sql_model_scores.append([model_results], ignore_index=True)
        sql_model_scores = sql_model_scores.rename(columns={0:'model_name', 1:'f1_score', 2:'rank', 3:'evaluation_date', 4:'evaluation_time', 5:'true_gestures', 6:'predicted_gestures'})
        # rank models by f1 score
        sql_model_scores = sql_model_scores.sort_values('f1_score', ascending=False, ignore_index=True)
        rank_vals = []
        for i in range(len(sql_model_scores)):
            rank_vals.append(i+1)
        sql_model_scores['rank'] = rank_vals
        # replace database table with new model scores
        engine = create_engine("postgresql://{user}:{pw}@{host}/{name}".format(host=Config.DB_HOST, user=Config.DB_USER, pw=Config.DB_PASS, name=Config.DB_NAME))
        sql_model_scores.to_sql(table, con=engine, if_exists='replace', index=False)
        engine.dispose()
import psycopg2
from psycopg2.extras import Json, DictCursor
import pdb
import pandas as pd
import os
import time
import cv2
from gesture_recognition import featurizer
def orchestrator():
"""Pull frames with confidence, accurate predictions from database and use them to generate new model."""
# define database names
db_frames = 'frames'
db_model_scores = 'model_scores'
db_users = 'users'
db_conf_preds = 'confident_preds'
# define feature names
instance = 'instance'
user_id = 'user_id'
root_dir = 'root_dir'
pred_gest = 'pred_gest'
true_gest = 'true_gest'
pred_conf= 'pred_conf'
processed_path = 'processed_path'
# select all high-scoring predictions. These will be used to train new models.
conn = psycopg2.connect(host=Config.DB_HOST, database=Config.DB_NAME, user=Config.DB_USER, password=Config.DB_PASS)
cur = conn.cursor(cursor_factory=DictCursor)
confidence_threshold = 0 # way too low; used for testing
features = f'{instance}, {user_id}, {true_gest}, {pred_conf}, {root_dir}, {processed_path}'
query = 'SELECT ' + features + f' FROM {db_frames} WHERE {pred_conf} > {confidence_threshold} AND {pred_gest} = {true_gest}'
cur.execute(query)
conn.commit()
rows = cur.fetchall()
# make dataframe for high-scoring predictions that includes rotated images. Rotated images will enhance training results.
columns = [feature.strip() for feature in features.split(",")]
df = pd.DataFrame(rows, columns=columns)
df = df.drop(pred_conf, axis=1)
df = df[df.notnull()]
# exit if no frames in database
if df.empty:
print(f'[ERROR] No accurately predicted frames with prediction confidence > {confidence_threshold} in {db_frames}.')
cur.close()
else:
print(f'[INFO] Confident predictions pulled from {db_frames} table.')
# generate rotated images, save files to file storage system, append paths to dataframe
processed_path = 'processed_path'
flipped_path = 'flipped_path'
mirrored_path = 'mirrored_path'
mirrored_flipped_path = 'mirrored_flipped_path'
rotated_image_path_feats = [flipped_path, mirrored_path, mirrored_flipped_path]
for feat in rotated_image_path_feats:
df[feat] = None
df_feats = [instance, user_id, root_dir]
start_time = time.time()
for i in range(len(df)):
orig_path = df[processed_path][i]
frame_orig = cv2.imread(orig_path)
(_, frame_orig) = cv2.threshold(frame_orig, 127, 255, cv2.THRESH_BINARY)
row_orig = df.iloc[i]
rotate_dict = featurizer.rotate(frame_orig, row_orig, df_feats)
rotate_keys = list(rotate_dict.keys())
root_dir_path = row_orig[root_dir]
rotated_dir = os.path.join(root_dir_path, 'rotated')
if os.path.isdir(rotated_dir) == False:
print('[INFO] Creating directory for rotated images.')
os.mkdir(rotated_dir)
user_id_num = str(row_orig[user_id])
user_dir = os.path.join(rotated_dir, str(user_id_num))
if os.path.isdir(user_dir) == False:
print(f'[INFO] Creating directory for rotated images from user {user_id_num}.')
os.mkdir(user_dir)
for key in rotate_keys:
frame = rotate_dict[key]['frame']
path = rotate_dict[key]['path']
cv2.imwrite(path, frame)
try:
column = key + '_path'
df[column][i] = path
except:
print('[ERROR] Unable to save rotated image path to database or dataframe')
print(f'[INFO] Processing rotated images took {time.time() - start_time} seconds')
# drop user_id and root_dir from data frame
df = df.drop([user_id, root_dir], axis=1)
df = df.rename(columns={'true_gest': 'gesture'})
# add table of confident predictions to database
from sqlalchemy import create_engine
engine = create_engine("postgresql://{user}:{pw}@{host}/{name}".format(host=Config.DB_HOST, user=Config.DB_USER, pw=Config.DB_PASS, name=Config.DB_NAME))
table = 'conf_preds'
df.to_sql(table, con=engine, if_exists='replace', index=False) # would be better to append existing table conf_preds but current design processes all images from database rather than just new ones. Will update in the future.
print(f'[INFO] Table of confident predictions updated.')
# check if sufficient number of each gesture present in table of confident predictions. If not, exit since a new model cannot be trained
from objects import gestures_map # may place gestures_map on database. stored models should be saved with gestures_map they correspond with. example: train new model with additional gestures
gestures_list = list(gestures_map.values())
df_gestures_list = list(df['gesture'].unique())
differing_gestures = [gesture for gesture in gestures_list if gesture not in df_gestures_list]
if differing_gestures != []:
print(f'[ERROR] Not enough confident predictions have been made for {differing_gestures}. Unable to split data.')
return
# generate new table with image paths transposed for convenient model training
df_conf_preds = pd.DataFrame()
for i in range(len(df)):
row = df.iloc[i]
instance_val = row[instance]
gesture_val = row['gesture']
# append row for each file path. the predicted and true gestures of each file are the same
df_conf_preds = df_conf_preds.append([[instance_val + '_og', gesture_val, row[processed_path]]], ignore_index=True)
df_conf_preds = df_conf_preds.append([[instance_val + '_f', gesture_val, row[flipped_path]]], ignore_index=True)
df_conf_preds = df_conf_preds.append([[instance_val + '_m', gesture_val, row[mirrored_path]]], ignore_index=True)
df_conf_preds = df_conf_preds.append([[instance_val + '_mf', gesture_val, row[mirrored_flipped_path]]], ignore_index=True)
df_conf_preds = df_conf_preds.rename(columns={0: instance, 1: 'gesture', 2: 'path'})
# form y_data from confident predictions dataframe
from keras.utils import to_categorical
y_data = df_conf_preds['gesture']
for cat in list(gestures_map.keys()):
gesture_name = gestures_map[cat]
y_data = y_data.replace(gesture_name, cat)
y_data = to_categorical(y_data, num_classes=len(gestures_map.keys()))
y_data = pd.DataFrame(y_data)
# reduce table size to count of least occurring gesture
import random
driving_count = -1
for i in y_data.columns:
gesture_count = len(y_data[y_data[i] == 1][i])
if gesture_count < driving_count or driving_count == -1:
driving_count = gesture_count
indices = []
for i in y_data.columns:
gesture_indices = list(y_data[y_data[i] == 1][i].index);
sample_indices = random.sample(gesture_indices, driving_count);
indices.extend(sample_indices)
y_data = y_data.iloc[indices]
# form x_data from confident predictions dataframe. Size of x_data driven by least occuring gesture
x_data = df_conf_preds['path'].iloc[indices]
# split data into training (72%), validation (8%), and testing (20%) sets
test_size = 0.2
if len(x_data) < len(gestures_list)/test_size:
print(f'[ERROR] Not enough confident predictions have been made. Unable to split data.')
return
from sklearn.model_selection import train_test_split
x_train_paths, x_test_paths, y_train, y_test = train_test_split(x_data, y_data, test_size=0.2, stratify=y_data)
x_train_paths, x_val_paths, y_train, y_val = train_test_split(x_train_paths, y_train, test_size=0.1, stratify=y_train)
print(f'[INFO] Prepared training data. Building model...')
# build model
from .builder import build_and_save_model
from objects import gestures_map # ideally, the gesture map should be capable of dynamically impacting the training cycle. However, I am assuming the set of predicted gestures will not change
model_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'models')
[model_path, training_date] = build_and_save_model(x_train_paths, x_val_paths, y_train, y_val, model_dir) # wait until data collection infrastructure in place to train new models
# determine model_name based on entries in database
query = 'SELECT model_name from models'
cur.execute(query)
conn.commit()
rows = cur.fetchall()
if rows == []:
model_name = 'model_0'
else:
name_start = 'model_'
last_num = int(rows[-1][0].split(name_start)[1])
model_name = name_start + str(last_num+1)
# push model to database
gestures_map = Json(gestures_map)
model = open(model_path, 'rb').read()
model_blob = psycopg2.Binary(model)
table = 'models'
query = f"INSERT INTO {table}(model_name, training_date, gestures_map, model, model_path) VALUES('{model_name}', '{training_date}', {gestures_map}, {model_blob}, '{model_path}')"
cur.execute(query)
conn.commit()
print(f'[INFO] New model stored in database.')
# make dataframe containing all instances used to train new model
new_instances = df_conf_preds.loc[list(x_train_paths.index)]['instance'].sort_index()
df_new_instances = pd.DataFrame(new_instances)
df_new_instances[model_name] = 1
# update table that contains which frame instances were used to train which model(s)
# In the long run, this table may be helpful for determining which training images correspond with accurate models
# There is likely a cleaner way to accomplish this
table = 'model_train_data_map'
query = f"SELECT {instance} FROM {table}"
cur.execute(query)
conn.commit()
sql_instances = cur.fetchall()
if sql_instances != []:
df_sql_instances = pd.DataFrame(sql_instances).rename(columns={0: "instance"})
df_new_instances = df_new_instances.merge(df_sql_instances, how='outer', on='instance')
# push temporary table to database that contains all training instances with instances used to train new model indicated with "1"
new_instance = 'new_instance'
df_new_instances = df_new_instances.rename(columns={instance: new_instance})
new_table = 'new_table'
df_new_instances.to_sql(new_table, con=engine, if_exists='replace', index=False)
engine.dispose()
# on database, merge newly created temporary table with original one
temp_table = 'temp_table'
query = f"""
DROP TABLE IF EXISTS {temp_table};
SELECT * INTO {temp_table} FROM {new_table} LEFT JOIN {table} ON {instance}={new_instance};
DROP TABLE IF EXISTS {new_table};
ALTER TABLE {temp_table} DROP COLUMN {instance};
ALTER TABLE {temp_table} RENAME COLUMN {new_instance} to {instance};
DROP TABLE IF EXISTS {table};
ALTER TABLE {temp_table} RENAME TO {table}
"""
cur.execute(query)
conn.commit()
print(f'[INFO] Model / training data mapping table updated.')
# evaluate model performance and compare with performance of other models
from . import evaluator
table = 'model_scores'
query = f"SELECT * FROM {table}"
cur.execute(query)
conn.commit()
sql_model_scores = pd.DataFrame(cur.fetchall())
# close database connection
cur.close()
# evaluate new model and append scores to model score table
[f1, eval_date, eval_time, y_true, y_pred] = evaluator.evaluate_model(model_path, x_test_paths, y_test)
rank = 1
model_results = [model_name, f1, rank, eval_date, eval_time, y_true, y_pred]
sql_model_scores = sql_model_scores.append([model_results], ignore_index=True)
sql_model_scores = sql_model_scores.rename(columns={0:'model_name', 1:'f1_score', 2:'rank', 3:'evaluation_date', 4:'evaluation_time', 5:'true_gestures', 6:'predicted_gestures'})
# rank models by f1 score
sql_model_scores = sql_model_scores.sort_values('f1_score', ascending=False, ignore_index=True)
rank_vals = []
for i in range(len(sql_model_scores)):
rank_vals.append(i+1)
sql_model_scores['rank'] = rank_vals
# replace database table with new model scores
engine = create_engine("postgresql://{user}:{pw}@{host}/{name}".format(host=Config.DB_HOST, user=Config.DB_USER, pw=Config.DB_PASS, name=Config.DB_NAME))
sql_model_scores.to_sql(table, con=engine, if_exists='replace', index=False)
engine.dispose() | 0 | 0 | 0 |
007bbb746348f00e9c4355aa7932c325a9e58bb7 | 4,535 | py | Python | tasks/scan.py | KodaneFlash/hawk | de614df1fc50cc817be06e9902965dd2d65f9197 | [
"MIT"
] | null | null | null | tasks/scan.py | KodaneFlash/hawk | de614df1fc50cc817be06e9902965dd2d65f9197 | [
"MIT"
] | null | null | null | tasks/scan.py | KodaneFlash/hawk | de614df1fc50cc817be06e9902965dd2d65f9197 | [
"MIT"
] | null | null | null | import nmap
import sys
import os
import multiprocessing
import socket
from colorama import Fore, Back, Style
scanner = nmap.PortScanner()
| 43.605769 | 170 | 0.53914 | import nmap
import sys
import os
import multiprocessing
import socket
from colorama import Fore, Back, Style
scanner = nmap.PortScanner()
def scanStatus(host, inputed):
    """Quick liveness check: TCP-connect scan of port 1 on *host* and report its state.

    Exits the program on scan error, or when the host is reported down, so
    callers never proceed to a full scan of an unreachable target.
    *inputed* is accepted for signature compatibility with the other scan
    helpers but is not used here.
    """
    try:
        # probe a single port just to learn whether the host is up
        scanner.scan(host, '1', '-v -sT')
    except KeyboardInterrupt:
        sys.exit('\n^C\n')
    except Exception:
        # fixed: the original bound the full sys.exc_info() tuple, printing
        # "(type, value, traceback)"; use [1] (the exception value) for
        # consistency with scan() and scanWithPort()
        e = sys.exc_info()[1]
        print(f'[{Fore.RED}!{Style.RESET_ALL}] Error: {Fore.RED}{e}{Style.RESET_ALL}')
        sys.exit(1)
    else:
        if scanner[host].state() == 'up':
            print(f'[{Fore.GREEN}+{Style.RESET_ALL}] Status: {host} is {Fore.GREEN}{scanner[host].state()}{Style.RESET_ALL}.')
        else:
            print(f'[{Fore.YELLOW}?{Style.RESET_ALL}] Status: {host} is {Fore.RED}{scanner[host].state()}{Style.RESET_ALL}.')
            sys.exit()
def scan(host, inputed, prstart, prend, scantype):
    """Run an nmap scan of *host* over the port range prstart-prend.

    *scantype* is an nmap scan flag such as '-sT'.  Results are printed as a
    PORT/STATE/SERVICE table, one section per protocol found.
    """
    # liveness check first; scanStatus() exits the program if the host is down
    scanStatus(host, inputed)
    print('Scan will start. Press CTRL-C to cancel.')
    try:
        print(f'[{Fore.YELLOW}?{Style.RESET_ALL}] Scanning {Fore.YELLOW}{host}{Style.RESET_ALL}:{prstart}-{prend}...')
        scanner.scan(host, f'{prstart}-{prend}', f'-v {scantype}')
    except KeyboardInterrupt:
        sys.exit('\n^C\n')
    except Exception as e:
        # NOTE(review): 'e' is immediately overwritten; sys.exc_info()[1] is the
        # same exception object already bound by 'as e'
        e = sys.exc_info()[1]
        print(f'[{Fore.RED}!{Style.RESET_ALL}] Error: {Fore.RED}{e}{Style.RESET_ALL}')
    else:
        if len(scanner[host].all_protocols()) == 0:
            print(f'[{Fore.RED}!{Style.RESET_ALL}] {Fore.RED}No port(s) found.{Style.RESET_ALL}')
        else:
            # one table per protocol (tcp/udp/...)
            for protocol in scanner[host].all_protocols():
                if scanner[host][protocol].keys():
                    print(f'\nProtocol: {protocol.upper()}')
                    print('\n PORT \t\tSTATE \t\tSERVICE')
                    for port in scanner[host][protocol].keys():
                        print(f" {Fore.GREEN}{port}{Style.RESET_ALL} \t\t{scanner[host][protocol][port]['state']} \t\t{scanner[host][protocol][port]['name']}")
def scanWithPort(host, inputed, int, i, j, scantype):
    """Scan a single port (*int*) of *host*; print status and headers only when j == 0.

    Apparently intended to be called once per port (e.g. from parallel
    workers), with j distinguishing the first call so the one-time status
    check and table headers are not repeated.
    NOTE(review): the parameter named 'int' shadows the builtin, and 'i' is
    unused in this function — confirm against the callers before renaming.
    """
    try:
        if j == 0:
            scanStatus(host, inputed)
            print(f'[{Fore.YELLOW}?{Style.RESET_ALL}] Scanning {Fore.YELLOW}{host}{Style.RESET_ALL}')
            print('Scan will start. Press CTRL-C to cancel.')
        # the scan itself runs on every call, regardless of j
        scanner.scan(host, f'{int}', f'-v {scantype}')
    except KeyboardInterrupt:
        sys.exit('^C\n')
    except Exception as e:
        e = sys.exc_info()[1]
        print(f'[{Fore.RED}!{Style.RESET_ALL}] Error: {Fore.RED}{e}{Style.RESET_ALL}')
    else:
        for protocol in scanner[host].all_protocols():
            if scanner[host][protocol].keys():
                if j == 0:
                    print(f'Protocol: {protocol.upper()}')
                    print('\n PORT \t\tSTATE \t\tSERVICE')
                for port in scanner[host][protocol].keys():
                    print(f" {Fore.GREEN}{port}{Style.RESET_ALL} \t\t{scanner[host][protocol][port]['state']} \t\t{scanner[host][protocol][port]['name']}")
def scanLocalDevices():
    """Ping-sweep a user-supplied network (e.g. 192.168.1.0/24) and list live hosts.

    For each host that is up, print its address plus MAC vendor and reverse-DNS
    hostname when those can be resolved.
    """
    network = input('Please type the network you want to scan (Example: 192.168.1.0/24): ')
    print(f'The network address is {network}')
    try:
        print(f'[{Fore.YELLOW}?{Style.RESET_ALL}] Scanning for devices on {Fore.YELLOW}{network}{Style.RESET_ALL} network...')
        scanner.scan(hosts = network, arguments = '-v -sn')
    except KeyboardInterrupt:
        sys.exit('\n^C\n')
    except Exception as e:
        e = sys.exc_info()[1]
        print(f'[{Fore.RED}!{Style.RESET_ALL}] Error: {Fore.RED}{e}{Style.RESET_ALL}')
    else:
        for host in scanner.all_hosts():
            if scanner[host]['status']['state'] == 'up':
                try:
                    if len(scanner[host]['vendor']) == 0:
                        # no vendor info: address only, hostname when resolvable.
                        # Fixed: narrowed the bare 'except:' clauses (which also
                        # swallowed KeyboardInterrupt/SystemExit) — gethostbyaddr
                        # raises socket.herror, an OSError subclass.
                        try:
                            print(f"[{Fore.GREEN}+{Style.RESET_ALL}] {host} \t {socket.gethostbyaddr(host)[0]}")
                        except OSError:
                            print(f"[{Fore.GREEN}+{Style.RESET_ALL}] {host}")
                    else:
                        try:
                            print(f"[{Fore.GREEN}+{Style.RESET_ALL}] {host} \t {scanner[host]['vendor']} \t {socket.gethostbyaddr(host)[0]}")
                        except OSError:
                            print(f"[{Fore.GREEN}+{Style.RESET_ALL}] {host} \t {scanner[host]['vendor']}")
                except Exception:
                    # unexpected result shape from nmap: fall back to the vendor line
                    print(f"[{Fore.GREEN}+{Style.RESET_ALL}] {host} \t {scanner[host]['vendor']}")
| 4,267 | 0 | 116 |
3eb34fa6cb8beeae879580c7a63d82608bf47010 | 1,490 | py | Python | world/exploration/migrations/0043_auto_20181122_1409.py | tellg/arxcode | f04340f9466c31f59bc13b8e1afd4f5734da4848 | [
"MIT"
] | 5 | 2019-03-16T08:26:53.000Z | 2019-11-27T15:42:16.000Z | world/exploration/migrations/0043_auto_20181122_1409.py | tellg/arxcode | f04340f9466c31f59bc13b8e1afd4f5734da4848 | [
"MIT"
] | 7 | 2018-09-29T05:08:15.000Z | 2021-06-10T21:35:32.000Z | world/exploration/migrations/0043_auto_20181122_1409.py | tellg/arxcode | f04340f9466c31f59bc13b8e1afd4f5734da4848 | [
"MIT"
] | 7 | 2018-09-19T21:11:29.000Z | 2019-11-19T12:46:14.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-22 14:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 40.27027 | 149 | 0.636242 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-22 14:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the MonsterCraftingDrops model and rename the puzzle FK's related_name."""

    dependencies = [
        ('dominion', '0035_auto_20180831_0922'),
        ('exploration', '0042_auto_20181122_1357'),
    ]

    operations = [
        # New through-model mapping monsters to the crafting materials they can
        # drop, with a relative drop weight and a min/max quantity range.
        migrations.CreateModel(
            name='MonsterCraftingDrops',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('weight', models.PositiveSmallIntegerField(default=10)),
                ('min_quantity', models.PositiveSmallIntegerField(default=1)),
                ('max_quantity', models.PositiveSmallIntegerField(default=1)),
                ('material', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dominion.CraftingMaterialType')),
                ('monster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='crafting_drops', to='exploration.Monster')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Expose puzzle loot rows as ShardhavenPuzzle.object_drops.
        migrations.AlterField(
            model_name='shardhavenpuzzleobjectloot',
            name='puzzle',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='object_drops', to='exploration.ShardhavenPuzzle'),
        ),
    ]
| 0 | 1,277 | 23 |
dca5bd39d6d0f512d47fb32d7d9113e50f17f9eb | 3,977 | py | Python | tests/large/test_mode_replay.py | arjunkhunti-crest/eventgen | 3c551aa9fe53717797ff0bf9cf7d2f094b801bf2 | [
"Apache-2.0"
] | null | null | null | tests/large/test_mode_replay.py | arjunkhunti-crest/eventgen | 3c551aa9fe53717797ff0bf9cf7d2f094b801bf2 | [
"Apache-2.0"
] | null | null | null | tests/large/test_mode_replay.py | arjunkhunti-crest/eventgen | 3c551aa9fe53717797ff0bf9cf7d2f094b801bf2 | [
"Apache-2.0"
] | null | null | null | import re
import time
from datetime import datetime, timedelta
def test_mode_replay(eventgen_test_helper):
"""Test normal replay mode settings"""
events = eventgen_test_helper("eventgen_replay.conf").get_events()
# assert the event length is the same as sample file size
assert len(events) == 12
pattern = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}")
for event in events:
# assert that integer token is replaced
assert "@@integer" not in event
result = pattern.match(event)
assert result is not None
def test_mode_replay_end_1(eventgen_test_helper):
"""Test normal replay mode with end = 2 which will replay the sample twice and exit"""
events = eventgen_test_helper("eventgen_replay_end_1.conf").get_events()
# assert the event length is twice of the events in the sample file
assert len(events) == 24
def test_mode_replay_end_2(eventgen_test_helper):
"""Test normal replay mode with end = -1 which will replay the sample forever"""
helper = eventgen_test_helper("eventgen_replay_end_2.conf")
time.sleep(60)
assert helper.is_alive()
def test_mode_replay_backfill(eventgen_test_helper):
"""Test normal replay mode with backfill = -5s which should be ignore since backfill < interval"""
events = eventgen_test_helper("eventgen_replay_backfill.conf").get_events()
# assert the events length is twice of the events in the sample file
assert len(events) == 24
def test_mode_replay_backfill_greater_interval(eventgen_test_helper):
"""Test normal replay mode with backfill = -120s"""
current_datetime = datetime.now()
events = eventgen_test_helper(
"eventgen_replay_backfill_greater_interval.conf"
).get_events()
# assert the events length is twice of the events in the sample file
assert len(events) == 24
pattern = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}")
for event in events:
result = pattern.match(event)
assert result is not None
event_datetime = datetime.strptime(result.group(), "%Y-%m-%d %H:%M:%S")
assert event_datetime < current_datetime
def test_mode_replay_tutorial1(eventgen_test_helper):
"""Test the replay mode with csv for sample file sample.tutorial1.csv"""
events = eventgen_test_helper("eventgen_tutorial1.conf").get_events()
assert len(events) == 2019
def test_mode_replay_timemultiple(eventgen_test_helper):
"""Test normal replay mode with timeMultiple = 0.5 which will replay the sample with half time interval"""
current_datetime = datetime.now()
events = eventgen_test_helper("eventgen_replay_timeMultiple.conf").get_events()
pattern = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}")
for event in events:
result = pattern.match(event)
assert result is not None
event_datetime = datetime.strptime(result.group(), "%Y-%m-%d %H:%M:%S")
delter_seconds = (event_datetime - current_datetime).total_seconds()
# assert the event time is after (now - earliest) time
assert delter_seconds < 14
def test_mode_replay_csv(eventgen_test_helper):
"""Test normal replay mode with sampletype = csv which will get _raw row from the sample"""
events = eventgen_test_helper("eventgen_replay_csv.conf").get_events()
# assert the events equals to the sample csv file
assert len(events) == 10
def test_mode_replay_with_timezone(eventgen_test_helper):
"""Test normal replay mode with sampletype = csv which will get _raw row from the sample"""
events = eventgen_test_helper("eventgen_replay_csv_with_tz.conf").get_events()
# assert the events equals to the sample csv file
assert len(events) == 4
now_ts = datetime.utcnow() + timedelta(hours=-1)
for event in events:
event_ts = datetime.strptime(event.split(" ")[0], "%Y-%m-%dT%H:%M:%S,%f")
d = now_ts - event_ts
assert d.seconds < 60, "timestamp with timezone check fails."
| 42.308511 | 110 | 0.70606 | import re
import time
from datetime import datetime, timedelta
def test_mode_replay(eventgen_test_helper):
    """Test normal replay mode settings"""
    emitted = eventgen_test_helper("eventgen_replay.conf").get_events()
    # the generated event count must match the sample file size
    assert len(emitted) == 12
    ts_pattern = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}")
    for line in emitted:
        # the @@integer token must have been substituted
        assert "@@integer" not in line
        # every event starts with a well-formed timestamp
        assert ts_pattern.match(line) is not None
def test_mode_replay_end_1(eventgen_test_helper):
    """Test normal replay mode with end = 2 which will replay the sample twice and exit"""
    emitted = eventgen_test_helper("eventgen_replay_end_1.conf").get_events()
    # two full replays of the 12-event sample
    assert len(emitted) == 2 * 12
def test_mode_replay_end_2(eventgen_test_helper):
    """Test normal replay mode with end = -1 which will replay the sample forever"""
    runner = eventgen_test_helper("eventgen_replay_end_2.conf")
    time.sleep(60)
    # after a minute the generator process must still be running
    assert runner.is_alive()
def test_mode_replay_backfill(eventgen_test_helper):
    """Test normal replay mode with backfill = -5s which should be ignore since backfill < interval"""
    emitted = eventgen_test_helper("eventgen_replay_backfill.conf").get_events()
    # backfill shorter than the interval is ignored: still two full replays
    assert len(emitted) == 2 * 12
def test_mode_replay_backfill_greater_interval(eventgen_test_helper):
    """Test normal replay mode with backfill = -120s"""
    started_at = datetime.now()
    emitted = eventgen_test_helper(
        "eventgen_replay_backfill_greater_interval.conf"
    ).get_events()
    # two full replays of the 12-event sample
    assert len(emitted) == 2 * 12
    ts_pattern = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}")
    for line in emitted:
        match = ts_pattern.match(line)
        assert match is not None
        # backfilled events must be timestamped before the test started
        stamp = datetime.strptime(match.group(), "%Y-%m-%d %H:%M:%S")
        assert stamp < started_at
def test_mode_replay_tutorial1(eventgen_test_helper):
    """Test the replay mode with csv for sample file sample.tutorial1.csv"""
    emitted = eventgen_test_helper("eventgen_tutorial1.conf").get_events()
    # the tutorial sample replays all 2019 rows
    assert len(emitted) == 2019
def test_mode_replay_timemultiple(eventgen_test_helper):
    """Test normal replay mode with timeMultiple = 0.5 which will replay the sample with half time interval"""
    started_at = datetime.now()
    emitted = eventgen_test_helper("eventgen_replay_timeMultiple.conf").get_events()
    ts_pattern = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}")
    for line in emitted:
        match = ts_pattern.match(line)
        assert match is not None
        stamp = datetime.strptime(match.group(), "%Y-%m-%d %H:%M:%S")
        # compressed replay: every event must land within 14s of (now - earliest)
        assert (stamp - started_at).total_seconds() < 14
def test_mode_replay_csv(eventgen_test_helper):
    """Test normal replay mode with sampletype = csv which will get _raw row from the sample"""
    emitted = eventgen_test_helper("eventgen_replay_csv.conf").get_events()
    # one event per _raw row of the sample csv
    assert len(emitted) == 10
def test_mode_replay_with_timezone(eventgen_test_helper):
    """Test normal replay mode with sampletype = csv which will get _raw row from the sample"""
    emitted = eventgen_test_helper("eventgen_replay_csv_with_tz.conf").get_events()
    # the timezone-annotated sample carries 4 events
    assert len(emitted) == 4
    # events are written one hour behind UTC; compare against that reference
    reference = datetime.utcnow() + timedelta(hours=-1)
    for line in emitted:
        stamp = datetime.strptime(line.split(" ")[0], "%Y-%m-%dT%H:%M:%S,%f")
        gap = reference - stamp
        assert gap.seconds < 60, "timestamp with timezone check fails."
| 0 | 0 | 0 |
5eeddddcee7e0edc3a2f862f662a93f9dbfdb494 | 516 | py | Python | build_pipeline/250_invalidate_cdn.py | undefinedvalue/statdev | 4e71961bd9f8e6ee49de4ddc539033ace47967a7 | [
"MIT"
] | null | null | null | build_pipeline/250_invalidate_cdn.py | undefinedvalue/statdev | 4e71961bd9f8e6ee49de4ddc539033ace47967a7 | [
"MIT"
] | null | null | null | build_pipeline/250_invalidate_cdn.py | undefinedvalue/statdev | 4e71961bd9f8e6ee49de4ddc539033ace47967a7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Invalidates CDNs so the caches are refreshed
import boto3
import os
import time
| 27.157895 | 78 | 0.536822 | #!/usr/bin/env python
# Invalidates CDNs so the caches are refreshed
import boto3
import os
import time
def build(src_dir, dst_dir, opts):
    """Invalidate the whole CloudFront distribution so caches refresh.

    Pipeline-step entry point: src_dir, dst_dir and opts are part of the
    shared build-step signature but are not used by this step.  The
    distribution to invalidate is taken from the CF_DIST_ID environment
    variable; str(time.time()) serves as a unique CallerReference.
    """
    distribution_id = os.environ['CF_DIST_ID']
    cf = boto3.client('cloudfront')
    cf.create_invalidation(DistributionId=distribution_id,
        InvalidationBatch={
            'Paths': { 'Quantity': 1, 'Items': ['/*'] },
            'CallerReference': str(time.time())
        })
| 386 | 0 | 23 |
478fa499138e17cac8e71a56b7291ffca49ce6ae | 4,089 | py | Python | powersimdata/tests/test_mocks.py | c-voegele/PowerSimData | 5b1500e573f00a34571316796ff442bfa753871a | [
"MIT"
] | 27 | 2021-02-20T20:55:31.000Z | 2022-02-07T17:27:14.000Z | powersimdata/tests/test_mocks.py | c-voegele/PowerSimData | 5b1500e573f00a34571316796ff442bfa753871a | [
"MIT"
] | 147 | 2021-01-21T03:55:09.000Z | 2022-03-28T19:28:03.000Z | powersimdata/tests/test_mocks.py | c-voegele/PowerSimData | 5b1500e573f00a34571316796ff442bfa753871a | [
"MIT"
] | 27 | 2021-02-03T18:24:47.000Z | 2022-01-26T08:56:17.000Z | import pandas as pd
import pytest
from powersimdata.tests.mock_grid import MockGrid
from powersimdata.tests.mock_scenario import MockScenario
from powersimdata.tests.mock_scenario_info import MockScenarioInfo
period_num = 4
# plant_id is the index
mock_plant = {
"plant_id": [101, 102, 103, 104, 105, 106],
"bus_id": [1001, 1002, 1003, 1004, 1005, 1006],
"type": ["solar", "wind", "ng", "coal", "dfo", "hydro"],
"zone_id": [1, 2, 3, 1, 3, 2],
"GenFuelCost": [0, 0, 3.3, 4.4, 5.5, 0],
"Pmin": [0, 0, 0, 0, 0, 0],
"Pmax": [40, 80, 50, 150, 80, 60],
}
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| 33.243902 | 88 | 0.657863 | import pandas as pd
import pytest
from powersimdata.tests.mock_grid import MockGrid
from powersimdata.tests.mock_scenario import MockScenario
from powersimdata.tests.mock_scenario_info import MockScenarioInfo
period_num = 4  # number of time steps in every mocked time series

# plant_id is the index
# Six-plant toy grid: one column per plant attribute, parallel lists.
mock_plant = {
    "plant_id": [101, 102, 103, 104, 105, 106],
    "bus_id": [1001, 1002, 1003, 1004, 1005, 1006],
    "type": ["solar", "wind", "ng", "coal", "dfo", "hydro"],
    "zone_id": [1, 2, 3, 1, 3, 2],
    "GenFuelCost": [0, 0, 3.3, 4.4, 5.5, 0],
    "Pmin": [0, 0, 0, 0, 0, 0],
    "Pmax": [40, 80, 50, 150, 80, 60],
}
@pytest.fixture
def mock_pg():
    """Fake power-generation time series: one column per plant, scaled by plant index."""
    columns = {}
    for idx, plant_id in enumerate(mock_plant["plant_id"]):
        columns[plant_id] = [(idx + 1) * p for p in range(period_num)]
    return pd.DataFrame(columns)
@pytest.fixture
def mock_solar(mock_pg):
    """Solar profile: twice the mock PG of every solar plant."""
    solar_ids = [
        pid
        for pid, kind in zip(mock_plant["plant_id"], mock_plant["type"])
        if kind == "solar"
    ]
    return mock_pg[solar_ids] * 2
@pytest.fixture
def mock_wind(mock_pg):
    """Wind profile: four times the mock PG of every wind plant."""
    wind_ids = [
        pid
        for pid, kind in zip(mock_plant["plant_id"], mock_plant["type"])
        if kind == "wind"
    ]
    return mock_pg[wind_ids] * 4
@pytest.fixture
def mock_hydro(mock_pg):
    """Hydro profile: 1.5x the mock PG of every hydro plant."""
    hydro_ids = [
        pid
        for pid, kind in zip(mock_plant["plant_id"], mock_plant["type"])
        if kind == "hydro"
    ]
    return mock_pg[hydro_ids] * 1.5
class TestMockGrid:
    """Tests for MockGrid construction and input validation."""

    def test_mock_grid_successes(self):
        grid = MockGrid(grid_attrs={"plant": mock_plant})
        assert isinstance(grid, object), "MockGrid should return an object"
        assert hasattr(grid, "plant"), "Plant property should be in the MockGrid"
        assert len(grid.branch) == 0, "Branch dataframe should be empty in the MockGrid"

    def test_mock_grid_failures(self):
        # non-dict attrs, non-string keys, and unknown keys must all be rejected
        for bad_attrs, expected_error in (
            ("foo", TypeError),
            ({1: "foo"}, TypeError),
            ({"foo": "bar"}, ValueError),
        ):
            with pytest.raises(expected_error):
                MockGrid(grid_attrs=bad_attrs)
class TestMockScenario:
    """Profiles handed to MockScenario must round-trip through the state getters."""

    def test_mock_pg_stored_properly(self, mock_pg):
        scenario = MockScenario(grid_attrs={"plant": mock_plant}, pg=mock_pg)
        retrieved = scenario.state.get_pg()
        assert retrieved.shape == mock_pg.shape, (
            "pg should have dimension (periodNum * len(plant))"
        )

    def test_mock_solar_stored_properly(self, mock_solar):
        scenario = MockScenario(grid_attrs={"plant": mock_plant}, solar=mock_solar)
        retrieved = scenario.state.get_solar()
        assert retrieved.shape == mock_solar.shape, (
            "solar should have dimension (periodNum * len(solar_plant))"
        )

    def test_mock_wind_stored_properly(self, mock_wind):
        scenario = MockScenario(grid_attrs={"plant": mock_plant}, wind=mock_wind)
        retrieved = scenario.state.get_wind()
        assert retrieved.shape == mock_wind.shape, (
            "wind should have dimension (periodNum * len(wind_plant))"
        )

    def test_mock_hydro_stored_properly(self, mock_hydro):
        scenario = MockScenario(grid_attrs={"plant": mock_plant}, hydro=mock_hydro)
        retrieved = scenario.state.get_hydro()
        assert retrieved.shape == mock_hydro.shape, (
            "hydro should have dimension (periodNum * len(hydro_plant))"
        )
class TestMockScenarioInfo:
    """Tests for MockScenarioInfo defaults and scenario wiring."""

    def test_create_mock_scenario_info(self):
        assert MockScenarioInfo() is not None

    def test_default_float(self):
        info = MockScenarioInfo()
        # unknown lookups fall back to the default float of 42
        assert info.get_demand(1, 2, 3) == 42

    def test_info_set_correctly(self):
        info = MockScenarioInfo()
        scenario = MockScenario()
        # every key of a real mock scenario's info must be mirrored
        assert all(key in info.info.keys() for key in scenario.info.keys())

    def test_grid_set_correctly(self):
        scenario = MockScenario()
        info = MockScenarioInfo(scenario)
        assert scenario.state.get_grid() == info.grid
| 3,004 | 6 | 424 |
98c1eb817cab95bc96bf3fec5e9194d3bc60bf96 | 2,575 | py | Python | Chap09/inheritance.py | RiddhiDamani/Python | 06cba66aeafd9dc0fa849ec2112c0786a3e8f001 | [
"MIT"
] | null | null | null | Chap09/inheritance.py | RiddhiDamani/Python | 06cba66aeafd9dc0fa849ec2112c0786a3e8f001 | [
"MIT"
] | null | null | null | Chap09/inheritance.py | RiddhiDamani/Python | 06cba66aeafd9dc0fa849ec2112c0786a3e8f001 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
# Class inheritance is the fundamental part of OOP
# allows you to extend your class by deriving properties/variables and methods from parent classes.
# no longer providing default values.
# it is bcz this is going to be the base class and it's going too be inherited in order to be used.
# bcz of this we need to do extra checking in our getters and setters.
# we cannot just return a value, we need to check and see whether the value is actually there.
# so, using exceptions here - exception tries to return a value and if it fails it returns None instead.
# using duck class to inherit base class animal.
# using kitten class to inherit base class animal.
# s - string that will identify the target of its predator
if __name__ == '__main__': main()
| 30.654762 | 108 | 0.617087 | #!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
# Class inheritance is the fundamental part of OOP
# allows you to extend your class by deriving properties/variables and methods from parent classes.
class Animal:
    """Base class for animals with optional type/name/sound attributes.

    Attributes are optional at construction time, so each accessor must
    tolerate instances on which the attribute was never set (it returns
    None in that case).  Each accessor doubles as a setter when given a
    non-None argument, and always returns the current value.
    """

    def __init__(self, **kwargs):
        """Accept optional 'type', 'name' and 'sound' keyword arguments."""
        if 'type' in kwargs:
            self._type = kwargs['type']
        if 'name' in kwargs:
            self._name = kwargs['name']
        if 'sound' in kwargs:
            self._sound = kwargs['sound']

    def type(self, t=None):
        """Set the type if t is given, then return it (None if never set)."""
        # fixed: 'is not None' instead of truthiness so falsy values like ''
        # can be assigned; getattr with a default replaces the original
        # try/except AttributeError dance
        if t is not None:
            self._type = t
        return getattr(self, '_type', None)

    def name(self, n=None):
        """Set the name if n is given, then return it (None if never set)."""
        if n is not None:
            self._name = n
        return getattr(self, '_name', None)

    def sound(self, s=None):
        """Set the sound if s is given, then return it (None if never set)."""
        if s is not None:
            self._sound = s
        return getattr(self, '_sound', None)
# Duck specializes Animal with a fixed type.
class Duck(Animal):
    """An Animal whose type is always 'duck'."""

    def __init__(self, **kwargs):
        self._type = 'duck'
        # the type is fixed, so drop any caller-supplied override before
        # delegating the remaining keywords to the Animal initializer
        kwargs.pop('type', None)
        super().__init__(**kwargs)
# Kitten specializes Animal with a fixed type and adds a behavior.
class Kitten(Animal):
    """An Animal whose type is always 'kitten' and that can announce a kill."""

    def __init__(self, **kwargs):
        self._type = 'kitten'
        # fixed type: discard any caller-supplied override
        kwargs.pop('type', None)
        super().__init__(**kwargs)

    def kill(self, s):
        """Announce that this kitten will kill all of *s* (the prey's name)."""
        print(f'{self.name()} will now kill all {s}!')
def print_animal(o):
    """Print a one-line description of an Animal; reject anything else."""
    if isinstance(o, Animal):
        print(f'The {o.type()} is named "{o.name()}" and says "{o.sound()}".')
    else:
        raise TypeError('print_animal(): requires an Animal')
def main():
    """Demo: build a kitten and a duck, describe them, then let the kitten hunt."""
    kitten = Kitten(name='fluffy', sound='rwar')
    duck = Duck(name='donald', sound='quack')
    for creature in (kitten, duck):
        print_animal(creature)
    kitten.kill('humans')

if __name__ == '__main__': main()
| 1,434 | -10 | 298 |
5eee50537386ae39b316d1fd68278deb85410400 | 550 | py | Python | mixer/helper/reader_helper.py | Jwuthri/GtfsTools | d0db0c89588f936f02d4e6cccb70034ec1e4b9b1 | [
"MIT"
] | 2 | 2017-10-30T07:27:02.000Z | 2021-11-09T18:50:13.000Z | mixer/helper/reader_helper.py | Jwuthri/GtfsTools | d0db0c89588f936f02d4e6cccb70034ec1e4b9b1 | [
"MIT"
] | 1 | 2017-02-24T20:50:10.000Z | 2017-02-24T22:40:33.000Z | mixer/helper/reader_helper.py | Jwuthri/GtfsTools | d0db0c89588f936f02d4e6cccb70034ec1e4b9b1 | [
"MIT"
] | null | null | null | """Here are the db connection."""
import importlib
import logging
from mixer.settings import db_type
from mixer.glogger import logger
class Reader(object):
"""Helper to gen the reader class."""
def __init__(self, db_name):
"""Constructor."""
DB = getattr(
importlib.import_module(
"utilities.database.{}".format(db_type)
), "DB"
)
logger.log(logging.INFO, "Initialize DB connection")
self.db = DB(db_name)
self.db_name = db_name
| 25 | 61 | 0.583636 | """Here are the db connection."""
import importlib
import logging
from mixer.settings import db_type
from mixer.glogger import logger
class Reader(object):
    """Helper to gen the reader class."""

    def __init__(self, db_name):
        """Constructor: resolve the configured DB backend and connect to *db_name*."""
        # the backend module is chosen at runtime via the db_type setting
        backend = importlib.import_module(
            "utilities.database.{}".format(db_type)
        )
        DB = getattr(backend, "DB")
        logger.log(logging.INFO, "Initialize DB connection")
        self.db = DB(db_name)
        self.db_name = db_name
| 0 | 0 | 0 |
3fe2fe671496f997af36a47fd5c43e2d207766db | 1,964 | py | Python | Logistic-Regression-Insurance-claim-prediction/code.py | ChandrakantKate/ga-learner-dsmp-repo | e6c53282bbd42c8055c18a2f1203ea76eafa102a | [
"MIT"
] | null | null | null | Logistic-Regression-Insurance-claim-prediction/code.py | ChandrakantKate/ga-learner-dsmp-repo | e6c53282bbd42c8055c18a2f1203ea76eafa102a | [
"MIT"
] | null | null | null | Logistic-Regression-Insurance-claim-prediction/code.py | ChandrakantKate/ga-learner-dsmp-repo | e6c53282bbd42c8055c18a2f1203ea76eafa102a | [
"MIT"
] | null | null | null | # --------------
# import the libraries
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')

# Code starts here
# NOTE(review): 'path' is not defined in this file — presumably injected by the
# hosting platform; confirm before running standalone.
df = pd.read_csv(path)
print(df.head())
# features vs. target: predict whether a policyholder files an insurance claim
X = df.drop('insuranceclaim',axis=1)
y = df['insuranceclaim']
# 80/20 train/test split with a fixed seed for reproducibility
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=6)
# Code ends here


# --------------
import matplotlib.pyplot as plt

# Code starts here
# visualize the BMI spread; q_value (95th percentile) is inspected, not reused
plt.boxplot(X_train['bmi'])
q_value = X_train['bmi'].quantile(0.95)
y_train.value_counts()
# Code ends here


# --------------
# Code starts here
# pairwise feature correlations and scatter matrix of the training set
relation = X_train.corr()
print(relation)
sns.pairplot(X_train)
# Code ends here


# --------------
# NOTE(review): seaborn and matplotlib were already imported above
import seaborn as sns
import matplotlib.pyplot as plt

# Code starts here
# 2x2 grid of count plots: categorical features split by claim outcome
cols = ['children','sex','region','smoker']
fig, axes = plt.subplots(2, 2, figsize=(10,10))
for i in range(2):
    for j in range(2):
        ax = axes[i,j]
        # map the (row, col) grid position onto the flat cols list
        col = cols[i*2+j]
        sns.countplot(x=X_train[col],hue=y_train,ax=ax)
# Code ends here


# --------------
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

# parameters for grid search
parameters = {'C':[0.1,0.5,1,5]}

# Code starts here
# grid-search the regularization strength of a logistic regression
lr = LogisticRegression()
grid = GridSearchCV(lr,parameters)
grid.fit(X_train,y_train)
y_pred = grid.predict(X_test)
accuracy = accuracy_score(y_test,y_pred)
print(accuracy)
# Code ends here
# --------------
from sklearn.metrics import roc_auc_score, auc
from sklearn import metrics

# Code starts here
#y_scores = grid.decision_function(X_test)
# fixed: roc_auc_score expects (y_true, y_score) — the original call had the
# arguments swapped, silently computing the wrong statistic
score = roc_auc_score(y_test, y_pred)
y_pred_proba = grid.predict_proba(X_test)[:,1]
fpr, tpr,_ = metrics.roc_curve(y_test,y_pred)
roc_auc = roc_auc_score(y_test, y_pred_proba)
# fixed: use a distinct name so the imported sklearn 'auc' function is not
# shadowed by its own result
auc_value = auc(fpr, tpr)
plt.plot(fpr,tpr,label='Logistic model, auc='+str(auc_value))
# Code ends here
| 21.347826 | 82 | 0.709267 | # --------------
# import the libraries
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
# Code starts here
df = pd.read_csv(path)
print(df.head())
X = df.drop('insuranceclaim',axis=1)
y = df['insuranceclaim']
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=6)
# Code ends here
# --------------
import matplotlib.pyplot as plt
# Code starts here
plt.boxplot(X_train['bmi'])
q_value = X_train['bmi'].quantile(0.95)
y_train.value_counts()
# Code ends here
# --------------
# Code starts here
relation = X_train.corr()
print(relation)
sns.pairplot(X_train)
# Code ends here
# --------------
import seaborn as sns
import matplotlib.pyplot as plt
# Code starts here
cols = ['children','sex','region','smoker']
fig, axes = plt.subplots(2, 2, figsize=(10,10))
for i in range(2):
for j in range(2):
ax = axes[i,j]
col = cols[i*2+j]
sns.countplot(x=X_train[col],hue=y_train,ax=ax)
# Code ends here
# --------------
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# parameters for grid search
parameters = {'C':[0.1,0.5,1,5]}
# Code starts here
lr = LogisticRegression()
grid = GridSearchCV(lr,parameters)
grid.fit(X_train,y_train)
y_pred = grid.predict(X_test)
accuracy = accuracy_score(y_test,y_pred)
print(accuracy)
# Code ends here
# --------------
from sklearn.metrics import roc_auc_score, auc
from sklearn import metrics
# Code starts here
#y_scores = grid.decision_function(X_test)
score = roc_auc_score(y_pred,y_test)
y_pred_proba = grid.predict_proba(X_test)[:,1]
fpr, tpr,_ = metrics.roc_curve(y_test,y_pred)
roc_auc = roc_auc_score(y_test, y_pred_proba)
auc = auc(fpr,tpr)
plt.plot(fpr,tpr,label='Logistic model, auc='+str(auc))
# Code ends here
| 0 | 0 | 0 |
1755f9e2f59a10f2dae8d950fcd4b40d9bc268b5 | 3,763 | py | Python | Recover Binary Search Tree.py | JazzikPeng/Algorithm-in-Python | 915135b1cdd02a6bb8d7068a54b2f497b2ec31d4 | [
"MIT"
] | 3 | 2018-02-05T06:15:57.000Z | 2019-04-07T23:33:07.000Z | Recover Binary Search Tree.py | JazzikPeng/Algorithm-in-Python | 915135b1cdd02a6bb8d7068a54b2f497b2ec31d4 | [
"MIT"
] | null | null | null | Recover Binary Search Tree.py | JazzikPeng/Algorithm-in-Python | 915135b1cdd02a6bb8d7068a54b2f497b2ec31d4 | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
# Do in order traversal. The in order traversal is monotonically increase
# O(1) Space, can not use iterative method or recursive solution, both use space
# class Solution(object):
# first = TreeNode(None)
# second = TreeNode(None)
# prev = TreeNode(None)
# def recoverTree(self, root):
# """
# :type root: TreeNode
# :rtype: None Do not return anything, modify root in-place instead.
# """
# # Recursion Method
# if root is None:
# return
# def helper(self, curr):
# if curr is None:
# return
# helper(curr.left)
# if prev is not None and prev.val >= curr.val:
# # have mistake first is the prev node, second is the curr node
# Morris Traversal O(1) solution
| 32.439655 | 80 | 0.459208 | # Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# Do in order traversal. The in order traversal is monotonically increase
# O(1) Space, can not use iterative method or recursive solution, both use space
# class Solution(object):
# first = TreeNode(None)
# second = TreeNode(None)
# prev = TreeNode(None)
# def recoverTree(self, root):
# """
# :type root: TreeNode
# :rtype: None Do not return anything, modify root in-place instead.
# """
# # Recursion Method
# if root is None:
# return
# def helper(self, curr):
# if curr is None:
# return
# helper(curr.left)
# if prev is not None and prev.val >= curr.val:
# # have mistake first is the prev node, second is the curr node
# Morris Traversal O(1) solution
class Solution(object):
def recoverTree(self, root):
"""
:type root: TreeNode
:rtype: None Do not return anything, modify root in-place instead.
"""
first = TreeNode(None)
second = TreeNode(None)
prev = TreeNode(-float('inf'))
firstTime = True
# Recursion Method
while root is not None:
if root.left is not None:
temp = root.left
while temp.right is not None and temp.right is not root:
temp = temp.right
if temp.right is None:
temp.right = root
root = root.left
else:
temp.right = None
if prev.val > root.val and firstTime:
first = prev
firstTime = False
if prev.val > root.val and not firstTime:
second = root
prev = root
root = root.left
else:
# visit root.val
if prev.val > root.val and firstTime:
first = prev
firstTime = False
if prev.val > root.val and not firstTime:
second = root
prev = root
root = root.right
# Now we can swap
if first is not None and second is not None:
val = first.val
first.val = second.val
second.val = val
class Solution(object):
def recoverTree(self, root):
"""
Do not return anything, modify root in-place instead.
"""
point = root
last = None # last point
big = None
small = None
while point:
if point.left is None:
# visit
if last and last.val > point.val:
if big is None:
big = last
small = point
last = point
# end visit
point = point.right
else:
pre = point.left
while pre.right and pre.right is not point:
pre = pre.right
if pre.right is None:
pre.right = point
point = point.left
else:
pre.right = None
# visit
if last and last.val > point.val:
if big is None:
big = last
small = point
last = point
# end visit
point = point.right
big.val, small.val = small.val, big.val | 73 | 2,737 | 93 |
8f8abd51d28441a8163759ad81bbc9edd88b368d | 1,370 | py | Python | utils/CreateTemplate.py | ausaafnabi/ML-OverRPC-API | b2ddbfac3f4c4f5ae97e030be7e4a4dfcd4d6635 | [
"MIT"
] | 1 | 2020-06-23T17:02:09.000Z | 2020-06-23T17:02:09.000Z | utils/CreateTemplate.py | ausaafnabi/ML-OverRPC-API | b2ddbfac3f4c4f5ae97e030be7e4a4dfcd4d6635 | [
"MIT"
] | 7 | 2020-03-31T06:46:14.000Z | 2020-04-12T11:25:39.000Z | utils/CreateTemplate.py | ausaafnabi/ML-OverRPC-API | b2ddbfac3f4c4f5ae97e030be7e4a4dfcd4d6635 | [
"MIT"
] | 1 | 2020-03-31T07:24:01.000Z | 2020-03-31T07:24:01.000Z | import os
from utils.Template_directory import *
from utils.utilities import *
import sys
sys.path.append('../')
from core.Renderer.FileRenderer import Renderer
layer1 = [Experiment,Production]
layer1_names = ['Experiment','Production']
Files = File
| 33.414634 | 75 | 0.658394 | import os
from utils.Template_directory import *
from utils.utilities import *
import sys
sys.path.append('../')
from core.Renderer.FileRenderer import Renderer
layer1 = [Experiment,Production]
layer1_names = ['Experiment','Production']
Files = File
def GetCurrentDirectory():
currentDirectory = os.getcwd()
return currentDirectory
def CreateDirectory(directory):
try:
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
def Generator(root_dir):
for i in range(0,len(layer1)):
layerName = str(root_dir)+'/'+str(layer1_names[i])
CreateDirectory(layerName)
for j in range(0,len(layer1[i])):
dirName = layerName +'/'+str(layer1[i][j])
CreateDirectory(dirName)
filePath = dirName + '/'
FilesGenerator(filePath,str(layer1[i][j]),str(layer1_names[i]))
print("Directory ",dirName," Created")
def FilesGenerator(file_location,foldername,layer_name,dict=Files):
for i in dict[layer_name][foldername]:
filename = dict[layer_name][foldername][i]['filename']
# print(layer_name + foldername + i)
dependency = dict[layer_name][foldername][i]['dependency']
print("CREATING: " + filename + "in" + file_location )
Renderer(file_location,filename,dependency)
| 1,011 | 0 | 96 |
3235e687d4bb817652eb1ff22833902799497e16 | 332 | py | Python | 1-10/problem7.py | anpe9592/projectEuler | 628ae8877bca496d55b95bd55525478bede6e753 | [
"MIT"
] | null | null | null | 1-10/problem7.py | anpe9592/projectEuler | 628ae8877bca496d55b95bd55525478bede6e753 | [
"MIT"
] | null | null | null | 1-10/problem7.py | anpe9592/projectEuler | 628ae8877bca496d55b95bd55525478bede6e753 | [
"MIT"
] | null | null | null | # problem7.py
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
# What is the 10 001st prime number?
i = 1
z = 1
while i < 10002:
z += 1
if z > 1:
for j in range(2, z):
if z % j == 0:
break
else:
i += 1
print(z)
| 18.444444 | 102 | 0.490964 | # problem7.py
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
# What is the 10 001st prime number?
i = 1
z = 1
while i < 10002:
z += 1
if z > 1:
for j in range(2, z):
if z % j == 0:
break
else:
i += 1
print(z)
| 0 | 0 | 0 |
c3dabb085af97c158d1e88e44c42684ccc05417d | 2,859 | py | Python | setup.py | ShivanshShalabh/zoomobot- | e2cf7a7bba5515248e5754702bb3daba626b615f | [
"MIT"
] | null | null | null | setup.py | ShivanshShalabh/zoomobot- | e2cf7a7bba5515248e5754702bb3daba626b615f | [
"MIT"
] | null | null | null | setup.py | ShivanshShalabh/zoomobot- | e2cf7a7bba5515248e5754702bb3daba626b615f | [
"MIT"
] | null | null | null | import re
import os
if __name__ == '__main__':
# Check if file with name Cache.txt exists
if os.path.isfile('Cache.txt'):
# If file exists, delete it
os.remove('Cache.txt')
# Create file with name Cache.txt
name = input('Enter your name (Enter -1 to skip): ')
while not name:
name = input('Enter your name (Enter -1 to skip): ')
if name == '-1':
name = ''
skip_column = input('Enter column number to skip in the excel file (Enter -1 to skip): ')
while (not skip_column or not skip_column.isdigit()) and skip_column != '-1':
if not skip_column.isdigit():
print("Invalid input :(\nEnter an integer")
skip_column = ""
skip_column = input(
'Enter column number to skip in the excel file (Enter -1 to skip): ')
if skip_column == '-1':
skip_column = ''
skip_row = input('Enter row number to skip in the excel file (Enter -1 to skip): ')
while (not skip_row or not skip_row.isdigit()) and skip_row != '-1':
if not skip_row.isdigit():
print("Invalid input :(\nEnter an integer")
skip_row = ""
skip_row = input('Enter row number to skip in the excel file (Enter -1 to skip): ')
if skip_row == '-1':
skip_row = ''
# Write name, skip_column and skip_row to Cache.txt
color = input(
'Enter hex value of the color with which you want to color the cell (Enter -1 to skip): ')
while not isValidHexaCode(color) and color != '-1':
color = input(
'Enter hex value of the color with which you want to color the cell (Enter -1 to skip): ')
print("Choose how do you want to extract names from the name list:")
print("Enter 1 to get names from Excel file",
"Enter 2 to get names from txt file", "Enter -1 to skip", sep='\n')
file_input = input("Enter your choice: ")
while file_input not in ['1', '2', '-1']:
print("Enter 1 to get names from Excel file",
"Enter 2 to get names from txt file", "Enter -1 to skip", sep='\n')
file_input = input("Enter your choice: ")
if file_input == '-1':
file_input = ''
if color == '-1':
color = ''
with open('Cache.txt', 'w') as f:
f.write(name + "|Name" + '\n' + skip_column +
"|No. of columns to skip" + '\n' + skip_row + '|No. of rows to skip\n'+color+'|Cell Color\n' + file_input + '|File Input')
| 37.618421 | 138 | 0.582721 | import re
import os
def isValidHexaCode(str):
# Regex to check valid
# hexadecimal color code.
regex = "^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$"
# Compile the ReGex
p = re.compile(regex)
# If the string is empty
# return false
if(str == None):
return False
# Return if the string
# matched the ReGex
if(re.search(p, str)):
return True
else:
return False
if __name__ == '__main__':
# Check if file with name Cache.txt exists
if os.path.isfile('Cache.txt'):
# If file exists, delete it
os.remove('Cache.txt')
# Create file with name Cache.txt
name = input('Enter your name (Enter -1 to skip): ')
while not name:
name = input('Enter your name (Enter -1 to skip): ')
if name == '-1':
name = ''
skip_column = input('Enter column number to skip in the excel file (Enter -1 to skip): ')
while (not skip_column or not skip_column.isdigit()) and skip_column != '-1':
if not skip_column.isdigit():
print("Invalid input :(\nEnter an integer")
skip_column = ""
skip_column = input(
'Enter column number to skip in the excel file (Enter -1 to skip): ')
if skip_column == '-1':
skip_column = ''
skip_row = input('Enter row number to skip in the excel file (Enter -1 to skip): ')
while (not skip_row or not skip_row.isdigit()) and skip_row != '-1':
if not skip_row.isdigit():
print("Invalid input :(\nEnter an integer")
skip_row = ""
skip_row = input('Enter row number to skip in the excel file (Enter -1 to skip): ')
if skip_row == '-1':
skip_row = ''
# Write name, skip_column and skip_row to Cache.txt
color = input(
'Enter hex value of the color with which you want to color the cell (Enter -1 to skip): ')
while not isValidHexaCode(color) and color != '-1':
color = input(
'Enter hex value of the color with which you want to color the cell (Enter -1 to skip): ')
print("Choose how do you want to extract names from the name list:")
print("Enter 1 to get names from Excel file",
"Enter 2 to get names from txt file", "Enter -1 to skip", sep='\n')
file_input = input("Enter your choice: ")
while file_input not in ['1', '2', '-1']:
print("Enter 1 to get names from Excel file",
"Enter 2 to get names from txt file", "Enter -1 to skip", sep='\n')
file_input = input("Enter your choice: ")
if file_input == '-1':
file_input = ''
if color == '-1':
color = ''
with open('Cache.txt', 'w') as f:
f.write(name + "|Name" + '\n' + skip_column +
"|No. of columns to skip" + '\n' + skip_row + '|No. of rows to skip\n'+color+'|Cell Color\n' + file_input + '|File Input')
| 382 | 0 | 23 |
366737a828415b6bfb9f1a6b95aefbecdab7cc3d | 2,259 | py | Python | Examples/scripts/vector_scalar.py | jenkayco/hacknostics | 4f980b17a2648cb6547cd2d8b442ae23253ab5e6 | [
"MIT"
] | 2 | 2019-06-04T20:10:46.000Z | 2021-06-07T21:10:39.000Z | Examples/scripts/vector_scalar.py | jenkayco/hacknostics | 4f980b17a2648cb6547cd2d8b442ae23253ab5e6 | [
"MIT"
] | 2 | 2019-06-05T03:08:06.000Z | 2019-06-05T15:38:01.000Z | Examples/scripts/vector_scalar.py | jenkayco/hacknostics | 4f980b17a2648cb6547cd2d8b442ae23253ab5e6 | [
"MIT"
] | 2 | 2019-06-05T03:11:35.000Z | 2019-06-05T05:33:45.000Z | #================================================#
# vector_scalar.py
# based on: gsn_vec_scal_1.ncl,
# gsn_vec_scal_2.ncl,
# gsn_vec_scal_3.ncl
#================================================#
from pathlib import Path
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
#=================================================#
# open file and read in data
#=================================================#
data_location = Path("/Users/brianpm/Documents/www.ncl.ucar.edu/Applications/Data/cdf/")
data_file = data_location / "uvt.nc"
f1 = xr.open_dataset(data_file)
u = f1['U'][0,0,:,:] # read in example data [2D only here]
v = f1['V'][0,0,:,:]
speed = (u**2 + v**2)**0.5
#=================================================#
# PLOT 1 - Vector field colored by a scalar.
#=================================================#
outfile_ext = "png"
outfilename = "gsn_vec_scal"
wks, ax = plt.subplots()
plot = ax.quiver(u,v,speed)
# you can change the relative size of the arrows
# with the scale kwarg, but it requires quite
# a bit of tuning.
# plot = ax.quiver(u,v,speed, scale=350)
# you can still concatenate strings with +:
wks.savefig("/Users/brianpm/Desktop/"+outfilename+"."+outfile_ext)
#=================================================#
# PLOT 2 - Contour plot with vectors on top
#=================================================#
wks2, ax2 = plt.subplots()
plot2 = ax2.contourf(speed[10:30,20:40]) # contour the variable
plotV = ax2.quiver(u[10:30, 20:40], v[10:30, 20:40])
wks2.savefig("/Users/brianpm/Desktop/"+outfilename+"2."+outfile_ext)
#=================================================#
# Plot 3 - Put it on a map
#=================================================#
wks3, ax3 = plt.subplots(subplot_kw={"projection":ccrs.PlateCarree()})
lon = f1['lon']
lat = f1['lat']
lons, lats = np.meshgrid(lon, lat)
plot3 = ax3.quiver(lons, lats, u, v, speed, transform=ccrs.PlateCarree())
ax3.set_title("Basic Vector/Scalar/Map Plot")
ax3.set_extent([lon.min(), lon.max(), lat.min(), lat.max()])
ax3.coastlines()
ax3.set_xticks(np.arange(-180, 180, 30))
ax3.set_yticks(np.arange(-90, 90, 30))
ax3.grid()
wks3.savefig("/Users/brianpm/Desktop/"+outfilename+"3."+outfile_ext)
| 37.65 | 88 | 0.536963 | #================================================#
# vector_scalar.py
# based on: gsn_vec_scal_1.ncl,
# gsn_vec_scal_2.ncl,
# gsn_vec_scal_3.ncl
#================================================#
from pathlib import Path
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
#=================================================#
# open file and read in data
#=================================================#
data_location = Path("/Users/brianpm/Documents/www.ncl.ucar.edu/Applications/Data/cdf/")
data_file = data_location / "uvt.nc"
f1 = xr.open_dataset(data_file)
u = f1['U'][0,0,:,:] # read in example data [2D only here]
v = f1['V'][0,0,:,:]
speed = (u**2 + v**2)**0.5
#=================================================#
# PLOT 1 - Vector field colored by a scalar.
#=================================================#
outfile_ext = "png"
outfilename = "gsn_vec_scal"
wks, ax = plt.subplots()
plot = ax.quiver(u,v,speed)
# you can change the relative size of the arrows
# with the scale kwarg, but it requires quite
# a bit of tuning.
# plot = ax.quiver(u,v,speed, scale=350)
# you can still concatenate strings with +:
wks.savefig("/Users/brianpm/Desktop/"+outfilename+"."+outfile_ext)
#=================================================#
# PLOT 2 - Contour plot with vectors on top
#=================================================#
wks2, ax2 = plt.subplots()
plot2 = ax2.contourf(speed[10:30,20:40]) # contour the variable
plotV = ax2.quiver(u[10:30, 20:40], v[10:30, 20:40])
wks2.savefig("/Users/brianpm/Desktop/"+outfilename+"2."+outfile_ext)
#=================================================#
# Plot 3 - Put it on a map
#=================================================#
wks3, ax3 = plt.subplots(subplot_kw={"projection":ccrs.PlateCarree()})
lon = f1['lon']
lat = f1['lat']
lons, lats = np.meshgrid(lon, lat)
plot3 = ax3.quiver(lons, lats, u, v, speed, transform=ccrs.PlateCarree())
ax3.set_title("Basic Vector/Scalar/Map Plot")
ax3.set_extent([lon.min(), lon.max(), lat.min(), lat.max()])
ax3.coastlines()
ax3.set_xticks(np.arange(-180, 180, 30))
ax3.set_yticks(np.arange(-90, 90, 30))
ax3.grid()
wks3.savefig("/Users/brianpm/Desktop/"+outfilename+"3."+outfile_ext)
| 0 | 0 | 0 |
fa4414908dbefc98c70ceea38be4dd04bef4dd48 | 2,691 | py | Python | command_cop.py | jperras/command_cop | 4b2c4f3020b6bdac1ed2973c5c8b2c1ba1b69b21 | [
"MIT"
] | 1 | 2019-09-15T14:04:09.000Z | 2019-09-15T14:04:09.000Z | command_cop.py | jperras/command_cop | 4b2c4f3020b6bdac1ed2973c5c8b2c1ba1b69b21 | [
"MIT"
] | null | null | null | command_cop.py | jperras/command_cop | 4b2c4f3020b6bdac1ed2973c5c8b2c1ba1b69b21 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
###
# Copyright 2019 Joël Perras <joel@nerderati.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###
###
# Prevent commands from being mistakenly printed to buffers instead of being
# executed due to leading spaces or tabs.
#
# Upon hitting enter with an input that has leading spaces before a slash e.g.
# ` /nick vulpine`, the input will be halted and a message will be printed in
# the core weechat buffer.
#
# There are currently no commands or settings. Simply install and activate this
# script and you're good to go.
###
import re
import weechat
SCRIPT_NAME = "command_cop"
SCRIPT_AUTHOR = "Joël Perras <joel@nerderati.com>"
SCRIPT_VERSION = "0.1"
SCRIPT_LICENSE = "MIT"
SCRIPT_DESC = "Prevent entering of leading spaces before /command."
def command_run_input(data, buffer, command):
""" Function called when a command "/input xxxx" is run."""
if command == "/input return": # As in enter was pressed.
# Get input contents.
input_s = weechat.buffer_get_string(buffer, 'input')
# Match leading spaces before commands (slashes) and spaces just after a
# command slash.
matches = re.match(r'(?:\s+/|/\s+)(.*)', input_s)
if matches is not None:
# Alert in weechat buffer.
weechat.prnt("", "%sLeading spaces detected in command!" % weechat.color('red'))
return weechat.WEECHAT_RC_OK_EAT
return weechat.WEECHAT_RC_OK
if __name__ == '__main__':
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, '', ''):
weechat.hook_command_run('/input return', 'command_run_input', '')
| 38.442857 | 105 | 0.719064 | # -*- encoding: utf-8 -*-
###
# Copyright 2019 Joël Perras <joel@nerderati.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###
###
# Prevent commands from being mistakenly printed to buffers instead of being
# executed due to leading spaces or tabs.
#
# Upon hitting enter with an input that has leading spaces before a slash e.g.
# ` /nick vulpine`, the input will be halted and a message will be printed in
# the core weechat buffer.
#
# There are currently no commands or settings. Simply install and activate this
# script and you're good to go.
###
import re
import weechat
SCRIPT_NAME = "command_cop"
SCRIPT_AUTHOR = "Joël Perras <joel@nerderati.com>"
SCRIPT_VERSION = "0.1"
SCRIPT_LICENSE = "MIT"
SCRIPT_DESC = "Prevent entering of leading spaces before /command."
def command_run_input(data, buffer, command):
""" Function called when a command "/input xxxx" is run."""
if command == "/input return": # As in enter was pressed.
# Get input contents.
input_s = weechat.buffer_get_string(buffer, 'input')
# Match leading spaces before commands (slashes) and spaces just after a
# command slash.
matches = re.match(r'(?:\s+/|/\s+)(.*)', input_s)
if matches is not None:
# Alert in weechat buffer.
weechat.prnt("", "%sLeading spaces detected in command!" % weechat.color('red'))
return weechat.WEECHAT_RC_OK_EAT
return weechat.WEECHAT_RC_OK
if __name__ == '__main__':
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, '', ''):
weechat.hook_command_run('/input return', 'command_run_input', '')
| 0 | 0 | 0 |
898fd9f2849e86df31a1eb44887c52a2fa952bac | 1,143 | py | Python | config.py | amour-lee/NewsProject | 85fcd3798e84657d13583b0d0344c993aeb87790 | [
"MIT"
] | null | null | null | config.py | amour-lee/NewsProject | 85fcd3798e84657d13583b0d0344c993aeb87790 | [
"MIT"
] | null | null | null | config.py | amour-lee/NewsProject | 85fcd3798e84657d13583b0d0344c993aeb87790 | [
"MIT"
] | null | null | null | from redis import StrictRedis
import logging
# 准备配置类
class Config(object):
"""app配置类"""
# DEBUG = True
# 配置MySQL:指定数据库位置
SQLALCHEMY_DATABASE_URI = 'mysql://root:mysql@mysql@127.0.0.1:3306/information_new'
# 禁用追踪mysql:因为mysql的性能差,如果再去追踪mysql的所有的修改,会再次浪费性能
SQLALCHEMY_TRACK_MODIFICATIONS = False
# 配置redis
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
# 准备秘钥
SECRET_KEY = 'ajkhdflhslfjlfh'
# 配置Session:将flask的session数据引导到redis
SESSION_TYPE = 'redis' # 存储到redis
# 配置redis的位置
SESSION_REDIS=StrictRedis(host=REDIS_HOST,port=REDIS_PORT)
# 使用签名将session的明文转成密文
SESSION_USE_SIGNER = True
# 设置session有效期:一天,以秒为单位
PERMANENT_SESSION_LIFETIME = 60*60*24
class DevelopmentConfig(Config):
"""开发环境配置类
如果开发环境的配置和父类一致,可以直接pass
"""
DEBUG = True
# 开发环境的日志等级为调试模式
LOGGING_LEVEL = logging.DEBUG
class ProductionConfig(Config):
"""生产环境配置类
实际开发中,需要额外配置生产环境下的数据库和其他的信息
"""
DEBUG = False
# 生产环境的日志等级为调试模式
LOGGING_LEVEL = logging.WARNING
# 工厂方法需要的原材料
configs = {
'dev':DevelopmentConfig,
'prod':ProductionConfig
} | 21.566038 | 87 | 0.688539 | from redis import StrictRedis
import logging
# 准备配置类
class Config(object):
"""app配置类"""
# DEBUG = True
# 配置MySQL:指定数据库位置
SQLALCHEMY_DATABASE_URI = 'mysql://root:mysql@mysql@127.0.0.1:3306/information_new'
# 禁用追踪mysql:因为mysql的性能差,如果再去追踪mysql的所有的修改,会再次浪费性能
SQLALCHEMY_TRACK_MODIFICATIONS = False
# 配置redis
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
# 准备秘钥
SECRET_KEY = 'ajkhdflhslfjlfh'
# 配置Session:将flask的session数据引导到redis
SESSION_TYPE = 'redis' # 存储到redis
# 配置redis的位置
SESSION_REDIS=StrictRedis(host=REDIS_HOST,port=REDIS_PORT)
# 使用签名将session的明文转成密文
SESSION_USE_SIGNER = True
# 设置session有效期:一天,以秒为单位
PERMANENT_SESSION_LIFETIME = 60*60*24
class DevelopmentConfig(Config):
"""开发环境配置类
如果开发环境的配置和父类一致,可以直接pass
"""
DEBUG = True
# 开发环境的日志等级为调试模式
LOGGING_LEVEL = logging.DEBUG
class ProductionConfig(Config):
"""生产环境配置类
实际开发中,需要额外配置生产环境下的数据库和其他的信息
"""
DEBUG = False
# 生产环境的日志等级为调试模式
LOGGING_LEVEL = logging.WARNING
# 工厂方法需要的原材料
configs = {
'dev':DevelopmentConfig,
'prod':ProductionConfig
} | 0 | 0 | 0 |
9c575a1207706993c411a4d5dde64251de3cb91a | 1,952 | py | Python | forms.py | wanderindev/fyyur | acf3a44ce7fae6b24576a320afd447c0595d76e5 | [
"MIT"
] | null | null | null | forms.py | wanderindev/fyyur | acf3a44ce7fae6b24576a320afd447c0595d76e5 | [
"MIT"
] | null | null | null | forms.py | wanderindev/fyyur | acf3a44ce7fae6b24576a320afd447c0595d76e5 | [
"MIT"
] | 2 | 2020-07-16T22:02:13.000Z | 2020-11-22T21:16:28.000Z | from datetime import datetime
from flask_wtf import Form
from wtforms import (
BooleanField,
DateTimeField,
SelectField,
SelectMultipleField,
StringField,
)
from wtforms.validators import DataRequired, URL
from constants import GENRES, STATES
| 35.490909 | 78 | 0.697746 | from datetime import datetime
from flask_wtf import Form
from wtforms import (
BooleanField,
DateTimeField,
SelectField,
SelectMultipleField,
StringField,
)
from wtforms.validators import DataRequired, URL
from constants import GENRES, STATES
class ShowForm(Form):
artist_id = StringField("artist_id")
venue_id = StringField("venue_id")
start_time = DateTimeField(
"start_time", validators=[DataRequired()], default=datetime.today()
)
class VenueForm(Form):
name = StringField("name", validators=[DataRequired()])
city = StringField("city", validators=[DataRequired()])
state = SelectField("state", validators=[DataRequired()], choices=STATES,)
address = StringField("address", validators=[DataRequired()])
phone = StringField("phone")
image_link = StringField("image_link")
genres = SelectMultipleField(
"genres", validators=[DataRequired()], choices=GENRES,
)
facebook_link = StringField("facebook_link", validators=[URL()])
website = StringField("website")
seeking_talent = BooleanField(
"seeking_talent", default=True, false_values=("false", "")
)
seeking_description = StringField("seeking_description")
class ArtistForm(Form):
name = StringField("name", validators=[DataRequired()])
city = StringField("city", validators=[DataRequired()])
state = SelectField("state", validators=[DataRequired()], choices=STATES,)
phone = StringField("phone")
genres = SelectMultipleField(
"genres", validators=[DataRequired()], choices=GENRES,
)
image_link = StringField("image_link", validators=[URL()],)
facebook_link = StringField("facebook_link", validators=[URL()],)
website = StringField("website", validators=[URL()],)
seeking_venue = BooleanField(
"seeking_venue", default=True, false_values=(False, "false", "")
)
seeking_description = StringField("seeking_description")
| 0 | 1,617 | 69 |
7194658629832659b219bf86a0f106db81b3b79b | 2,074 | py | Python | src/controllers/joint_space_feedforward_controller.py | MatthiasDR96/industrial_robotics_simulator | 9039e7a581ce97c583c73294e9937664de90530b | [
"MIT"
] | 1 | 2020-10-21T15:32:41.000Z | 2020-10-21T15:32:41.000Z | src/controllers/joint_space_feedforward_controller.py | MatthiasDR96/industrial_robotics_simulator | 9039e7a581ce97c583c73294e9937664de90530b | [
"MIT"
] | null | null | null | src/controllers/joint_space_feedforward_controller.py | MatthiasDR96/industrial_robotics_simulator | 9039e7a581ce97c583c73294e9937664de90530b | [
"MIT"
] | null | null | null | import math
import numpy as np
""" A controller class which implements a joint feedforward
controller by compensating for the desired acceleration torque and the desired gravity torque."""
| 31.424242 | 111 | 0.62729 | import math
import numpy as np
""" A controller class which implements a joint feedforward
controller by compensating for the desired acceleration torque and the desired gravity torque."""
class Control:
def __init__(self, arm):
# Bind arm
self.arm = arm
# Control type
self.control_type = 'joint'
# Joint space trajectory
self.trajectory_available = False
self.js_trajectory_q = None
self.js_trajectory_dq = None
self.js_trajectory_ddq = None
# External force function
self.external_force_available = False
self.fext_function = None # Not used in this control
# Control parameters
self.kp = 10
self.kd = math.sqrt(self.kp)
self.ki = 0
self.eint = 0
self.qprev = self.arm.q
# Desired states
self.q_des = np.zeros((self.arm.DOF, 1))
self.dq_des = np.zeros((self.arm.DOF, 1))
self.ddq_des = np.zeros((self.arm.DOF, 1))
def set_joint_space_target(self, q_des):
''' Sets a joint position target directly'''
assert np.shape(q_des) == (self.arm.DOF, 1)
self.q_des = q_des
def set_joint_space_trajectory(self, q_des, dq_des, ddq_des):
''' Sets a joint trajectory which the Simulator class will iterate during
simulation and update the desired states in function of time'''
assert np.shape(q_des)[0] == self.arm.DOF + 1
self.trajectory_available = True
self.js_trajectory_q = q_des
self.js_trajectory_dq = dq_des
self.js_trajectory_ddq = ddq_des
def control(self):
''' Implements the control law'''
# Compensation terms
inert = self.arm.inertia(self.q_des)
grav = self.arm.gravity(self.q_des)
# Compute desired torque with inertia and gravity compensation
tau = np.dot(inert, self.ddq_des) + grav
return np.zeros((self.arm.DOF, 1)), np.zeros((self.arm.DOF, 1)), np.zeros((self.arm.DOF, 1)), np.zeros(
(self.arm.DOF, 1)), tau
| 773 | 1,086 | 23 |
203ff959bd2a258325fc3617c59e0ffe1dab56f5 | 27,305 | py | Python | research/a2n/train.py | srihari-humbarwadi/neural-structured-learning | 345b8d644dd7745179263bf6dc9aeb8a921528f4 | [
"Apache-2.0"
] | 939 | 2019-08-28T06:50:30.000Z | 2022-03-30T02:37:07.000Z | research/a2n/train.py | srihari-humbarwadi/neural-structured-learning | 345b8d644dd7745179263bf6dc9aeb8a921528f4 | [
"Apache-2.0"
] | 80 | 2019-09-01T19:47:30.000Z | 2022-02-02T20:38:38.000Z | research/a2n/train.py | srihari-humbarwadi/neural-structured-learning | 345b8d644dd7745179263bf6dc9aeb8a921528f4 | [
"Apache-2.0"
] | 196 | 2019-09-01T19:38:53.000Z | 2022-02-08T01:25:57.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main logic for training the A2N model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import math
import os
from absl import app
from absl import flags
from absl import logging
import clueweb_text_graph
import dataset
import graph
import losses
import metrics
import models
import numpy as np
import slim
from tensorboard.plugins import projector
import tensorflow as tf
from tensorflow.python.training.summary_io import SummaryWriterCache
import text_graph
import utils
FLAGS = flags.FLAGS
flags.DEFINE_string("kg_file", None, "path to kg file")
flags.DEFINE_string("output_dir", None, "output dir for summaries/logs")
flags.DEFINE_string("dev_kg_file", None, "path to dev kg file")
flags.DEFINE_string("test_kg_file", None, "path to test kg file")
flags.DEFINE_string("model_path", None, "path to model if testing only")
flags.DEFINE_boolean("evaluate", False, "run eval loop")
flags.DEFINE_boolean("test_only", False, "if test only")
flags.DEFINE_integer("global_step", None,
"global_step to restore model for testing")
flags.DEFINE_integer("num_epochs", 5, "number of train epochs")
flags.DEFINE_integer("batchsize", 64, "batchsize for training")
flags.DEFINE_integer("test_batchsize", 10, "batchsize for testing")
flags.DEFINE_integer("max_neighbors", None,
"maximum neighbors to use during training")
flags.DEFINE_integer("max_negatives", None,
"maximum number of negative entities to sample")
flags.DEFINE_integer("emb_dim", 100,
"dimension of entity and relation embeddings")
flags.DEFINE_float("entity_encoder_dropout", 1.0,
"dropout for entity embeddings")
flags.DEFINE_float("relation_encoder_dropout", 1.0,
"dropout for relation embeddings")
flags.DEFINE_float("init_entity_encoder_dropout", 1.0,
"dropout for init entity embeddings in attention")
flags.DEFINE_float("attention_encoder_dropout", 1.0,
"dropout for attention encoder")
flags.DEFINE_boolean("use_separate_attention_emb", False,
"use separate entity embeddings for computing attention")
flags.DEFINE_integer("num_parallel_preprocess", 64,
"number of processes to use in dataset preprocessing")
flags.DEFINE_integer("prefetch_examples", 10, "number of examples to prefetch")
flags.DEFINE_integer("shuffle_buffer", 50000,
"buffer for shuffling training examples")
flags.DEFINE_float("learning_rate", 0.001, "learning for optimizer")
flags.DEFINE_float("grad_clip", None, "Clip gradient norm during training")
flags.DEFINE_integer("save_every", 100, "save model every this many steps")
flags.DEFINE_string("entity_names_file", None,
"mapping of Freebase mid to names")
flags.DEFINE_enum("model", "attention",
["distmult", "attention", "source_attention",
"source_rel_attention", "source_path_attention"],
"the model to use")
flags.DEFINE_bool("use_tanh", False, "use tanh non-linearity on embeddings")
flags.DEFINE_enum("attention_type", "bilinear",
["bilinear", "cosine", "sigmoid_bilinear",
"sigmoid_avg_bilinear", "relation"],
"type of attention to use for attention model")
flags.DEFINE_bool("analyze", False, "analyze model")
flags.DEFINE_integer("max_path_length", None,
"maximum path length for path attention models")
flags.DEFINE_string("text_kg_file", None, "path to text data")
flags.DEFINE_integer("max_text_len", None, "max length of text")
flags.DEFINE_integer("max_vocab_size", None, "max number of text words")
flags.DEFINE_integer("min_word_freq", None, "min freq threshold for text words")
flags.DEFINE_integer("max_text_neighbors", None, "max text neighbors")
flags.DEFINE_float("text_encoder_dropout", 1.0, "dropout for text cnn")
flags.DEFINE_list("text_encoder_filter_widths", ["3", "5", "7"],
"filter widths for cnn")
flags.DEFINE_enum("text_encoder_nonlinearity", "tanh", ["relu", "tanh"],
"non-linearity to use for TextCNN")
flags.DEFINE_integer("text_encoder_num_filters", 64, "num filters for cnn")
flags.DEFINE_string("clueweb_sentences", None,
"path to clueweb sentences (or data formatted like cw)")
flags.DEFINE_string("clueweb_data", None,
"path to clueweb data (or data formatted like cw)")
flags.DEFINE_string("clueweb_embeddings", None,
"path to clueweb embeddings (or data formatted like cw)")
flags.DEFINE_integer("text_emb_dim", None, "embedding dim for clueweb text")
flags.DEFINE_integer("subsample_text_rels", None,
"subsample text to max this many per pair")
flags.DEFINE_string("master", "local",
"""BNS name of the TensorFlow master to use.""")
flags.DEFINE_integer("task", 0,
"""Task id of the replica running the training.""")
flags.DEFINE_integer("ps_tasks", 0, """Number of tasks in the ps job.
If 0 no ps job is used.""")
flags.mark_flag_as_required("kg_file")
flags.mark_flag_as_required("output_dir")
def get_train_op(loss, optimizer, grad_clip=None, global_step=None):
  """Build the op that applies gradients of `loss` through `optimizer`.

  Args:
    loss: scalar loss tensor to minimize.
    optimizer: optimizer used to compute and apply gradients.
    grad_clip: if set, clip each gradient to this norm (default: no clipping).
    global_step: optional global-step variable incremented by the train op.

  Returns:
    train_op: the training op to run.
    grad_var_norms: (variable norm, gradient norm) pairs for debugging.
    var_names: the trainable variable names for debugging.
    capped_grads_and_vars: the gradient/variable pairs actually applied.
  """
  trainable = tf.trainable_variables()
  grads_and_vars = optimizer.compute_gradients(loss, trainable)
  var_names = [v.name for v in trainable]
  logging.info("Trainable variables:")
  for name in var_names:
    logging.info("\t %s", name)
  logging.debug(grads_and_vars)
  # Per-variable norms of the weights and of their gradients, for monitoring.
  grad_var_norms = [
      (tf.global_norm([gv[1]]), tf.global_norm([gv[0]]))
      for gv in grads_and_vars
  ]
  if grad_clip:
    # Clip each gradient individually to the requested norm.
    capped_grads_and_vars = [
        (tf.clip_by_norm(grad, grad_clip), var)
        for grad, var in grads_and_vars
    ]
  else:
    capped_grads_and_vars = grads_and_vars
  train_op = optimizer.apply_gradients(
      capped_grads_and_vars, global_step=global_step)
  return train_op, grad_var_norms, var_names, capped_grads_and_vars
def read_graph_data(
    kg_file, add_reverse_graph, add_inverse_edge, mode,
    num_epochs, batchsize, max_neighbors, max_negatives,
    train_graph=None, text_kg_file=None, val_graph=None
):
  """Read graph, create dataset and build model.

  Args:
    kg_file: path to the knowledge-graph triples file to read.
    add_reverse_graph: whether to also add reversed edges to the graph.
    add_inverse_edge: whether to add explicit inverse edges instead.
    mode: one of "train", "dev" or "test".
    num_epochs: number of epochs the dataset iterator runs for.
    batchsize: batch size for the dataset.
    max_neighbors: cap on KG neighbors per entity (None = no cap).
    max_negatives: cap on sampled negative entities (None = no cap).
    train_graph: training graph whose vocabularies are reused for dev/test.
    text_kg_file: path to textual relations (only used when mode == "train").
    val_graph: validation graph passed through to the Dataset.

  Returns:
    (k_graph, k_data): the constructed graph object and its Dataset, with
    the input iterator already created.
  """
  # Read graphs and create datasets
  # Reuse the training vocabularies so dev/test entity/relation ids line up
  # with the embeddings learned at training time.
  entity_vocab = relation_vocab = None
  if train_graph:
    entity_vocab = train_graph.entity_vocab
    relation_vocab = train_graph.relation_vocab
  # Text-augmented graph types are only built for training; evaluation uses
  # a plain KG graph (text_kg_file is forced to None below).
  if FLAGS.clueweb_data and mode == "train":
    graph_type = clueweb_text_graph.CWTextGraph
    text_kg_file = FLAGS.clueweb_data
  elif text_kg_file and mode == "train":
    graph_type = text_graph.TextGraph
    text_kg_file = FLAGS.text_kg_file
  else:
    graph_type = graph.Graph
    text_kg_file = None
  k_graph = graph_type(
      text_kg_file=text_kg_file,
      skip_new=True,
      max_text_len=FLAGS.max_text_len,
      max_vocab_size=FLAGS.max_vocab_size,
      min_word_freq=FLAGS.min_word_freq,
      kg_file=kg_file,
      add_reverse_graph=add_reverse_graph,
      add_inverse_edge=add_inverse_edge, mode=mode,
      entity_vocab=entity_vocab, relation_vocab=relation_vocab,
      max_path_length=FLAGS.max_path_length if mode == "train" else None,
      embeddings_file=FLAGS.clueweb_embeddings,
      sentence_vocab_file=FLAGS.clueweb_sentences,
      subsample=FLAGS.subsample_text_rels
  )
  if FLAGS.text_kg_file:
    # Fall back to the graph's observed max text length when the flag is
    # unset; dev/test reuse the training graph's value for consistency.
    max_text_len = FLAGS.max_text_len
    if mode == "train":
      max_text_len = max_text_len or k_graph.max_text_len
    elif train_graph:
      max_text_len = max_text_len or train_graph.max_text_len
  else:
    max_text_len = None
  k_data = dataset.Dataset(data_graph=k_graph, train_graph=train_graph,
                           mode=mode, num_epochs=num_epochs,
                           batchsize=batchsize,
                           max_neighbors=max_neighbors,
                           max_negatives=max_negatives,
                           model_type=FLAGS.model,
                           max_text_len=max_text_len,
                           max_text_neighbors=FLAGS.max_text_neighbors,
                           val_graph=val_graph)
  # Create the training data iterator and return the input tensors
  # with tf.device("/job:worker"):
  k_data.create_dataset_iterator(
      num_parallel=FLAGS.num_parallel_preprocess,
      prefetch=FLAGS.prefetch_examples,
      shuffle_buffer=FLAGS.shuffle_buffer
      # , device="worker" if FLAGS.master != "local" else "cpu"
  )
  return k_graph, k_data
def create_model(train_graph, iterator):
  """Create model and placeholders.

  Pulls one batch of input tensors from `iterator` and builds the scoring
  model selected by --model.  The iterator's tuple layout (and the returned
  `inputs` tuple) depends on whether clueweb/text data is configured.

  Returns:
    candidate_scores: scores for each candidate entity.
    candidates: the candidate entity ids.
    labels: target labels for the batch.
    model: dict of model encoders (used for feed_dicts / introspection).
    is_train_ph: boolean placeholder toggling train/eval behavior.
    inputs: the raw input tensors (shape depends on text flags).
  """
  # Unpack the batch; extra text-neighbor tensors only exist for text models.
  if FLAGS.clueweb_data:
    s, nbrs_s, text_nbrs_s, r, candidates, nbrs_candidates, labels, text_nbrs_s_emb = iterator.get_next()
  elif FLAGS.text_kg_file:
    s, nbrs_s, text_nbrs_s, r, candidates, nbrs_candidates, labels = \
        iterator.get_next()
  else:
    s, nbrs_s, r, candidates, nbrs_candidates, labels = iterator.get_next()
  # Create the attention model, this returns candidates scores and the model
  # encoders in a dict for creating feed_dict for all encoders
  is_train_ph = tf.placeholder_with_default(True, shape=[],
                                            name="is_train_ph")
  if FLAGS.model == "attention":
    with tf.variable_scope("attention_model", reuse=False):
      candidate_scores, model = models.attention_kbc_model(
          FLAGS, train_graph, is_train_ph,
          (s, nbrs_s, r, candidates, nbrs_candidates)
      )
  elif FLAGS.model == "source_attention":
    with tf.variable_scope("s_attention_model", reuse=False):
      candidate_scores, model = models.source_attention_kbc_model(
          FLAGS, train_graph, is_train_ph,
          (s, nbrs_s, r, candidates)
      )
  elif FLAGS.model in ["source_rel_attention", "source_path_attention"]:
    if FLAGS.clueweb_data:
      input_tensors = (s, nbrs_s, text_nbrs_s, text_nbrs_s_emb, r, candidates)
    elif FLAGS.text_kg_file:
      input_tensors = (s, nbrs_s, text_nbrs_s, r, candidates)
    else:
      input_tensors = (s, nbrs_s, r, candidates)
    with tf.variable_scope("s_attention_model", reuse=False):
      candidate_scores, model = models.source_attention_kbc_model(
          FLAGS, train_graph, is_train_ph,
          input_tensors, model_type=FLAGS.model
      )
  elif FLAGS.model == "distmult":
    with tf.variable_scope("distmult_model", reuse=False):
      candidate_scores, model = models.distmult_kbc_model(
          FLAGS, train_graph, is_train_ph,
          (s, r, candidates)
      )
  # NOTE: --model is a DEFINE_enum over exactly the branches above, so
  # candidate_scores/model are always bound when we reach this point.
  if FLAGS.clueweb_data:
    inputs = (s, nbrs_s, text_nbrs_s, text_nbrs_s_emb,
              r, candidates, nbrs_candidates)
  elif FLAGS.text_kg_file:
    inputs = (s, nbrs_s, text_nbrs_s, r, candidates, nbrs_candidates)
  else:
    inputs = (s, nbrs_s, r, candidates, nbrs_candidates)
  return candidate_scores, candidates, labels, model, is_train_ph, inputs
def evaluate():
  """Run evaluation on dev or test data.

  Rebuilds the training graph (for its vocabularies), constructs the dev or
  test dataset, restores a checkpoint and reports MRR and Hits@{1,3,10}.
  With --analyze it instead dumps per-example attention inspection files
  and returns early.  Raises ValueError if neither --dev_kg_file nor
  --test_kg_file is given.
  """
  # Path-attention models consume explicit inverse edges instead of a
  # reversed copy of the graph.
  add_inverse_edge = FLAGS.model in \
      ["source_rel_attention", "source_path_attention"]
  if FLAGS.clueweb_data:
    train_graph = clueweb_text_graph.CWTextGraph(
        text_kg_file=FLAGS.clueweb_data,
        embeddings_file=FLAGS.clueweb_embeddings,
        sentence_vocab_file=FLAGS.clueweb_sentences,
        skip_new=True,
        kg_file=FLAGS.kg_file,
        add_reverse_graph=not add_inverse_edge,
        add_inverse_edge=add_inverse_edge,
        subsample=FLAGS.subsample_text_rels
    )
  elif FLAGS.text_kg_file:
    train_graph = text_graph.TextGraph(
        text_kg_file=FLAGS.text_kg_file,
        skip_new=True,
        max_text_len=FLAGS.max_text_len,
        max_vocab_size=FLAGS.max_vocab_size,
        min_word_freq=FLAGS.min_word_freq,
        kg_file=FLAGS.kg_file,
        add_reverse_graph=not add_inverse_edge,
        add_inverse_edge=add_inverse_edge,
        max_path_length=FLAGS.max_path_length
    )
  else:
    train_graph = graph.Graph(
        kg_file=FLAGS.kg_file,
        add_reverse_graph=not add_inverse_edge,
        add_inverse_edge=add_inverse_edge,
        max_path_length=FLAGS.max_path_length
    )
  # train_graph, _ = read_graph_data(
  #     kg_file=FLAGS.kg_file,
  #     add_reverse_graph=(FLAGS.model != "source_rel_attention"),
  #     add_inverse_edge=(FLAGS.model == "source_rel_attention"),
  #     mode="train", num_epochs=FLAGS.num_epochs, batchsize=FLAGS.batchsize,
  #     max_neighbors=FLAGS.max_neighbors,
  #     max_negatives=FLAGS.max_negatives
  # )
  # If both dev and test files are given, the test dataset wins below; the
  # dev graph is still passed along as val_graph.
  val_graph = None
  if FLAGS.dev_kg_file:
    val_graph, eval_data = read_graph_data(
        kg_file=FLAGS.dev_kg_file,
        add_reverse_graph=not add_inverse_edge,
        add_inverse_edge=add_inverse_edge,
        # add_reverse_graph=False,
        # add_inverse_edge=False,
        mode="dev", num_epochs=1, batchsize=FLAGS.test_batchsize,
        max_neighbors=FLAGS.max_neighbors,
        max_negatives=FLAGS.max_negatives, train_graph=train_graph,
        text_kg_file=FLAGS.text_kg_file
    )
  if FLAGS.test_kg_file:
    _, eval_data = read_graph_data(
        kg_file=FLAGS.test_kg_file,
        add_reverse_graph=not add_inverse_edge,
        add_inverse_edge=add_inverse_edge,
        # add_reverse_graph=False,
        # add_inverse_edge=False,
        mode="test", num_epochs=1, batchsize=FLAGS.test_batchsize,
        max_neighbors=FLAGS.max_neighbors,
        max_negatives=None, train_graph=train_graph,
        text_kg_file=FLAGS.text_kg_file,
        val_graph=val_graph
    )
  if not FLAGS.dev_kg_file and not FLAGS.test_kg_file:
    raise ValueError("Evalution without a dev or test file!")
  iterator = eval_data.dataset.make_initializable_iterator()
  candidate_scores, candidates, labels, model, is_train_ph, inputs = \
      create_model(train_graph, iterator)
  # Create eval metrics
  # if FLAGS.dev_kg_file:
  # Streaming means over the batched rank metrics.
  batch_rr = metrics.mrr(candidate_scores, candidates, labels)
  mrr, mrr_update = tf.metrics.mean(batch_rr)
  mrr_summary = tf.summary.scalar("MRR", mrr)
  all_hits, all_hits_update, all_hits_summaries = [], [], []
  for k in [1, 3, 10]:
    batch_hits = metrics.hits_at_k(candidate_scores, candidates, labels, k=k)
    hits, hits_update = tf.metrics.mean(batch_hits)
    hits_summary = tf.summary.scalar("Hits_at_%d" % k, hits)
    all_hits.append(hits)
    all_hits_update.append(hits_update)
    all_hits_summaries.append(hits_summary)
  hits = tf.group(*all_hits)
  hits_update = tf.group(*all_hits_update)
  global_step = tf.Variable(0, name="global_step", trainable=False)
  # current_step is local so each evaluation pass starts counting from 0.
  current_step = tf.Variable(0, name="current_step", trainable=False,
                             collections=[tf.GraphKeys.LOCAL_VARIABLES])
  incr_current_step = tf.assign_add(current_step, 1)
  reset_current_step = tf.assign(current_step, 0)
  slim.get_or_create_global_step(graph=tf.get_default_graph())
  # best_hits = tf.Variable(0., trainable=False)
  # best_step = tf.Variable(0, trainable=False)
  # with tf.control_dependencies([hits]):
  #   update_best_hits = tf.cond(tf.greater(hits, best_hits),
  #                              lambda: tf.assign(best_hits, hits),
  #                              lambda: 0.)
  #   update_best_step = tf.cond(tf.greater(hits, best_hits),
  #                              lambda: tf.assign(best_step, global_step),
  #                              lambda: 0)
  # best_hits_summary = tf.summary.scalar("Best Hits@10", best_hits)
  # best_step_summary = tf.summary.scalar("Best Step", best_step)
  # A reversed graph doubles the number of evaluation examples.
  nexamples = eval_data.data_graph.tuple_store.shape[0]
  if eval_data.data_graph.add_reverse_graph:
    nexamples *= 2
  num_batches = math.ceil(nexamples / float(FLAGS.test_batchsize))
  local_init_op = tf.local_variables_initializer()
  if FLAGS.analyze:
    # Analysis mode: restore the checkpoint manually and dump per-example
    # predictions plus the top attended neighbors to text files.
    entity_names = utils.read_entity_name_mapping(FLAGS.entity_names_file)
    session = tf.Session()
    # summary_writer = tf.summary.FileWriter(FLAGS.output_dir, session.graph)
    init_op = tf.global_variables_initializer()
    session.run(init_op)
    session.run(local_init_op)
    saver = tf.train.Saver(tf.trainable_variables())
    ckpt_path = FLAGS.model_path + "/model.ckpt-%d" % FLAGS.global_step
    attention_probs = model["attention_encoder"].get_from_collection(
        "attention_probs"
    )
    if FLAGS.clueweb_data:
      s, nbrs_s, text_nbrs_s, text_nbrs_s_emb, r, candidates, _ = inputs
    elif FLAGS.text_kg_file:
      s, nbrs_s, text_nbrs_s, r, candidates, _ = inputs
    else:
      s, nbrs_s, r, candidates, _ = inputs
    saver.restore(session, ckpt_path)
    session.run(iterator.initializer)
    # Number of top-attended neighbors to print per example.
    num_attention = 5
    nsteps = 0
    outf_correct = open(FLAGS.output_dir + "/analyze_correct.txt", "w+")
    outf_incorrect = open(
        FLAGS.output_dir + "/analyze_incorrect.txt", "w+"
    )
    ncorrect = 0
    analyze_outputs = [candidate_scores, s, nbrs_s, r, candidates, labels,
                       attention_probs]
    if FLAGS.text_kg_file:
      analyze_outputs.append(text_nbrs_s)
    while True:
      try:
        analyze_vals = session.run(analyze_outputs, {is_train_ph: False})
        if FLAGS.text_kg_file:
          cscores, se, nbrs, qr, cands, te, nbr_attention_probs, text_nbrs = \
              analyze_vals
        else:
          cscores, se, nbrs, qr, cands, te, nbr_attention_probs = analyze_vals
        # import pdb; pdb.set_trace()
        pred_ids = cscores.argmax(1)
        for i in range(se.shape[0]):
          # Map ids back to readable names where a mapping is available.
          sname = train_graph.inverse_entity_vocab[se[i]]
          if sname in entity_names:
            sname = entity_names[sname]
          rname = train_graph.inverse_relation_vocab[qr[i]]
          pred_target = cands[i, pred_ids[i]]
          pred_name = train_graph.inverse_entity_vocab[pred_target]
          if pred_name in entity_names:
            pred_name = entity_names[pred_name]
          tname = train_graph.inverse_entity_vocab[te[i][0]]
          if tname in entity_names:
            tname = entity_names[tname]
          # Route the example to the correct/incorrect dump file.
          if te[i][0] == pred_target:
            outf = outf_correct
            ncorrect += 1
          else:
            outf = outf_incorrect
          outf.write("\n(%d) %s, %s, ? \t Pred: %s \t Target: %s" %
                     (nsteps+i+1, sname, rname, pred_name, tname))
          # Neighbors sorted by attention probability, highest first.
          top_nbrs_index = np.argsort(nbr_attention_probs[i, :])[::-1]
          outf.write("\nTop Nbrs:")
          for j in range(num_attention):
            nbr_index = top_nbrs_index[j]
            if nbr_index < FLAGS.max_neighbors:
              # KG neighbor: stored as alternating (relation, entity) ids.
              nbr_id = nbrs[i, nbr_index, :]
              nbr_name = ""
              for k in range(0, nbrs.shape[-1], 2):
                ent_name = train_graph.inverse_entity_vocab[nbr_id[k+1]]
                if ent_name in entity_names:
                  ent_name = entity_names[ent_name]
                rel_name = train_graph.inverse_relation_vocab[nbr_id[k]]
                nbr_name += "(%s, %s)" % (rel_name, ent_name)
            else:
              # Text Relation
              text_nbr_ids = text_nbrs[i, nbr_index - FLAGS.max_neighbors, :]
              text_nbr_ent = text_nbr_ids[0]
              ent_name = train_graph.inverse_entity_vocab[text_nbr_ent]
              if ent_name in entity_names:
                ent_name = entity_names[ent_name]
              rel_name = train_graph.get_relation_text(text_nbr_ids[1:])
              nbr_name = "(%s, %s)" % (rel_name, ent_name)
            outf.write("\n\t\t %s Prob: %.4f" %
                       (nbr_name, nbr_attention_probs[i, nbr_index]))
        nsteps += se.shape[0]
        tf.logging.info("Current hits@1: %.3f", ncorrect * 1.0 / (nsteps))
      except tf.errors.OutOfRangeError:
        break
    outf_correct.close()
    outf_incorrect.close()
    return
  if FLAGS.test_only:
    # Single evaluation of one fixed checkpoint.
    ckpt_path = FLAGS.model_path + "/model.ckpt-%d" % FLAGS.global_step
    slim.evaluation.evaluate_once(
        master=FLAGS.master,
        checkpoint_path=ckpt_path,
        logdir=FLAGS.output_dir,
        variables_to_restore=tf.trainable_variables() + [global_step],
        initial_op=tf.group(local_init_op, iterator.initializer),
        # initial_op=iterator.initializer,
        num_evals=num_batches,
        eval_op=tf.group(mrr_update, hits_update, incr_current_step),
        eval_op_feed_dict={is_train_ph: False},
        final_op=tf.group(mrr, hits),
        final_op_feed_dict={is_train_ph: False},
        summary_op=tf.summary.merge([mrr_summary]+ all_hits_summaries),
        hooks=[DataInitHook(),
               tf.train.LoggingTensorHook(
                   {"mrr": mrr, "hits": hits, "step": current_step},
                   every_n_iter=1
               )]
    )
  else:
    # Continuous evaluation: re-evaluates each new checkpoint that appears
    # in --model_path, polling every 60 seconds.
    slim.evaluation.evaluation_loop(
        master=FLAGS.master,
        checkpoint_dir=FLAGS.model_path,
        logdir=FLAGS.output_dir,
        variables_to_restore=tf.trainable_variables() + [global_step],
        initial_op=tf.group(local_init_op, iterator.initializer),
        # initial_op=iterator.initializer,
        num_evals=num_batches,
        eval_op=tf.group(mrr_update, hits_update, incr_current_step),
        eval_op_feed_dict={is_train_ph: False},
        final_op=tf.group(mrr, hits),
        final_op_feed_dict={is_train_ph: False},
        summary_op=tf.summary.merge([mrr_summary] + all_hits_summaries),
        max_number_of_evaluations=None,
        eval_interval_secs=60,
        hooks=[DataInitHook(),
               tf.train.LoggingTensorHook(
                   {"mrr": mrr, "hits": hits, "step": current_step},
                   every_n_iter=1
               )]
    )
def train():
  """Running the main training loop with given parameters.

  Builds the training graph/dataset, constructs the model and optimizer,
  sets up TensorBoard embedding visualization on the chief, and then runs
  the training loop until the input iterator is exhausted.
  """
  if FLAGS.task == 0 and not tf.gfile.Exists(FLAGS.output_dir):
    tf.gfile.MakeDirs(FLAGS.output_dir)
  # Read train/dev/test graphs, create datasets and model
  add_inverse_edge = FLAGS.model in \
      ["source_rel_attention", "source_path_attention"]
  train_graph, train_data = read_graph_data(
      kg_file=FLAGS.kg_file,
      add_reverse_graph=not add_inverse_edge,
      add_inverse_edge=add_inverse_edge,
      mode="train",
      num_epochs=FLAGS.num_epochs, batchsize=FLAGS.batchsize,
      max_neighbors=FLAGS.max_neighbors,
      max_negatives=FLAGS.max_negatives,
      text_kg_file=FLAGS.text_kg_file
  )
  # NOTE(review): no --brain_job_name flag is defined in this file; this
  # line relies on the flag being declared elsewhere -- verify before use.
  worker_device = "/job:{}".format(FLAGS.brain_job_name)
  with tf.device(
      tf.train.replica_device_setter(
          FLAGS.ps_tasks, worker_device=worker_device)):
    iterator = train_data.dataset.make_one_shot_iterator()
    candidate_scores, _, labels, model, is_train_ph, _ = create_model(
        train_graph, iterator
    )
    # Create train loss and training op
    loss = losses.softmax_crossentropy(logits=candidate_scores, labels=labels)
    optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
    global_step = tf.Variable(0, name="global_step", trainable=False)
    # get_train_op returns a 4-tuple (train_op, grad_var_norms, var_names,
    # capped_grads_and_vars); the whole tuple is fetched in session.run.
    train_op = get_train_op(loss, optimizer, FLAGS.grad_clip,
                            global_step=global_step)
  tf.summary.scalar("Loss", loss)
  run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)
  session_config = tf.ConfigProto(log_device_placement=True)
  # Create tf training session
  scaffold = tf.train.Scaffold(saver=tf.train.Saver(max_to_keep=1000))
  # ckpt_hook = tf.train.CheckpointSaverHook(
  #     checkpoint_dir=FLAGS.output_dir, scaffold=scaffold,
  #     save_steps=FLAGS.save_every
  # )
  # summary_hook = tf.train.SummarySaverHook(
  #     save_secs=60, output_dir=FLAGS.output_dir,
  #     summary_op=tf.summary.merge_all()
  # )
  session = tf.train.MonitoredTrainingSession(
      master=FLAGS.master,
      is_chief=(FLAGS.task == 0),
      checkpoint_dir=FLAGS.output_dir,
      save_checkpoint_steps=FLAGS.save_every,
      scaffold=scaffold,
      save_summaries_secs=60,
      # hooks=[summary_hook],
      # chief_only_hooks=[ckpt_hook],
      config=session_config
  )
  # Create embeddings visualization
  # Only the chief (task 0) writes vocab files and projector config.
  if FLAGS.task == 0:
    utils.save_embedding_vocabs(FLAGS.output_dir, train_graph,
                                FLAGS.entity_names_file)
    pconfig = projector.ProjectorConfig()
    add_embedding_to_projector(
        pconfig, model["entity_encoder"].embeddings.name.split(":")[0],
        os.path.join(FLAGS.output_dir, "entity_vocab.tsv")
    )
    add_embedding_to_projector(
        pconfig, model["relation_encoder"].embeddings.name.split(":")[0],
        os.path.join(FLAGS.output_dir, "relation_vocab.tsv")
    )
    if FLAGS.text_kg_file:
      word_embeddings = model["text_encoder"].word_embedding_encoder.embeddings
      add_embedding_to_projector(
          pconfig, word_embeddings.name.split(":")[0],
          os.path.join(FLAGS.output_dir, "word_vocab.tsv")
      )
    projector.visualize_embeddings(
        SummaryWriterCache.get(FLAGS.output_dir), pconfig
    )
  # Main training loop
  running_total_loss = 0.
  nsteps = 0
  gc.collect()
  while True:
    try:
      current_loss, _, _ = session.run(
          [loss, train_op, global_step],
          # feed_dict={is_train_ph: True, handle: train_iterator_handle},
          feed_dict={is_train_ph: True},
          options=run_options
      )
      nsteps += 1
      running_total_loss += current_loss
      tf.logging.info("Step %d, loss: %.3f, running avg loss: %.3f",
                      nsteps, current_loss, running_total_loss / nsteps)
      # Periodic garbage collection to keep host memory in check.
      if nsteps %2 == 0:
        gc.collect()
    except tf.errors.OutOfRangeError:
      # Input iterator exhausted: training epochs are done.
      tf.logging.info("End of Traning Epochs after %d steps", nsteps)
      break
def main(argv):
  """Entry point: dispatch to evaluation or training based on flags."""
  del argv  # unused
  # `app.run(main)` previously referenced an undefined `main`, which raised
  # NameError when the script was executed; define the obvious dispatcher.
  if FLAGS.evaluate or FLAGS.test_only:
    evaluate()
  else:
    train()


if __name__ == "__main__":
  app.run(main)
| 40.814649 | 105 | 0.680718 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main logic for training the A2N model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import math
import os
from absl import app
from absl import flags
from absl import logging
import clueweb_text_graph
import dataset
import graph
import losses
import metrics
import models
import numpy as np
import slim
from tensorboard.plugins import projector
import tensorflow as tf
from tensorflow.python.training.summary_io import SummaryWriterCache
import text_graph
import utils
FLAGS = flags.FLAGS
flags.DEFINE_string("kg_file", None, "path to kg file")
flags.DEFINE_string("output_dir", None, "output dir for summaries/logs")
flags.DEFINE_string("dev_kg_file", None, "path to dev kg file")
flags.DEFINE_string("test_kg_file", None, "path to test kg file")
flags.DEFINE_string("model_path", None, "path to model if testing only")
flags.DEFINE_boolean("evaluate", False, "run eval loop")
flags.DEFINE_boolean("test_only", False, "if test only")
flags.DEFINE_integer("global_step", None,
"global_step to restore model for testing")
flags.DEFINE_integer("num_epochs", 5, "number of train epochs")
flags.DEFINE_integer("batchsize", 64, "batchsize for training")
flags.DEFINE_integer("test_batchsize", 10, "batchsize for testing")
flags.DEFINE_integer("max_neighbors", None,
"maximum neighbors to use during training")
flags.DEFINE_integer("max_negatives", None,
"maximum number of negative entities to sample")
flags.DEFINE_integer("emb_dim", 100,
"dimension of entity and relation embeddings")
flags.DEFINE_float("entity_encoder_dropout", 1.0,
"dropout for entity embeddings")
flags.DEFINE_float("relation_encoder_dropout", 1.0,
"dropout for relation embeddings")
flags.DEFINE_float("init_entity_encoder_dropout", 1.0,
"dropout for init entity embeddings in attention")
flags.DEFINE_float("attention_encoder_dropout", 1.0,
"dropout for attention encoder")
flags.DEFINE_boolean("use_separate_attention_emb", False,
"use separate entity embeddings for computing attention")
flags.DEFINE_integer("num_parallel_preprocess", 64,
"number of processes to use in dataset preprocessing")
flags.DEFINE_integer("prefetch_examples", 10, "number of examples to prefetch")
flags.DEFINE_integer("shuffle_buffer", 50000,
"buffer for shuffling training examples")
flags.DEFINE_float("learning_rate", 0.001, "learning for optimizer")
flags.DEFINE_float("grad_clip", None, "Clip gradient norm during training")
flags.DEFINE_integer("save_every", 100, "save model every this many steps")
flags.DEFINE_string("entity_names_file", None,
"mapping of Freebase mid to names")
flags.DEFINE_enum("model", "attention",
["distmult", "attention", "source_attention",
"source_rel_attention", "source_path_attention"],
"the model to use")
flags.DEFINE_bool("use_tanh", False, "use tanh non-linearity on embeddings")
flags.DEFINE_enum("attention_type", "bilinear",
["bilinear", "cosine", "sigmoid_bilinear",
"sigmoid_avg_bilinear", "relation"],
"type of attention to use for attention model")
flags.DEFINE_bool("analyze", False, "analyze model")
flags.DEFINE_integer("max_path_length", None,
"maximum path length for path attention models")
flags.DEFINE_string("text_kg_file", None, "path to text data")
flags.DEFINE_integer("max_text_len", None, "max length of text")
flags.DEFINE_integer("max_vocab_size", None, "max number of text words")
flags.DEFINE_integer("min_word_freq", None, "min freq threshold for text words")
flags.DEFINE_integer("max_text_neighbors", None, "max text neighbors")
flags.DEFINE_float("text_encoder_dropout", 1.0, "dropout for text cnn")
flags.DEFINE_list("text_encoder_filter_widths", ["3", "5", "7"],
"filter widths for cnn")
flags.DEFINE_enum("text_encoder_nonlinearity", "tanh", ["relu", "tanh"],
"non-linearity to use for TextCNN")
flags.DEFINE_integer("text_encoder_num_filters", 64, "num filters for cnn")
flags.DEFINE_string("clueweb_sentences", None,
"path to clueweb sentences (or data formatted like cw)")
flags.DEFINE_string("clueweb_data", None,
"path to clueweb data (or data formatted like cw)")
flags.DEFINE_string("clueweb_embeddings", None,
"path to clueweb embeddings (or data formatted like cw)")
flags.DEFINE_integer("text_emb_dim", None, "embedding dim for clueweb text")
flags.DEFINE_integer("subsample_text_rels", None,
"subsample text to max this many per pair")
flags.DEFINE_string("master", "local",
"""BNS name of the TensorFlow master to use.""")
flags.DEFINE_integer("task", 0,
"""Task id of the replica running the training.""")
flags.DEFINE_integer("ps_tasks", 0, """Number of tasks in the ps job.
If 0 no ps job is used.""")
flags.mark_flag_as_required("kg_file")
flags.mark_flag_as_required("output_dir")
def add_embedding_to_projector(projector_config, emb_name, emb_metadata_path):
  """Register an embedding tensor with the TensorBoard projector.

  Args:
    projector_config: a projector ProjectorConfig to append the embedding to.
    emb_name: name of the embedding variable/tensor to visualize.
    emb_metadata_path: path to the TSV file labeling each embedding row.
  """
  embedding_conf = projector_config.embeddings.add()
  embedding_conf.tensor_name = emb_name
  embedding_conf.metadata_path = emb_metadata_path
def get_train_op(loss, optimizer, grad_clip=None, global_step=None):
  """Make a train_op apply gradients to loss using optimizer.

  Args:
    loss: the loss function to optimize
    optimizer: the optimizer to compute and apply gradients
    grad_clip: clip gradient norms by the value supplied (default dont clip)
    global_step: tf.placeholder for global_step

  Returns:
    train_op: the training op to run
    grad_var_norms: (variable norm, gradient norm) pairs for debugging
    var_names: the variable names for debugging
    capped_grads_and_vars: the gradient/variable pairs actually applied
  """
  variables = tf.trainable_variables()
  grads_and_vars = optimizer.compute_gradients(loss, variables)
  var_names = [v.name for v in variables]
  logging.info("Trainable variables:")
  for var in var_names:
    logging.info("\t %s", var)
  logging.debug(grads_and_vars)
  # Norm of each variable and of its gradient, for monitoring.
  grad_var_norms = [(tf.global_norm([gv[1]]), tf.global_norm([gv[0]]))
                    for gv in grads_and_vars]
  if grad_clip:
    # Clip each gradient individually to the requested norm.
    capped_grads_and_vars = [(tf.clip_by_norm(gv[0], grad_clip), gv[1])
                             for gv in grads_and_vars]
  else:
    capped_grads_and_vars = grads_and_vars
  # norms of gradients for debugging
  # grad_norms = [tf.sqrt(tf.reduce_sum(tf.square(grad)))
  #               for grad, _ in grads_and_vars]
  train_op = optimizer.apply_gradients(capped_grads_and_vars,
                                       global_step=global_step)
  return train_op, grad_var_norms, var_names, capped_grads_and_vars
def read_graph_data(
    kg_file, add_reverse_graph, add_inverse_edge, mode,
    num_epochs, batchsize, max_neighbors, max_negatives,
    train_graph=None, text_kg_file=None, val_graph=None
):
  """Read graph, create dataset and build model.

  Args:
    kg_file: path to the knowledge-graph triples file to read.
    add_reverse_graph: whether to also add reversed edges to the graph.
    add_inverse_edge: whether to add explicit inverse edges instead.
    mode: one of "train", "dev" or "test".
    num_epochs: number of epochs the dataset iterator runs for.
    batchsize: batch size for the dataset.
    max_neighbors: cap on KG neighbors per entity (None = no cap).
    max_negatives: cap on sampled negative entities (None = no cap).
    train_graph: training graph whose vocabularies are reused for dev/test.
    text_kg_file: path to textual relations (only used when mode == "train").
    val_graph: validation graph passed through to the Dataset.

  Returns:
    (k_graph, k_data): the constructed graph object and its Dataset, with
    the input iterator already created.
  """
  # Read graphs and create datasets
  # Reuse the training vocabularies so dev/test entity/relation ids line up
  # with the embeddings learned at training time.
  entity_vocab = relation_vocab = None
  if train_graph:
    entity_vocab = train_graph.entity_vocab
    relation_vocab = train_graph.relation_vocab
  # Text-augmented graph types are only built for training; evaluation uses
  # a plain KG graph (text_kg_file is forced to None below).
  if FLAGS.clueweb_data and mode == "train":
    graph_type = clueweb_text_graph.CWTextGraph
    text_kg_file = FLAGS.clueweb_data
  elif text_kg_file and mode == "train":
    graph_type = text_graph.TextGraph
    text_kg_file = FLAGS.text_kg_file
  else:
    graph_type = graph.Graph
    text_kg_file = None
  k_graph = graph_type(
      text_kg_file=text_kg_file,
      skip_new=True,
      max_text_len=FLAGS.max_text_len,
      max_vocab_size=FLAGS.max_vocab_size,
      min_word_freq=FLAGS.min_word_freq,
      kg_file=kg_file,
      add_reverse_graph=add_reverse_graph,
      add_inverse_edge=add_inverse_edge, mode=mode,
      entity_vocab=entity_vocab, relation_vocab=relation_vocab,
      max_path_length=FLAGS.max_path_length if mode == "train" else None,
      embeddings_file=FLAGS.clueweb_embeddings,
      sentence_vocab_file=FLAGS.clueweb_sentences,
      subsample=FLAGS.subsample_text_rels
  )
  if FLAGS.text_kg_file:
    # Fall back to the graph's observed max text length when the flag is
    # unset; dev/test reuse the training graph's value for consistency.
    max_text_len = FLAGS.max_text_len
    if mode == "train":
      max_text_len = max_text_len or k_graph.max_text_len
    elif train_graph:
      max_text_len = max_text_len or train_graph.max_text_len
  else:
    max_text_len = None
  k_data = dataset.Dataset(data_graph=k_graph, train_graph=train_graph,
                           mode=mode, num_epochs=num_epochs,
                           batchsize=batchsize,
                           max_neighbors=max_neighbors,
                           max_negatives=max_negatives,
                           model_type=FLAGS.model,
                           max_text_len=max_text_len,
                           max_text_neighbors=FLAGS.max_text_neighbors,
                           val_graph=val_graph)
  # Create the training data iterator and return the input tensors
  # with tf.device("/job:worker"):
  k_data.create_dataset_iterator(
      num_parallel=FLAGS.num_parallel_preprocess,
      prefetch=FLAGS.prefetch_examples,
      shuffle_buffer=FLAGS.shuffle_buffer
      # , device="worker" if FLAGS.master != "local" else "cpu"
  )
  return k_graph, k_data
def create_model(train_graph, iterator):
  """Create model and placeholders.

  Args:
    train_graph: training graph, used to size entity/relation vocabularies.
    iterator: tf.data iterator yielding one training/eval batch; the tuple
      layout depends on whether ClueWeb or text KG data is in use.

  Returns:
    Tuple of (candidate_scores, candidates, labels, model dict,
    is_train placeholder, input tensors).

  Raises:
    ValueError: if FLAGS.model is not a recognized model type.
  """
  # Unpack the batch; text variants carry extra text-neighbor tensors.
  if FLAGS.clueweb_data:
    s, nbrs_s, text_nbrs_s, r, candidates, nbrs_candidates, labels, \
        text_nbrs_s_emb = iterator.get_next()
  elif FLAGS.text_kg_file:
    s, nbrs_s, text_nbrs_s, r, candidates, nbrs_candidates, labels = \
        iterator.get_next()
  else:
    s, nbrs_s, r, candidates, nbrs_candidates, labels = iterator.get_next()
  # Create the attention model, this returns candidates scores and the model
  # encoders in a dict for creating feed_dict for all encoders.
  # is_train_ph defaults to True; evaluation feeds False to disable
  # training-only behavior (e.g. dropout).
  is_train_ph = tf.placeholder_with_default(True, shape=[],
                                            name="is_train_ph")
  if FLAGS.model == "attention":
    with tf.variable_scope("attention_model", reuse=False):
      candidate_scores, model = models.attention_kbc_model(
          FLAGS, train_graph, is_train_ph,
          (s, nbrs_s, r, candidates, nbrs_candidates)
      )
  elif FLAGS.model == "source_attention":
    with tf.variable_scope("s_attention_model", reuse=False):
      candidate_scores, model = models.source_attention_kbc_model(
          FLAGS, train_graph, is_train_ph,
          (s, nbrs_s, r, candidates)
      )
  elif FLAGS.model in ["source_rel_attention", "source_path_attention"]:
    if FLAGS.clueweb_data:
      input_tensors = (s, nbrs_s, text_nbrs_s, text_nbrs_s_emb, r, candidates)
    elif FLAGS.text_kg_file:
      input_tensors = (s, nbrs_s, text_nbrs_s, r, candidates)
    else:
      input_tensors = (s, nbrs_s, r, candidates)
    with tf.variable_scope("s_attention_model", reuse=False):
      candidate_scores, model = models.source_attention_kbc_model(
          FLAGS, train_graph, is_train_ph,
          input_tensors, model_type=FLAGS.model
      )
  elif FLAGS.model == "distmult":
    with tf.variable_scope("distmult_model", reuse=False):
      candidate_scores, model = models.distmult_kbc_model(
          FLAGS, train_graph, is_train_ph,
          (s, r, candidates)
      )
  else:
    # Fail fast with a clear message instead of a NameError on
    # candidate_scores below when an unknown model name is supplied.
    raise ValueError("Unknown model type: %s" % FLAGS.model)
  if FLAGS.clueweb_data:
    inputs = (s, nbrs_s, text_nbrs_s, text_nbrs_s_emb,
              r, candidates, nbrs_candidates)
  elif FLAGS.text_kg_file:
    inputs = (s, nbrs_s, text_nbrs_s, r, candidates, nbrs_candidates)
  else:
    inputs = (s, nbrs_s, r, candidates, nbrs_candidates)
  return candidate_scores, candidates, labels, model, is_train_ph, inputs
def evaluate():
  """Run evaluation on dev or test data.

  Builds the training graph (plain KG, text-augmented, or ClueWeb variant
  depending on flags), creates the dev/test dataset and model, then either:
    * dumps per-example predictions with top attended neighbors
      (FLAGS.analyze),
    * evaluates a single checkpoint (FLAGS.test_only), or
    * runs a continuous evaluation loop over new checkpoints.

  Raises:
    ValueError: if neither FLAGS.dev_kg_file nor FLAGS.test_kg_file is set.
  """
  # Source-relation / source-path attention models represent reverse links
  # as explicit inverse edges rather than adding a reversed graph copy.
  add_inverse_edge = FLAGS.model in \
      ["source_rel_attention", "source_path_attention"]
  if FLAGS.clueweb_data:
    train_graph = clueweb_text_graph.CWTextGraph(
        text_kg_file=FLAGS.clueweb_data,
        embeddings_file=FLAGS.clueweb_embeddings,
        sentence_vocab_file=FLAGS.clueweb_sentences,
        skip_new=True,
        kg_file=FLAGS.kg_file,
        add_reverse_graph=not add_inverse_edge,
        add_inverse_edge=add_inverse_edge,
        subsample=FLAGS.subsample_text_rels
    )
  elif FLAGS.text_kg_file:
    train_graph = text_graph.TextGraph(
        text_kg_file=FLAGS.text_kg_file,
        skip_new=True,
        max_text_len=FLAGS.max_text_len,
        max_vocab_size=FLAGS.max_vocab_size,
        min_word_freq=FLAGS.min_word_freq,
        kg_file=FLAGS.kg_file,
        add_reverse_graph=not add_inverse_edge,
        add_inverse_edge=add_inverse_edge,
        max_path_length=FLAGS.max_path_length
    )
  else:
    train_graph = graph.Graph(
        kg_file=FLAGS.kg_file,
        add_reverse_graph=not add_inverse_edge,
        add_inverse_edge=add_inverse_edge,
        max_path_length=FLAGS.max_path_length
    )
  # Evaluation data: dev, test, or both; when both are given the dev graph
  # is passed along as val_graph to the test reader.
  val_graph = None
  if FLAGS.dev_kg_file:
    val_graph, eval_data = read_graph_data(
        kg_file=FLAGS.dev_kg_file,
        add_reverse_graph=not add_inverse_edge,
        add_inverse_edge=add_inverse_edge,
        mode="dev", num_epochs=1, batchsize=FLAGS.test_batchsize,
        max_neighbors=FLAGS.max_neighbors,
        max_negatives=FLAGS.max_negatives, train_graph=train_graph,
        text_kg_file=FLAGS.text_kg_file
    )
  if FLAGS.test_kg_file:
    _, eval_data = read_graph_data(
        kg_file=FLAGS.test_kg_file,
        add_reverse_graph=not add_inverse_edge,
        add_inverse_edge=add_inverse_edge,
        mode="test", num_epochs=1, batchsize=FLAGS.test_batchsize,
        max_neighbors=FLAGS.max_neighbors,
        max_negatives=None, train_graph=train_graph,
        text_kg_file=FLAGS.text_kg_file,
        val_graph=val_graph
    )
  if not FLAGS.dev_kg_file and not FLAGS.test_kg_file:
    raise ValueError("Evaluation without a dev or test file!")
  iterator = eval_data.dataset.make_initializable_iterator()
  candidate_scores, candidates, labels, model, is_train_ph, inputs = \
      create_model(train_graph, iterator)
  # Create eval metrics: mean reciprocal rank and hits@{1,3,10}.
  batch_rr = metrics.mrr(candidate_scores, candidates, labels)
  mrr, mrr_update = tf.metrics.mean(batch_rr)
  mrr_summary = tf.summary.scalar("MRR", mrr)
  all_hits, all_hits_update, all_hits_summaries = [], [], []
  for k in [1, 3, 10]:
    batch_hits = metrics.hits_at_k(candidate_scores, candidates, labels, k=k)
    hits, hits_update = tf.metrics.mean(batch_hits)
    hits_summary = tf.summary.scalar("Hits_at_%d" % k, hits)
    all_hits.append(hits)
    all_hits_update.append(hits_update)
    all_hits_summaries.append(hits_summary)
  hits = tf.group(*all_hits)
  hits_update = tf.group(*all_hits_update)
  global_step = tf.Variable(0, name="global_step", trainable=False)
  # current_step lives in LOCAL_VARIABLES so each evaluation run restarts
  # it from zero via local_init_op / DataInitHook below.
  current_step = tf.Variable(0, name="current_step", trainable=False,
                             collections=[tf.GraphKeys.LOCAL_VARIABLES])
  incr_current_step = tf.assign_add(current_step, 1)
  reset_current_step = tf.assign(current_step, 0)
  slim.get_or_create_global_step(graph=tf.get_default_graph())
  # Number of eval batches; reverse-graph data doubles the example count.
  nexamples = eval_data.data_graph.tuple_store.shape[0]
  if eval_data.data_graph.add_reverse_graph:
    nexamples *= 2
  num_batches = math.ceil(nexamples / float(FLAGS.test_batchsize))
  local_init_op = tf.local_variables_initializer()
  if FLAGS.analyze:
    # Manual session: restore one checkpoint and write per-example
    # predictions plus the top attended neighbors to text files.
    entity_names = utils.read_entity_name_mapping(FLAGS.entity_names_file)
    session = tf.Session()
    init_op = tf.global_variables_initializer()
    session.run(init_op)
    session.run(local_init_op)
    saver = tf.train.Saver(tf.trainable_variables())
    ckpt_path = FLAGS.model_path + "/model.ckpt-%d" % FLAGS.global_step
    attention_probs = model["attention_encoder"].get_from_collection(
        "attention_probs"
    )
    if FLAGS.clueweb_data:
      s, nbrs_s, text_nbrs_s, text_nbrs_s_emb, r, candidates, _ = inputs
    elif FLAGS.text_kg_file:
      s, nbrs_s, text_nbrs_s, r, candidates, _ = inputs
    else:
      s, nbrs_s, r, candidates, _ = inputs
    saver.restore(session, ckpt_path)
    session.run(iterator.initializer)
    num_attention = 5
    nsteps = 0
    outf_correct = open(FLAGS.output_dir + "/analyze_correct.txt", "w+")
    outf_incorrect = open(
        FLAGS.output_dir + "/analyze_incorrect.txt", "w+"
    )
    ncorrect = 0
    analyze_outputs = [candidate_scores, s, nbrs_s, r, candidates, labels,
                       attention_probs]
    if FLAGS.text_kg_file:
      analyze_outputs.append(text_nbrs_s)
    while True:
      try:
        analyze_vals = session.run(analyze_outputs, {is_train_ph: False})
        if FLAGS.text_kg_file:
          cscores, se, nbrs, qr, cands, te, nbr_attention_probs, text_nbrs = \
              analyze_vals
        else:
          cscores, se, nbrs, qr, cands, te, nbr_attention_probs = analyze_vals
        pred_ids = cscores.argmax(1)
        for i in range(se.shape[0]):
          # Map ids back to names, preferring human-readable entity names.
          sname = train_graph.inverse_entity_vocab[se[i]]
          if sname in entity_names:
            sname = entity_names[sname]
          rname = train_graph.inverse_relation_vocab[qr[i]]
          pred_target = cands[i, pred_ids[i]]
          pred_name = train_graph.inverse_entity_vocab[pred_target]
          if pred_name in entity_names:
            pred_name = entity_names[pred_name]
          tname = train_graph.inverse_entity_vocab[te[i][0]]
          if tname in entity_names:
            tname = entity_names[tname]
          if te[i][0] == pred_target:
            outf = outf_correct
            ncorrect += 1
          else:
            outf = outf_incorrect
          outf.write("\n(%d) %s, %s, ? \t Pred: %s \t Target: %s" %
                     (nsteps+i+1, sname, rname, pred_name, tname))
          # Log the num_attention highest-probability neighbors.
          top_nbrs_index = np.argsort(nbr_attention_probs[i, :])[::-1]
          outf.write("\nTop Nbrs:")
          for j in range(num_attention):
            nbr_index = top_nbrs_index[j]
            if nbr_index < FLAGS.max_neighbors:
              # KG neighbor: alternating (relation, entity) id pairs.
              nbr_id = nbrs[i, nbr_index, :]
              nbr_name = ""
              for k in range(0, nbrs.shape[-1], 2):
                ent_name = train_graph.inverse_entity_vocab[nbr_id[k+1]]
                if ent_name in entity_names:
                  ent_name = entity_names[ent_name]
                rel_name = train_graph.inverse_relation_vocab[nbr_id[k]]
                nbr_name += "(%s, %s)" % (rel_name, ent_name)
            else:
              # Text relation neighbor.
              text_nbr_ids = text_nbrs[i, nbr_index - FLAGS.max_neighbors, :]
              text_nbr_ent = text_nbr_ids[0]
              ent_name = train_graph.inverse_entity_vocab[text_nbr_ent]
              if ent_name in entity_names:
                ent_name = entity_names[ent_name]
              rel_name = train_graph.get_relation_text(text_nbr_ids[1:])
              nbr_name = "(%s, %s)" % (rel_name, ent_name)
            outf.write("\n\t\t %s Prob: %.4f" %
                       (nbr_name, nbr_attention_probs[i, nbr_index]))
        nsteps += se.shape[0]
        tf.logging.info("Current hits@1: %.3f", ncorrect * 1.0 / (nsteps))
      except tf.errors.OutOfRangeError:
        break
    outf_correct.close()
    outf_incorrect.close()
    return

  class DataInitHook(tf.train.SessionRunHook):
    """Re-initializes the eval iterator and step counter per session."""

    def after_create_session(self, sess, coord):
      sess.run(iterator.initializer)
      sess.run(reset_current_step)

  if FLAGS.test_only:
    # Evaluate exactly one checkpoint identified by FLAGS.global_step.
    ckpt_path = FLAGS.model_path + "/model.ckpt-%d" % FLAGS.global_step
    slim.evaluation.evaluate_once(
        master=FLAGS.master,
        checkpoint_path=ckpt_path,
        logdir=FLAGS.output_dir,
        variables_to_restore=tf.trainable_variables() + [global_step],
        initial_op=tf.group(local_init_op, iterator.initializer),
        num_evals=num_batches,
        eval_op=tf.group(mrr_update, hits_update, incr_current_step),
        eval_op_feed_dict={is_train_ph: False},
        final_op=tf.group(mrr, hits),
        final_op_feed_dict={is_train_ph: False},
        summary_op=tf.summary.merge([mrr_summary] + all_hits_summaries),
        hooks=[DataInitHook(),
               tf.train.LoggingTensorHook(
                   {"mrr": mrr, "hits": hits, "step": current_step},
                   every_n_iter=1
               )]
    )
  else:
    # Continuously evaluate new checkpoints as training writes them.
    slim.evaluation.evaluation_loop(
        master=FLAGS.master,
        checkpoint_dir=FLAGS.model_path,
        logdir=FLAGS.output_dir,
        variables_to_restore=tf.trainable_variables() + [global_step],
        initial_op=tf.group(local_init_op, iterator.initializer),
        num_evals=num_batches,
        eval_op=tf.group(mrr_update, hits_update, incr_current_step),
        eval_op_feed_dict={is_train_ph: False},
        final_op=tf.group(mrr, hits),
        final_op_feed_dict={is_train_ph: False},
        summary_op=tf.summary.merge([mrr_summary] + all_hits_summaries),
        max_number_of_evaluations=None,
        eval_interval_secs=60,
        hooks=[DataInitHook(),
               tf.train.LoggingTensorHook(
                   {"mrr": mrr, "hits": hits, "step": current_step},
                   every_n_iter=1
               )]
    )
def train():
  """Run the main training loop with the configured flags.

  Builds the training graph/dataset, constructs the model under a replica
  device setter (for parameter-server training), writes embedding-projector
  metadata on the chief task, and trains until the input iterator is
  exhausted (FLAGS.num_epochs epochs).
  """
  if FLAGS.task == 0 and not tf.gfile.Exists(FLAGS.output_dir):
    tf.gfile.MakeDirs(FLAGS.output_dir)
  # Read train graph and create the dataset.
  add_inverse_edge = FLAGS.model in \
      ["source_rel_attention", "source_path_attention"]
  train_graph, train_data = read_graph_data(
      kg_file=FLAGS.kg_file,
      add_reverse_graph=not add_inverse_edge,
      add_inverse_edge=add_inverse_edge,
      mode="train",
      num_epochs=FLAGS.num_epochs, batchsize=FLAGS.batchsize,
      max_neighbors=FLAGS.max_neighbors,
      max_negatives=FLAGS.max_negatives,
      text_kg_file=FLAGS.text_kg_file
  )
  worker_device = "/job:{}".format(FLAGS.brain_job_name)
  with tf.device(
      tf.train.replica_device_setter(
          FLAGS.ps_tasks, worker_device=worker_device)):
    iterator = train_data.dataset.make_one_shot_iterator()
    candidate_scores, _, labels, model, is_train_ph, _ = create_model(
        train_graph, iterator
    )
    # Create train loss and training op.
    loss = losses.softmax_crossentropy(logits=candidate_scores, labels=labels)
    optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
    global_step = tf.Variable(0, name="global_step", trainable=False)
    train_op = get_train_op(loss, optimizer, FLAGS.grad_clip,
                            global_step=global_step)
    tf.summary.scalar("Loss", loss)
    run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)
    session_config = tf.ConfigProto(log_device_placement=True)
    # Create tf training session; the scaffold keeps many checkpoints so
    # the separate evaluation job can lag behind training.
    scaffold = tf.train.Scaffold(saver=tf.train.Saver(max_to_keep=1000))
    session = tf.train.MonitoredTrainingSession(
        master=FLAGS.master,
        is_chief=(FLAGS.task == 0),
        checkpoint_dir=FLAGS.output_dir,
        save_checkpoint_steps=FLAGS.save_every,
        scaffold=scaffold,
        save_summaries_secs=60,
        config=session_config
    )
    # Create embeddings visualization metadata (chief task only).
    if FLAGS.task == 0:
      utils.save_embedding_vocabs(FLAGS.output_dir, train_graph,
                                  FLAGS.entity_names_file)
      pconfig = projector.ProjectorConfig()
      add_embedding_to_projector(
          pconfig, model["entity_encoder"].embeddings.name.split(":")[0],
          os.path.join(FLAGS.output_dir, "entity_vocab.tsv")
      )
      add_embedding_to_projector(
          pconfig, model["relation_encoder"].embeddings.name.split(":")[0],
          os.path.join(FLAGS.output_dir, "relation_vocab.tsv")
      )
      if FLAGS.text_kg_file:
        word_embeddings = model["text_encoder"].word_embedding_encoder.embeddings
        add_embedding_to_projector(
            pconfig, word_embeddings.name.split(":")[0],
            os.path.join(FLAGS.output_dir, "word_vocab.tsv")
        )
      projector.visualize_embeddings(
          SummaryWriterCache.get(FLAGS.output_dir), pconfig
      )
    # Main training loop.
    running_total_loss = 0.
    nsteps = 0
    gc.collect()
    while True:
      try:
        current_loss, _, _ = session.run(
            [loss, train_op, global_step],
            feed_dict={is_train_ph: True},
            options=run_options
        )
        nsteps += 1
        running_total_loss += current_loss
        tf.logging.info("Step %d, loss: %.3f, running avg loss: %.3f",
                        nsteps, current_loss, running_total_loss / nsteps)
        # Collect garbage every other step (presumably to bound host
        # memory growth from the input pipeline -- kept from original).
        if nsteps % 2 == 0:
          gc.collect()
      except tf.errors.OutOfRangeError:
        tf.logging.info("End of Training Epochs after %d steps", nsteps)
        break
def main(argv):
  """Entry point: dispatch to evaluation or training based on flags."""
  del argv  # unused
  run_eval = FLAGS.test_only or FLAGS.evaluate or FLAGS.analyze
  if run_eval:
    evaluate()
    return
  train()


if __name__ == "__main__":
  app.run(main)
| 393 | 23 | 98 |
82acf4203f61f66b6891f1f5647271a4ce386876 | 1,414 | py | Python | Betsy/Betsy/modules/cluster_genes_by_kmeans.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | [
"MIT"
] | 9 | 2017-01-13T02:38:41.000Z | 2021-04-08T00:44:39.000Z | Betsy/Betsy/modules/cluster_genes_by_kmeans.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | [
"MIT"
] | null | null | null | Betsy/Betsy/modules/cluster_genes_by_kmeans.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | [
"MIT"
] | 4 | 2017-01-05T16:25:25.000Z | 2019-12-12T20:07:38.000Z | from Module import AbstractModule
| 31.422222 | 74 | 0.630127 | from Module import AbstractModule
class Module(AbstractModule):
    """Betsy module: cluster genes with k-means (Cluster 3.0) and save
    the resulting CDT/KAG/KGG files into the output path."""

    def __init__(self):
        AbstractModule.__init__(self)

    def run(
        self, network, in_data, out_attributes, user_options, num_cores,
        out_path):
        import os
        import shutil
        from genomicode import filelib
        from Betsy import module_utils as mlib
        import cluster_genes_by_hierarchical as clust

        filelib.safe_mkdir(out_path)
        metadata = {}

        # Number of clusters requested by the user; must be a sane value.
        kmeans_k = mlib.get_user_option(
            user_options, "kmeans_k", not_empty=True, type=int)
        assert kmeans_k >= 2 and kmeans_k < 100

        cmd, cluster_files = clust.run_cluster30(
            in_data.identifier, "kmeans", user_options, kmeans_k=kmeans_k)
        metadata["command"] = cmd

        # The clustered signal matrix is mandatory; the array/gene cluster
        # assignment files are optional outputs of Cluster 3.0.
        assert "cdt" in cluster_files
        shutil.copy2(cluster_files["cdt"],
                     os.path.join(out_path, "signal.cdt"))
        optional_outputs = [("kag", "array_cluster.kag"),
                            ("kgg", "gene_cluster.kgg")]
        for key, out_name in optional_outputs:
            if key in cluster_files:
                shutil.copy2(cluster_files[key],
                             os.path.join(out_path, out_name))
        return metadata

    def name_outfile(self, antecedents, user_options):
        """Return the name of the output directory for this module."""
        return "cluster"
| 1,268 | 8 | 103 |
8276054130b6223eb6e4eaa348d70760cd57a8e8 | 1,419 | py | Python | scrapfishin/queries.py | SupahNoob/scrapfishin | 8163ee40a348ff45d2dd0384acf948c4fff87fa3 | [
"MIT"
] | null | null | null | scrapfishin/queries.py | SupahNoob/scrapfishin | 8163ee40a348ff45d2dd0384acf948c4fff87fa3 | [
"MIT"
] | null | null | null | scrapfishin/queries.py | SupahNoob/scrapfishin | 8163ee40a348ff45d2dd0384acf948c4fff87fa3 | [
"MIT"
] | null | null | null | from typing import Iterable
import sqlalchemy as sa
from scrapfishin.models import Recipe
def grocery_list(
    s: sa.orm.Session,
    recipes: Iterable[Recipe]
) -> str:
    """
    Format an iterable of Recipes into a Grocery List.

    Entries with the same "<unit> of <food>" are merged by summing their
    amounts.

    Parameters
    ----------
    s : sqlalchemy.orm.Session
        database session to bind objects

    recipes : [Recipe]
        list of recipes to shop for

    Returns
    -------
    grocery_page : str
        page of sorted ingredients
    """
    seen = []

    for r in recipes:
        for i in r.ingredient_amounts:
            unit = f'{i.measurement.unit} of {i.ingredient.food}'
            amount = i.amount

            try:
                # Match the exact "<unit> of <food>" suffix. A plain
                # substring test (previous behavior) wrongly merged e.g.
                # "g of salt" into an existing "kg of salt" entry.
                existing = next(e for e in seen if e.endswith(f' {unit}'))
            except StopIteration:
                pass
            else:
                amount += float(existing.split(' ')[0])
                seen.remove(existing)

            seen.append(f'{amount} {unit}')

    return '\n'.join(sorted(seen, key=lambda i: i.split(' of ')[-1]))
def random_recipe(s: sa.orm.Session, *, n: int=1) -> Iterable[Recipe]:
    """
    Get `n` random recipes.

    Parameters
    ----------
    s : sqlalchemy.orm.Session
        database session to bind objects

    n : int = [default: 1]
        number of recipes to return

    Returns
    -------
    recipes : iterator of Recipe
    """
    # NOTE: the return annotation must subscript the generic type --
    # ``Iterable(Recipe)`` raises a TypeError at import time because
    # typing.Iterable cannot be instantiated.
    q = s.query(Recipe)\
         .order_by(sa.func.random())\
         .limit(n)

    return iter(q.all())
| 21.830769 | 70 | 0.549683 | from typing import Iterable
import sqlalchemy as sa
from scrapfishin.models import Recipe
def grocery_list(
    s: sa.orm.Session,
    recipes: Iterable[Recipe]
) -> str:
    """
    Format an iterable of Recipes into a Grocery List.

    Entries with the same "<unit> of <food>" are merged by summing their
    amounts.

    Parameters
    ----------
    s : sqlalchemy.orm.Session
        database session to bind objects

    recipes : [Recipe]
        list of recipes to shop for

    Returns
    -------
    grocery_page : str
        page of sorted ingredients
    """
    seen = []

    for r in recipes:
        for i in r.ingredient_amounts:
            unit = f'{i.measurement.unit} of {i.ingredient.food}'
            amount = i.amount

            try:
                # Match the exact "<unit> of <food>" suffix. A plain
                # substring test (previous behavior) wrongly merged e.g.
                # "g of salt" into an existing "kg of salt" entry.
                existing = next(e for e in seen if e.endswith(f' {unit}'))
            except StopIteration:
                pass
            else:
                amount += float(existing.split(' ')[0])
                seen.remove(existing)

            seen.append(f'{amount} {unit}')

    return '\n'.join(sorted(seen, key=lambda i: i.split(' of ')[-1]))
def random_recipe(s: sa.orm.Session, *, n: int=1) -> Iterable[Recipe]:
    """
    Get `n` random recipes.

    Parameters
    ----------
    s : sqlalchemy.orm.Session
        database session to bind objects

    n : int = [default: 1]
        number of recipes to return

    Returns
    -------
    recipes : iterator of Recipe
    """
    # NOTE: the return annotation must subscript the generic type --
    # ``Iterable(Recipe)`` raises a TypeError at import time because
    # typing.Iterable cannot be instantiated.
    q = s.query(Recipe)\
         .order_by(sa.func.random())\
         .limit(n)

    return iter(q.all())
| 0 | 0 | 0 |
9ab7a8ab803317414b2c748a2679e0808435230a | 2,345 | py | Python | ndg/security/server/test/config/attributeauthority/sitea/attributeauthorityapp.py | cedadev/ndg_security_server | 6873cc0de1a01ad05ddcbeb3f074a33923dc1ca1 | [
"BSD-3-Clause"
] | null | null | null | ndg/security/server/test/config/attributeauthority/sitea/attributeauthorityapp.py | cedadev/ndg_security_server | 6873cc0de1a01ad05ddcbeb3f074a33923dc1ca1 | [
"BSD-3-Clause"
] | null | null | null | ndg/security/server/test/config/attributeauthority/sitea/attributeauthorityapp.py | cedadev/ndg_security_server | 6873cc0de1a01ad05ddcbeb3f074a33923dc1ca1 | [
"BSD-3-Clause"
] | 1 | 2017-12-05T17:31:08.000Z | 2017-12-05T17:31:08.000Z | #!/usr/bin/env python
"""NDG Security Attribute Authority test harness for unit test site 'A'
NERC Data Grid Project
"""
__author__ = "P J Kershaw"
__date__ = "24/09/08"
__copyright__ = "(C) 2009 Science and Technology Facilities Council"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = "$Id$"
from os import path
import optparse
from paste.script.util.logging_config import fileConfig
from paste.deploy import loadapp
from ndg.security.server.utils.wsgi_utils import GunicornServerApp
from ndg.security.server.test.base import NDGSEC_TEST_CONFIG_DIR
INI_FILENAME = 'attribute-service.ini'
CFG_FILEPATH = path.join(path.dirname(path.abspath(__file__)), INI_FILENAME)
if __name__ == '__main__':
    # Default SSL credentials: self-signed localhost certificate and key
    # from the unit-test PKI directory.
    def_cert_filepath = path.join(NDGSEC_TEST_CONFIG_DIR,
                                  'pki',
                                  'localhost.crt')
    def_prikey_filepath = path.join(NDGSEC_TEST_CONFIG_DIR,
                                    'pki',
                                    'localhost.key')
    # Command-line options: listen port, SSL cert/key and the Paste ini
    # file to load the WSGI application from.
    parser = optparse.OptionParser()
    parser.add_option("-p",
                      "--port",
                      dest="port",
                      default=5443,
                      type='int',
                      help="port number to run under")
    parser.add_option("-c",
                      "--cert-file",
                      dest='cert_filepath',
                      default=def_cert_filepath,
                      help="SSL Certificate file")
    parser.add_option("-k",
                      "--private-key-file",
                      dest='prikey_filepath',
                      default=def_prikey_filepath,
                      help="SSL private key file")
    parser.add_option("-f",
                      "--conf",
                      dest="config_filepath",
                      default=CFG_FILEPATH,
                      help="Configuration file path")
    opt = parser.parse_args()[0]
    dir_name = path.dirname(__file__)
    # Gunicorn settings: serve over HTTPS on localhost at the chosen port.
    options = {
        'bind': '{}:{}'.format('127.0.0.1', opt.port),
        'keyfile': opt.prikey_filepath,
        'certfile': opt.cert_filepath
    }
    # Configure logging from the ini file, load the WSGI app and wrap it
    # in the Gunicorn server application.
    fileConfig(opt.config_filepath)
    app = loadapp('config:%s' % opt.config_filepath)
    gunicorn_server_app = GunicornServerApp(app, options)
gunicorn_server_app.run() | 33.028169 | 76 | 0.557356 | #!/usr/bin/env python
"""NDG Security Attribute Authority test harness for unit test site 'A'
NERC Data Grid Project
"""
__author__ = "P J Kershaw"
__date__ = "24/09/08"
__copyright__ = "(C) 2009 Science and Technology Facilities Council"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = "$Id$"
from os import path
import optparse
from paste.script.util.logging_config import fileConfig
from paste.deploy import loadapp
from ndg.security.server.utils.wsgi_utils import GunicornServerApp
from ndg.security.server.test.base import NDGSEC_TEST_CONFIG_DIR
INI_FILENAME = 'attribute-service.ini'
CFG_FILEPATH = path.join(path.dirname(path.abspath(__file__)), INI_FILENAME)
if __name__ == '__main__':
    # Default SSL credentials: self-signed localhost certificate and key
    # from the unit-test PKI directory.
    def_cert_filepath = path.join(NDGSEC_TEST_CONFIG_DIR,
                                  'pki',
                                  'localhost.crt')
    def_prikey_filepath = path.join(NDGSEC_TEST_CONFIG_DIR,
                                    'pki',
                                    'localhost.key')
    # Command-line options: listen port, SSL cert/key and the Paste ini
    # file to load the WSGI application from.
    parser = optparse.OptionParser()
    parser.add_option("-p",
                      "--port",
                      dest="port",
                      default=5443,
                      type='int',
                      help="port number to run under")
    parser.add_option("-c",
                      "--cert-file",
                      dest='cert_filepath',
                      default=def_cert_filepath,
                      help="SSL Certificate file")
    parser.add_option("-k",
                      "--private-key-file",
                      dest='prikey_filepath',
                      default=def_prikey_filepath,
                      help="SSL private key file")
    parser.add_option("-f",
                      "--conf",
                      dest="config_filepath",
                      default=CFG_FILEPATH,
                      help="Configuration file path")
    opt = parser.parse_args()[0]
    dir_name = path.dirname(__file__)
    # Gunicorn settings: serve over HTTPS on localhost at the chosen port.
    options = {
        'bind': '{}:{}'.format('127.0.0.1', opt.port),
        'keyfile': opt.prikey_filepath,
        'certfile': opt.cert_filepath
    }
    # Configure logging from the ini file, load the WSGI app and wrap it
    # in the Gunicorn server application.
    fileConfig(opt.config_filepath)
    app = loadapp('config:%s' % opt.config_filepath)
    gunicorn_server_app = GunicornServerApp(app, options)
gunicorn_server_app.run() | 0 | 0 | 0 |
688e52d3ed32eaeadd847a7afafdb32a45017795 | 2,836 | py | Python | clients/python/tyckiting_client/ai/stettin.py | CarstenWalther/space-tyckiting | 8398f080332c78c7f246289947fdda49558e0f12 | [
"MIT"
] | 1 | 2017-02-04T14:13:44.000Z | 2017-02-04T14:13:44.000Z | clients/python/tyckiting_client/ai/stettin.py | CarstenWalther/space-tyckiting | 8398f080332c78c7f246289947fdda49558e0f12 | [
"MIT"
] | null | null | null | clients/python/tyckiting_client/ai/stettin.py | CarstenWalther/space-tyckiting | 8398f080332c78c7f246289947fdda49558e0f12 | [
"MIT"
] | null | null | null | import random
from tyckiting_client.ai import base
from tyckiting_client import actions
from tyckiting_client.ai.strategies import pipelineEscaping
from tyckiting_client.ai.strategies import scanning
from tyckiting_client.ai.strategies import uncertainTracking
'''
Rules:
Like the "robin" strategy, but in certain situations endangered bots stay in place and perform another action instead of moving.
'''
STAY_PROB = 0.25
| 34.585366 | 94 | 0.627997 | import random
from tyckiting_client.ai import base
from tyckiting_client import actions
from tyckiting_client.ai.strategies import pipelineEscaping
from tyckiting_client.ai.strategies import scanning
from tyckiting_client.ai.strategies import uncertainTracking
'''
Rules:
Like the "robin" strategy, but in certain situations endangered bots stay in place and perform another action instead of moving.
'''
STAY_PROB = 0.25
class Ai(base.BaseAi):
    """Team AI: endangered bots usually flee, but sometimes stay and act.

    Variation on the "robin" strategy: each endangered bot keeps its
    position with probability STAY_PROB and spends the turn shooting or
    scanning instead of moving.
    """

    def __init__(self, team_id, config=None):
        # Strategy helpers: escaping chooses evasion moves, scanning picks
        # radar positions, tracking follows a located enemy.
        super(Ai, self).__init__(team_id, config)
        self.escaping = pipelineEscaping.PipelineEscapingAdvanced(self.config)
        self.scanning = scanning.StatisticalScanning(self.config)
        self.tracking = uncertainTracking.UncertainTracker(uncertainTracking.BALANCED_PATTERN)

    def move(self, bots, events):
        """Return one action per living bot for this round.

        Endangered bots move away (unless they randomly choose to stay);
        the remaining bots radar a tracked target, fire at predicted
        coordinates, or scan statistically chosen positions.
        """
        response = []
        # NOTE(review): getEndangeredBots/livingBotCount appear to be
        # inherited from base.BaseAi and to yield bot ids / a count --
        # confirm against the base class.
        endangered = self.getEndangeredBots(events)
        livingCount = self.livingBotCount(bots)
        target = self.tracking.getTarget()
        # Each endangered bot flees with probability 1 - STAY_PROB.
        botsToMove = []
        for endangeredBot in endangered:
            if random.uniform(0,1) > STAY_PROB:
                botsToMove.append(endangeredBot)
        available = livingCount - len(botsToMove)
        positionsNextRound = self.doPositioning(botsToMove, response, bots, target)
        # Reserve one non-moving bot to radar the tracked target, if any.
        pendingTrackScan = False
        if target and available >= 2:
            pendingTrackScan = True
            available -= 1
        shooting = True
        shootCoords = self.tracking.getShootCoordinates(available, positionsNextRound)
        if not shootCoords:
            shooting = False
        scanCoords = self.scanning.getPossibleScanPositions(available)
        for bot in bots:
            if not bot.alive:
                continue
            if bot.bot_id in botsToMove:
                # Move action already issued inside doPositioning().
                continue
            elif pendingTrackScan:
                action = actions.Radar(bot_id=bot.bot_id, x=target[0], y=target[1])
                pendingTrackScan = False
            elif shooting:
                shootCoord = shootCoords.pop()
                action = actions.Cannon(bot_id=bot.bot_id, x=shootCoord[0], y=shootCoord[1])
            else:
                scanCoord = scanCoords.pop()
                action = actions.Radar(bot_id=bot.bot_id, x=scanCoord[0], y=scanCoord[1])
            response.append(action)
        return response

    def doPositioning(self, botsToMove, response, bots, target):
        """Append Move actions for ``botsToMove`` to ``response`` (which is
        mutated in place) and return each living bot's position for the
        next round (new position for movers, current position otherwise)."""
        positions = []
        for bot in bots:
            if not bot.alive:
                continue
            if bot.bot_id in botsToMove:
                newPos = self.escaping.getMove(bot, bots, target)
                action = actions.Move(bot_id=bot.bot_id, x=newPos.x, y=newPos.y)
                response.append(action)
                positions.append(newPos)
            else:
                positions.append(bot.pos)
        return positions
| 2,353 | 1 | 104 |
8a8b53191890167d24f5bf11c407382590852801 | 2,581 | py | Python | faces.py | Sxela/PythonSnippets | 6a91e2c6080330195aedc04c7b9c36636cb488ff | [
"MIT"
] | null | null | null | faces.py | Sxela/PythonSnippets | 6a91e2c6080330195aedc04c7b9c36636cb488ff | [
"MIT"
] | null | null | null | faces.py | Sxela/PythonSnippets | 6a91e2c6080330195aedc04c7b9c36636cb488ff | [
"MIT"
] | 1 | 2021-11-20T06:26:19.000Z | 2021-11-20T06:26:19.000Z | """
Code related to face detection and manipulation
"""
#pip install facenet_pytorch
from facenet_pytorch import MTCNN
mtcnn = MTCNN(image_size=256, margin=80)
# simplest ye olde trustworthy MTCNN for face detection with landmarks
# my version of isOdd, should make a separate repo for it :D
# the actual scaler function
"""
A useful scaler algorithm, based on face detection.
Takes PIL.Image, returns a uniformly scaled PIL.Image
boxes: a list of detected bboxes
_img: PIL.Image
max_res: maximum pixel area to fit into. Use to stay below the VRAM limits of your GPU.
target_face: desired face size. Upscale or downscale the whole image to fit the detected face into that dimension.
fixed_ratio: fixed scale. Ignores the face size, but doesn't ignore the max_res limit.
max_upscale: maximum upscale ratio. Prevents scaling images with tiny faces into a blurry mess.
"""
| 31.096386 | 118 | 0.678032 | """
Code related to face detection and manipulation
"""
#pip install facenet_pytorch
from facenet_pytorch import MTCNN
mtcnn = MTCNN(image_size=256, margin=80)
# simplest ye olde trustworthy MTCNN for face detection with landmarks
def detect(img):
    """Run MTCNN face detection on ``img``.

    Returns a tuple ``(boxes, landmark_points)`` as produced by the
    module-level ``mtcnn`` detector; confidence scores are discarded.
    """
    boxes, probs, points = mtcnn.detect(img, landmarks=True)
    # If the detector is configured to keep only one face, narrow the
    # detections with its own selection strategy.
    if not mtcnn.keep_all:
        boxes, probs, points = mtcnn.select_boxes(
            boxes, probs, points, img, method=mtcnn.selection_method)
    return boxes, points
# my version of isOdd, should make a separate repo for it :D
def makeEven(_x):
    """Return ``_x`` unchanged when even, otherwise ``_x + 1``."""
    if _x % 2 == 0:
        return _x
    return _x + 1
# the actual scaler function
def scale(boxes, _img, max_res=1_500_000, target_face=256, fixed_ratio=0, max_upscale=2, VERBOSE=False):
    """Uniformly rescale ``_img`` based on the first detected face box.

    The scale factor fits the largest side of ``boxes[0]`` to
    ``target_face`` (capped at ``max_upscale``), or is ``fixed_ratio``
    when that is positive. The result is then shrunk, if needed, so its
    pixel area stays within ``max_res``, and both dimensions are rounded
    to even integers.
    """
    width, height = _img.size
    ratio = 2  # default upscale used when no usable face box is supplied
    if boxes is not None and len(boxes) > 0:
        face_side = max(boxes[0][2:] - boxes[0][:2])
        ratio = min(target_face / face_side, max_upscale)
        if VERBOSE:
            print('up by', ratio)
    if fixed_ratio > 0:
        if VERBOSE:
            print('fixed ratio')
        ratio = fixed_ratio
    width *= ratio
    height *= ratio
    # Shrink uniformly when the scaled area exceeds the pixel budget.
    area = width * height
    if area > max_res:
        shrink = (area / max_res) ** 0.5
        if VERBOSE:
            print(shrink)
        width = int(width / shrink)
        height = int(height / shrink)
    # Even dimensions avoid skip-connection size mismatches in many NNs.
    return _img.resize((makeEven(int(width)), makeEven(int(height))))
"""
A useful scaler algorithm, based on face detection.
Takes PIL.Image, returns a uniformly scaled PIL.Image
boxes: a list of detected bboxes
_img: PIL.Image
max_res: maximum pixel area to fit into. Use to stay below the VRAM limits of your GPU.
target_face: desired face size. Upscale or downscale the whole image to fit the detected face into that dimension.
fixed_ratio: fixed scale. Ignores the face size, but doesn't ignore the max_res limit.
max_upscale: maximum upscale ratio. Prevents scaling images with tiny faces into a blurry mess.
"""
def scale_by_face_size(_img, max_res=1_500_000, target_face=256, fix_ratio=0, max_upscale=2, VERBOSE=False):
    """Detect faces in ``_img`` and uniformly rescale it via :func:`scale`."""
    boxes, _ = detect(_img)
    if VERBOSE:
        print('boxes', boxes)
    return scale(boxes, _img, max_res, target_face, fix_ratio, max_upscale, VERBOSE)
| 1,574 | 0 | 89 |
719922355fdc1bd4f77e6d2055a74f97ffa8ac71 | 621 | py | Python | src/ansys/templates/python/pyace_flask/{{cookiecutter.__project_name_slug}}/src/blueprints/health.py | pyansys/pyansys-templates | dbbcc7e89c1014bb68e065bf70af30c10ecfadb6 | [
"MIT"
] | null | null | null | src/ansys/templates/python/pyace_flask/{{cookiecutter.__project_name_slug}}/src/blueprints/health.py | pyansys/pyansys-templates | dbbcc7e89c1014bb68e065bf70af30c10ecfadb6 | [
"MIT"
] | 16 | 2022-03-18T09:17:10.000Z | 2022-03-28T06:52:05.000Z | src/ansys/templates/python/pyace_flask/{{cookiecutter.__project_name_slug}}/src/blueprints/health.py | pyansys/ansys-templates | f7def562e23c5c8db51c17d56a7c34f62f77077d | [
"MIT"
] | 1 | 2022-03-16T18:23:12.000Z | 2022-03-16T18:23:12.000Z | {%- if cookiecutter.copyright != "None" -%}
# Copyright (c) {% now "utc", '%Y' %}, {{ cookiecutter.copyright }}. Unauthorised use, distribution or duplication is prohibited
{% endif %}
"""
{{ cookiecutter.project_name }}.
{{ cookiecutter.library_name }}
"""
from flask import Blueprint, jsonify
from observability.logger import Logger
blueprint = Blueprint("health_check", __name__, url_prefix="/api/health")
logger = Logger.init("{{ cookiecutter.__project_name_slug }}")
@blueprint.route("/")
def health_check():
"""Check health status."""
logger.info("Health check")
return jsonify({"status": "ok"})
| 25.875 | 128 | 0.689211 | {%- if cookiecutter.copyright != "None" -%}
# Copyright (c) {% now "utc", '%Y' %}, {{ cookiecutter.copyright }}. Unauthorised use, distribution or duplication is prohibited
{% endif %}
"""
{{ cookiecutter.project_name }}.
{{ cookiecutter.library_name }}
"""
from flask import Blueprint, jsonify
from observability.logger import Logger
blueprint = Blueprint("health_check", __name__, url_prefix="/api/health")
logger = Logger.init("{{ cookiecutter.__project_name_slug }}")
# Liveness endpoint: GET under the /api/health blueprint prefix answers
# 200 with a static {"status": "ok"} body.
@blueprint.route("/")
def health_check():
    """Check health status."""
    # Log every probe so monitoring traffic is visible in the service log.
    logger.info("Health check")
    return jsonify({"status": "ok"})
| 0 | 0 | 0 |
6fc60e43f1a5c92d6bfc4dc8ba0dc775c230b8ca | 9,953 | py | Python | tests/test_news.py | Paule3569/feuersoftware | fe2bc9f71a45d4b9232df8d3fe8d50239c775296 | [
"MIT"
] | 1 | 2021-04-29T10:57:48.000Z | 2021-04-29T10:57:48.000Z | tests/test_news.py | Paule3569/feuersoftware | fe2bc9f71a45d4b9232df8d3fe8d50239c775296 | [
"MIT"
] | null | null | null | tests/test_news.py | Paule3569/feuersoftware | fe2bc9f71a45d4b9232df8d3fe8d50239c775296 | [
"MIT"
] | 1 | 2018-08-28T14:30:02.000Z | 2018-08-28T14:30:02.000Z | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys
import os
import pytest
from mock import patch
sys.path.insert(0, os.path.abspath('./'))
from feuersoftware import PublicAPI
TOKEN = '2xgRoQfoMGb4IveCDJIZqOO1l8hZZ5jT5mAw7SSk1otrFSq50IA2HIYB3luEpv7Vw8BWwG'\
'Y2zV96VUkOF3FCZs2OP03qaTWF3CDrUHOKndvLIFTTgx0FCMBTFBRF1DfG4g3rs8BSMHB4'\
'6qph1AlxOZ6parmJlp90V3GQB4EoI6DFdKE4SZeBuu46mXoaDlSmpTTS3FCpeG7oEUJVgy'\
'pLZkZSFPRng5HdKhp6HG2XmNIMAtKTG3DAUWuKRi3cZ4JstLj05y4r7jt81g4DYXz9gVYc'\
'UWk2pOkIZ9RPmu0s4LlaXHEK3TJlxLIUt5eHIzPUVKXyhdJDckviPsTYNfRxkpcNGd0vAb'\
'zfzwMadgb4xaOi1v6ZpsRfXyOPgpudcnO6rwwi9TlAWNZ2075CO7HVFEP31yGhXmYsdFwj'\
'ne3UIraWovMWHqeyv2yQLigKLePDAgXYUFqQpZ9P5ScznSMUg0ZnxS0Miy0qKe9zDYtqTk'\
'qQVwrUGfGVFp4Ti83NJLCCGUOCmF0ovOB28mYyQIqGAi2MDaNIuAvz6HT1tGAo5nYdzOeu'
@patch("feuersoftware.logging.Logger.info")
@patch("feuersoftware.requests")
@patch("feuersoftware.logging.Logger.error")
@patch("feuersoftware.requests")
@patch("feuersoftware.logging.Logger.info")
@patch("feuersoftware.requests")
@patch("feuersoftware.logging.Logger.info")
@patch("feuersoftware.requests")
@patch("feuersoftware.logging.Logger.info")
@patch("feuersoftware.logging.Logger.warning")
@patch("feuersoftware.requests")
@patch("feuersoftware.logging.Logger.error")
@patch("feuersoftware.requests")
@patch("feuersoftware.logging.Logger.info")
@patch("feuersoftware.requests")
@patch("feuersoftware.logging.Logger.error")
@patch("feuersoftware.requests")
@patch("feuersoftware.logging.Logger.info")
@patch("feuersoftware.requests")
@patch("feuersoftware.logging.Logger.info")
@patch("feuersoftware.requests")
@patch("feuersoftware.logging.Logger.info")
@patch("feuersoftware.logging.Logger.warning")
@patch("feuersoftware.requests")
@patch("feuersoftware.logging.Logger.error")
@patch("feuersoftware.requests")
| 40.958848 | 128 | 0.683412 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys
import os
import pytest
from mock import patch
sys.path.insert(0, os.path.abspath('./'))
from feuersoftware import PublicAPI
TOKEN = '2xgRoQfoMGb4IveCDJIZqOO1l8hZZ5jT5mAw7SSk1otrFSq50IA2HIYB3luEpv7Vw8BWwG'\
'Y2zV96VUkOF3FCZs2OP03qaTWF3CDrUHOKndvLIFTTgx0FCMBTFBRF1DfG4g3rs8BSMHB4'\
'6qph1AlxOZ6parmJlp90V3GQB4EoI6DFdKE4SZeBuu46mXoaDlSmpTTS3FCpeG7oEUJVgy'\
'pLZkZSFPRng5HdKhp6HG2XmNIMAtKTG3DAUWuKRi3cZ4JstLj05y4r7jt81g4DYXz9gVYc'\
'UWk2pOkIZ9RPmu0s4LlaXHEK3TJlxLIUt5eHIzPUVKXyhdJDckviPsTYNfRxkpcNGd0vAb'\
'zfzwMadgb4xaOi1v6ZpsRfXyOPgpudcnO6rwwi9TlAWNZ2075CO7HVFEP31yGhXmYsdFwj'\
'ne3UIraWovMWHqeyv2yQLigKLePDAgXYUFqQpZ9P5ScznSMUg0ZnxS0Miy0qKe9zDYtqTk'\
'qQVwrUGfGVFp4Ti83NJLCCGUOCmF0ovOB28mYyQIqGAi2MDaNIuAvz6HT1tGAo5nYdzOeu'
@patch("feuersoftware.logging.Logger.info")
@patch("feuersoftware.requests")
def test_get_news(requests_mock, info_mock):
    """A 200 reply to GET /news hits the right URL/headers and logs success."""
    requests_mock.get.return_value.status_code = 200
    PublicAPI(TOKEN).get_news()
    requests_mock.get.assert_called_once_with(
        "https://connectapi.feuersoftware.com/interfaces/public/news",
        headers={
            "authorization": f"bearer {TOKEN}",
            "accept": "application/json",
            "content-type": "application/json",
        })
    info_mock.assert_called_with("Success, API call 'get news' complete")


@patch("feuersoftware.logging.Logger.error")
@patch("feuersoftware.requests")
def test_error_get_news(requests_mock, error_mock):
    """A 401 reply to GET /news is reported through Logger.error."""
    requests_mock.get.return_value.status_code = 401
    requests_mock.get.return_value.text = "unauthorized"
    PublicAPI("ABCD").get_news()
    error_mock.assert_called_with("Error while sending API call 'get news': 401 unauthorized")


@patch("feuersoftware.logging.Logger.info")
@patch("feuersoftware.requests")
def test_minimal_post_news(requests_mock, info_mock):
    """Posting news with only the required fields sends the minimal body."""
    requests_mock.post.return_value.status_code = 200
    PublicAPI(TOKEN).post_news(
        title="Test Title",
        content="Test Content",
        start="2019-06-01T12:00:00",
        end="2019-06-01T18:00:00")
    requests_mock.post.assert_called_once_with(
        "https://connectapi.feuersoftware.com/interfaces/public/news?newsType=siteNews",
        data='{"title": "Test Title", "content": "Test Content", "start": "2019-06-01T12:00:00", "end": "2019-06-01T18:00:00"}',
        headers={
            "authorization": f"bearer {TOKEN}",
            "accept": "application/json",
            "content-type": "application/json",
        })
    info_mock.assert_called_with("Success, API call 'post news' complete")


@patch("feuersoftware.logging.Logger.info")
@patch("feuersoftware.requests")
def test_full_post_news(requests_mock, info_mock):
    """Posting news with every optional field serialises them all."""
    requests_mock.post.return_value.status_code = 200
    PublicAPI(TOKEN).post_news(
        title="Test Title",
        content="Test Content",
        start="2019-06-01T12:00:00",
        end="2019-06-01T18:00:00",
        groups=["Kommandanten", "Ausbilder"],
        mailinglists=["Kommando-ML", "Ausbilder-ML"],
        site="Gerätehaus")
    requests_mock.post.assert_called_once_with(
        "https://connectapi.feuersoftware.com/interfaces/public/news?newsType=siteNews",
        data='{"title": "Test Title", "content": "Test Content", '
             '"start": "2019-06-01T12:00:00", "end": "2019-06-01T18:00:00", '
             '"groups": ["Kommandanten", "Ausbilder"], '
             '"mailinglists": ["Kommando-ML", "Ausbilder-ML"], '
             '"site": "Ger\\u00e4tehaus"}',
        headers={
            "authorization": f"bearer {TOKEN}",
            "accept": "application/json",
            "content-type": "application/json",
        })
    info_mock.assert_called_with("Success, API call 'post news' complete")


@patch("feuersoftware.logging.Logger.info")
@patch("feuersoftware.logging.Logger.warning")
@patch("feuersoftware.requests")
def test_invalid_arg_post_news(requests_mock, warning_mock, info_mock):
    """Unknown keyword arguments are warned about and dropped from the body."""
    requests_mock.post.return_value.status_code = 200
    PublicAPI(TOKEN).post_news(
        title="Test Title",
        content="Test Content",
        start="2019-06-01T12:00:00",
        end="2019-06-01T18:00:00",
        invalid_arg="invalid")
    warning_mock.assert_called_with('Invalid argument passed to post_news: invalid_arg=invalid')
    requests_mock.post.assert_called_once_with(
        "https://connectapi.feuersoftware.com/interfaces/public/news?newsType=siteNews",
        data='{"title": "Test Title", "content": "Test Content", "start": "2019-06-01T12:00:00", "end": "2019-06-01T18:00:00"}',
        headers={
            "authorization": f"bearer {TOKEN}",
            "accept": "application/json",
            "content-type": "application/json",
        })
    info_mock.assert_called_with("Success, API call 'post news' complete")


@patch("feuersoftware.logging.Logger.error")
@patch("feuersoftware.requests")
def test_error_post_news(requests_mock, error_mock):
    """A 401 reply to POST /news is reported through Logger.error."""
    requests_mock.post.return_value.status_code = 401
    requests_mock.post.return_value.text = "unauthorized"
    PublicAPI("ABCD").post_news(
        title="Test Title",
        content="Test Content",
        start="2019-06-01T12:00:00",
        end="2019-06-01T18:00:00")
    error_mock.assert_called_with("Error while sending API call 'post news': 401 unauthorized")
@patch("feuersoftware.logging.Logger.info")
@patch("feuersoftware.requests")
def test_delete_news(requests_mock, info_mock):
    """A 204 reply to DELETE /news/<id> hits the right URL and logs success."""
    requests_mock.delete.return_value.status_code = 204
    PublicAPI(TOKEN).delete_news(1)
    requests_mock.delete.assert_called_once_with(
        "https://connectapi.feuersoftware.com/interfaces/public/news/1",
        headers={
            "authorization": f"bearer {TOKEN}",
            "accept": "application/json",
            "content-type": "application/json",
        })
    info_mock.assert_called_with("Success, API call 'delete news' complete")


@patch("feuersoftware.logging.Logger.error")
@patch("feuersoftware.requests")
def test_error_delete_news(requests_mock, error_mock):
    """A 401 reply to DELETE /news/<id> is reported through Logger.error."""
    requests_mock.delete.return_value.status_code = 401
    requests_mock.delete.return_value.text = "unauthorized"
    PublicAPI("ABCD").delete_news(1)
    error_mock.assert_called_with("Error while sending API call 'delete news': 401 unauthorized")
@patch("feuersoftware.logging.Logger.info")
@patch("feuersoftware.requests")
def test_minimal_put_news(requests_mock, info_mock):
    """Updating news with only the required fields sends the minimal body."""
    requests_mock.put.return_value.status_code = 200
    PublicAPI(TOKEN).put_news(
        id=1,
        title="Test Title",
        content="Test Content",
        start="2019-06-01T12:00:00",
        end="2019-06-01T18:00:00")
    requests_mock.put.assert_called_once_with(
        "https://connectapi.feuersoftware.com/interfaces/public/news/1",
        data='{"title": "Test Title", "content": "Test Content", "start": "2019-06-01T12:00:00", "end": "2019-06-01T18:00:00"}',
        headers={
            "authorization": f"bearer {TOKEN}",
            "accept": "application/json",
            "content-type": "application/json",
        })
    info_mock.assert_called_with("Success, API call 'put news' complete")


@patch("feuersoftware.logging.Logger.info")
@patch("feuersoftware.requests")
def test_full_put_news(requests_mock, info_mock):
    """Updating news with every optional field serialises them all."""
    requests_mock.put.return_value.status_code = 200
    PublicAPI(TOKEN).put_news(
        id=1,
        title="Test Title",
        content="Test Content",
        start="2019-06-01T12:00:00",
        end="2019-06-01T18:00:00",
        groups=["Kommandanten", "Ausbilder"],
        mailinglists=["Kommando-ML", "Ausbilder-ML"],
        site="Gerätehaus")
    requests_mock.put.assert_called_once_with(
        "https://connectapi.feuersoftware.com/interfaces/public/news/1",
        data='{"title": "Test Title", "content": "Test Content", '
             '"start": "2019-06-01T12:00:00", "end": "2019-06-01T18:00:00", '
             '"groups": ["Kommandanten", "Ausbilder"], '
             '"mailinglists": ["Kommando-ML", "Ausbilder-ML"], '
             '"site": "Ger\\u00e4tehaus"}',
        headers={
            "authorization": f"bearer {TOKEN}",
            "accept": "application/json",
            "content-type": "application/json",
        })
    info_mock.assert_called_with("Success, API call 'put news' complete")


@patch("feuersoftware.logging.Logger.info")
@patch("feuersoftware.logging.Logger.warning")
@patch("feuersoftware.requests")
def test_invalid_arg_put_news(requests_mock, warning_mock, info_mock):
    """Unknown keyword arguments are warned about and dropped from the body."""
    requests_mock.put.return_value.status_code = 200
    PublicAPI(TOKEN).put_news(
        id=1,
        title="Test Title",
        content="Test Content",
        start="2019-06-01T12:00:00",
        end="2019-06-01T18:00:00",
        invalid_arg="invalid")
    warning_mock.assert_called_with('Invalid argument passed to put_news: invalid_arg=invalid')
    requests_mock.put.assert_called_once_with(
        "https://connectapi.feuersoftware.com/interfaces/public/news/1",
        data='{"title": "Test Title", "content": "Test Content", "start": "2019-06-01T12:00:00", "end": "2019-06-01T18:00:00"}',
        headers={
            "authorization": f"bearer {TOKEN}",
            "accept": "application/json",
            "content-type": "application/json",
        })
    info_mock.assert_called_with("Success, API call 'put news' complete")


@patch("feuersoftware.logging.Logger.error")
@patch("feuersoftware.requests")
def test_error_put_news(requests_mock, error_mock):
    """A 401 reply to PUT /news/<id> is reported through Logger.error."""
    requests_mock.put.return_value.status_code = 401
    requests_mock.put.return_value.text = "unauthorized"
    PublicAPI("ABCD").put_news(
        id=1,
        title="Test Title",
        content="Test Content",
        start="2019-06-01T12:00:00",
        end="2019-06-01T18:00:00")
    error_mock.assert_called_with("Error while sending API call 'put news': 401 unauthorized")
| 7,805 | 0 | 264 |
abcd1e3a4b4fa40687d4d76ca3859663c28333f4 | 1,266 | py | Python | strangeflix/provider/migrations/0013_auto_20201027_1642.py | samsoldeinstein/webster2020 | 9795635e806caa261bb33d629f3d1f2bd603638c | [
"MIT"
] | 6 | 2020-11-02T16:40:56.000Z | 2020-11-07T06:59:00.000Z | strangeflix/provider/migrations/0013_auto_20201027_1642.py | samsoldeinstein/webster2020 | 9795635e806caa261bb33d629f3d1f2bd603638c | [
"MIT"
] | null | null | null | strangeflix/provider/migrations/0013_auto_20201027_1642.py | samsoldeinstein/webster2020 | 9795635e806caa261bb33d629f3d1f2bd603638c | [
"MIT"
] | 2 | 2020-11-03T05:20:25.000Z | 2020-11-03T05:38:47.000Z | # Generated by Django 3.1.2 on 2020-10-27 11:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 38.363636 | 242 | 0.629542 | # Generated by Django 3.1.2 on 2020-10-27 11:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Widens ReportComment.flag_val to the five report reasons below and adds
    # the Favourites join table (user <-> video many-to-many).
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('provider', '0012_reportcomment_reportvideo'),
    ]
    operations = [
        migrations.AlterField(
            model_name='reportcomment',
            name='flag_val',
            # Choices 1..5 are the report reasons; default flag is 1 (spam).
            field=models.PositiveSmallIntegerField(choices=[(1, 'Unwanted commercial content or spam'), (2, 'Sexually explicit material'), (3, 'Child abuse'), (4, 'Hate speech or graphic violence'), (5, 'Harassment or bullying')], default=1),
        ),
        migrations.CreateModel(
            name='Favourites',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('users', models.ManyToManyField(related_name='favourites', to=settings.AUTH_USER_MODEL)),
                # PROTECT: a video cannot be deleted while any user favourites it.
                ('video_id', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='provider.videos')),
            ],
            options={
                'verbose_name_plural': 'Favourites',
            },
        ),
    ]
b831d0118bd1cb0ebd9c4bf798fd205bc356bfe5 | 4,952 | py | Python | result_service_gui/services/dashboard_adapter.py | abdulfahad66/result-service-gui | 214342dd6d00f1173bfe90f8429c7d6c9947783b | [
"Apache-2.0"
] | null | null | null | result_service_gui/services/dashboard_adapter.py | abdulfahad66/result-service-gui | 214342dd6d00f1173bfe90f8429c7d6c9947783b | [
"Apache-2.0"
] | null | null | null | result_service_gui/services/dashboard_adapter.py | abdulfahad66/result-service-gui | 214342dd6d00f1173bfe90f8429c7d6c9947783b | [
"Apache-2.0"
] | null | null | null | """Module for events adapter."""
import copy
import logging
import os
from typing import List
from aiohttp import ClientSession
from aiohttp import hdrs
from aiohttp import web
from multidict import MultiDict
EVENTS_HOST_SERVER = os.getenv("EVENTS_HOST_SERVER", "localhost")
EVENTS_HOST_PORT = os.getenv("EVENTS_HOST_PORT", "8082")
EVENT_SERVICE_URL = f"http://{EVENTS_HOST_SERVER}:{EVENTS_HOST_PORT}"
class DashboardAdapter:
    """Thin async client for the event-service REST API (CRUD on /events)."""

    @staticmethod
    def _headers(token: str) -> MultiDict:
        """Build the JSON content-type + bearer-auth headers.

        Every endpoint takes the same two headers, so they are built in one
        place instead of being copy-pasted into each method.
        """
        return MultiDict(
            [
                (hdrs.CONTENT_TYPE, "application/json"),
                (hdrs.AUTHORIZATION, f"Bearer {token}"),
            ]
        )

    async def get_all_events(self, token: str) -> List:
        """Return the list of all events, or [] if the request fails."""
        events = []
        async with ClientSession() as session:
            async with session.get(
                f"{EVENT_SERVICE_URL}/events", headers=self._headers(token)
            ) as resp:
                logging.debug(f"get_all_events - got response {resp.status}")
                if resp.status == 200:
                    events = await resp.json()
                    logging.debug(f"events - got response {events}")
                elif resp.status == 401:
                    logging.info("TODO Performing new login")
                    # Perform login
                else:
                    logging.error(f"Error {resp.status} getting events: {resp} ")
        return events

    async def get_event(self, token: str, id: str) -> dict:
        """Return the event with the given id, or {} if the request fails."""
        event = {}
        async with ClientSession() as session:
            async with session.get(
                f"{EVENT_SERVICE_URL}/events/{id}", headers=self._headers(token)
            ) as resp:
                logging.debug(f"get_event {id} - got response {resp.status}")
                if resp.status == 200:
                    event = await resp.json()
                    logging.debug(f"event - got response {event}")
                else:
                    logging.error(f"Error {resp.status} getting events: {resp} ")
        return event

    async def create_event(self, token: str, event: dict) -> str:
        """Create a new event and return its id.

        Raises web.HTTPBadRequest when the service does not answer 201.
        """
        id = ""
        # Deep-copy so the caller's dict is never mutated by serialization.
        request_body = copy.deepcopy(event)
        async with ClientSession() as session:
            async with session.post(
                f"{EVENT_SERVICE_URL}/events", headers=self._headers(token), json=request_body
            ) as resp:
                if resp.status == 201:
                    logging.debug(f"result - got response {resp}")
                    location = resp.headers[hdrs.LOCATION]
                    # The Location header is a URL: always '/'-separated.
                    # (os.path.sep would break on Windows, where it is '\\'.)
                    id = location.split("/")[-1]
                else:
                    logging.error(f"create_event failed - {resp.status}")
                    raise web.HTTPBadRequest(reason="Create event failed.")
        return id

    async def delete_event(self, token: str, id: str) -> str:
        """Delete an event and return the HTTP status code as a string.

        Raises web.HTTPBadRequest when the service does not answer 204.
        """
        url = f"{EVENT_SERVICE_URL}/events/{id}"
        async with ClientSession() as session:
            async with session.delete(url, headers=self._headers(token)) as response:
                # Inspect the response while its context is still open,
                # instead of after the connection has been released.
                logging.debug(f"Delete event: {id} - res {response.status}")
                if response.status == 204:
                    logging.debug(f"result - got response {response}")
                else:
                    logging.error(f"delete_event failed - {response.status}, {response}")
                    raise web.HTTPBadRequest(reason="Delete event failed.")
                status = response.status
        return str(status)

    async def update_event(self, token: str, id: str, request_body: dict) -> str:
        """Update an event and return the HTTP status code as a string.

        Raises web.HTTPBadRequest when the service does not answer 204.
        """
        async with ClientSession() as session:
            async with session.put(
                f"{EVENT_SERVICE_URL}/events/{id}", headers=self._headers(token), json=request_body
            ) as resp:
                if resp.status == 204:
                    logging.debug(f"update event - got response {resp}")
                else:
                    logging.error(f"update_event failed - {resp.status}")
                    raise web.HTTPBadRequest(reason="Update event failed.")
                logging.debug(f"Updated event: {id} - res {resp.status}")
                status = resp.status
        return str(status)
| 37.515152 | 86 | 0.539782 | """Module for events adapter."""
import copy
import logging
import os
from typing import List
from aiohttp import ClientSession
from aiohttp import hdrs
from aiohttp import web
from multidict import MultiDict
EVENTS_HOST_SERVER = os.getenv("EVENTS_HOST_SERVER", "localhost")
EVENTS_HOST_PORT = os.getenv("EVENTS_HOST_PORT", "8082")
EVENT_SERVICE_URL = f"http://{EVENTS_HOST_SERVER}:{EVENTS_HOST_PORT}"
class DashboardAdapter:
"""Class representing events."""
async def get_all_events(self, token: str) -> List:
"""Get all events function."""
events = []
headers = MultiDict(
[
(hdrs.CONTENT_TYPE, "application/json"),
(hdrs.AUTHORIZATION, f"Bearer {token}"),
]
)
async with ClientSession() as session:
async with session.get(
f"{EVENT_SERVICE_URL}/events", headers=headers
) as resp:
logging.debug(f"get_all_events - got response {resp.status}")
if resp.status == 200:
events = await resp.json()
logging.debug(f"events - got response {events}")
elif resp.status == 401:
logging.info("TODO Performing new login")
# Perform login
else:
logging.error(f"Error {resp.status} getting events: {resp} ")
return events
async def get_event(self, token: str, id: str) -> dict:
"""Get event function."""
event = {}
headers = MultiDict(
[
(hdrs.CONTENT_TYPE, "application/json"),
(hdrs.AUTHORIZATION, f"Bearer {token}"),
]
)
async with ClientSession() as session:
async with session.get(
f"{EVENT_SERVICE_URL}/events/{id}", headers=headers
) as resp:
logging.debug(f"get_event {id} - got response {resp.status}")
if resp.status == 200:
event = await resp.json()
logging.debug(f"event - got response {event}")
else:
logging.error(f"Error {resp.status} getting events: {resp} ")
return event
async def create_event(self, token: str, event: dict) -> str:
"""Create new event function."""
id = ""
headers = MultiDict(
[
(hdrs.CONTENT_TYPE, "application/json"),
(hdrs.AUTHORIZATION, f"Bearer {token}"),
]
)
request_body = copy.deepcopy(event)
async with ClientSession() as session:
async with session.post(
f"{EVENT_SERVICE_URL}/events", headers=headers, json=request_body
) as resp:
if resp.status == 201:
logging.debug(f"result - got response {resp}")
location = resp.headers[hdrs.LOCATION]
id = location.split(os.path.sep)[-1]
else:
logging.error(f"create_event failed - {resp.status}")
raise web.HTTPBadRequest(reason="Create event failed.")
return id
async def delete_event(self, token: str, id: str) -> str:
"""Delete event function."""
headers = MultiDict(
[
(hdrs.CONTENT_TYPE, "application/json"),
(hdrs.AUTHORIZATION, f"Bearer {token}"),
]
)
url = f"{EVENT_SERVICE_URL}/events/{id}"
async with ClientSession() as session:
async with session.delete(url, headers=headers) as response:
pass
logging.debug(f"Delete event: {id} - res {response.status}")
if response.status == 204:
logging.debug(f"result - got response {response}")
else:
logging.error(f"delete_event failed - {response.status}, {response}")
raise web.HTTPBadRequest(reason="Delete event failed.")
return str(response.status)
async def update_event(self, token: str, id: str, request_body: dict) -> str:
"""Update event function."""
headers = MultiDict(
[
(hdrs.CONTENT_TYPE, "application/json"),
(hdrs.AUTHORIZATION, f"Bearer {token}"),
]
)
async with ClientSession() as session:
async with session.put(
f"{EVENT_SERVICE_URL}/events/{id}", headers=headers, json=request_body
) as resp:
if resp.status == 204:
logging.debug(f"update event - got response {resp}")
else:
logging.error(f"update_event failed - {resp.status}")
raise web.HTTPBadRequest(reason="Update event failed.")
logging.debug(f"Updated event: {id} - res {resp.status}")
return str(resp.status)
| 0 | 0 | 0 |
5dd8c1a13aa126acef031096fe6c5f2daa3b4777 | 1,726 | py | Python | scripts/feots_compare.py | schoonovernumerics/FEOTs | d8bf24d0e0c23a9ee65e2be6a75f5dbc83d3e5ad | [
"BSD-3-Clause"
] | null | null | null | scripts/feots_compare.py | schoonovernumerics/FEOTs | d8bf24d0e0c23a9ee65e2be6a75f5dbc83d3e5ad | [
"BSD-3-Clause"
] | 13 | 2017-08-03T22:30:25.000Z | 2019-01-23T16:32:28.000Z | scripts/feots_compare.py | schoonovernumerics/FEOTS | d8bf24d0e0c23a9ee65e2be6a75f5dbc83d3e5ad | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python3
DOC="""feots_compare
feots_compare is use to compare two FEOTS NetCDF output files and report simple statistics.
Currently feots_compare will generate a histogram of log_{10}( |f_1 - f_2| ) where f_1 and
f_2 are tracer fields from two FEOTS output files.
Usage:
feots_compare absdiff <file1> <file2> [--field=<tracerfield>]
Commands:
absdiff Compute statistics using absolute differences between two FEOTS files
Options:
-h --help Display this help screen
--field=<string> Specification of the field in the NetCDF file to compare [default: DyeTracer_01]
"""
import netCDF4 as nc
import numpy as np
from matplotlib import pyplot as plt
from docopt import docopt
#END parse_cli
#END main
if __name__ == '__main__':
main()
| 27.83871 | 103 | 0.648899 | #!/usr/bin/python3
DOC="""feots_compare
feots_compare is use to compare two FEOTS NetCDF output files and report simple statistics.
Currently feots_compare will generate a histogram of log_{10}( |f_1 - f_2| ) where f_1 and
f_2 are tracer fields from two FEOTS output files.
Usage:
feots_compare absdiff <file1> <file2> [--field=<tracerfield>]
Commands:
absdiff Compute statistics using absolute differences between two FEOTS files
Options:
-h --help Display this help screen
--field=<string> Specification of the field in the NetCDF file to compare [default: DyeTracer_01]
"""
import netCDF4 as nc
import numpy as np
from matplotlib import pyplot as plt
from docopt import docopt
def parse_cli():
    """Parse the command line against the module-level DOC docopt spec."""
    args = docopt(DOC,version='feots_compare 0.0.0')
    return args
#END parse_cli
def load_netcdf(filename, field):
    """Read one variable's full array from a NetCDF4 file.

    The dataset is opened read-only and closed again once the field has
    been copied out, so comparing many files does not leak file handles
    (the original never called close()).
    """
    rootgrp = nc.Dataset(filename, "r", format="NETCDF4")
    try:
        # [:] materialises the variable's data before the file is closed.
        ncdata = rootgrp.variables[field][:]
    finally:
        rootgrp.close()
    return ncdata
def main():
    """Entry point: dispatch on the parsed CLI command.

    Only ``absdiff`` exists so far: load the same field from both files,
    take log10 of the absolute difference, and show a 50-bin histogram
    over [-16, 0] (machine precision up to O(1) differences).
    """
    args = parse_cli()
    if args['absdiff'] :
        print('Comparing {FIELD} in {FILE1} and {FILE2}'.format(FIELD=args['--field'],
                                                                FILE1=args['<file1>'],
                                                                FILE2=args['<file2>']))
        file1_data = load_netcdf(args['<file1>'],args['--field'])
        file2_data = load_netcdf(args['<file2>'],args['--field'])
        # log10(|f1 - f2|): identical points give -inf and fall outside the bins.
        absdiff = np.log10(np.absolute( file1_data - file2_data ))
        absdiffHist, absdiffBins = np.histogram(absdiff, bins=50, range=(-16, 0))
        print(absdiffBins)
        print(absdiffHist)
        plt.hist(absdiff.flatten(), absdiffBins)
        plt.title("histogram")
        plt.show()
#END main
if __name__ == '__main__':
main()
| 870 | 0 | 69 |
c9e98295e343002fd25fd863b2b291218a72484e | 4,292 | py | Python | opendc-web/opendc-web-api/opendc/util/rest.py | Koen1999/opendc | f9b43518d2d50f33077734537a477539fca9f5b7 | [
"MIT"
] | null | null | null | opendc-web/opendc-web-api/opendc/util/rest.py | Koen1999/opendc | f9b43518d2d50f33077734537a477539fca9f5b7 | [
"MIT"
] | 4 | 2020-11-27T16:27:58.000Z | 2020-12-28T23:00:08.000Z | opendc-web/opendc-web-api/opendc/util/rest.py | Koen1999/opendc | f9b43518d2d50f33077734537a477539fca9f5b7 | [
"MIT"
] | null | null | null | import importlib
import json
import os
from oauth2client import client, crypt
from opendc.util import exceptions, parameter_checker
from opendc.util.exceptions import ClientError
class Request:
    """WebSocket message to REST request mapping."""
    def __init__(self, message=None):
        """Initialize a Request from a socket message.

        Raises MissingRequestParameterError, UnimplementedEndpointError,
        UnsupportedMethodError or AuthorizationTokenError when the message
        is malformed, targets an unknown endpoint/method, or carries an
        invalid Google ID token.
        """
        # Get the Request parameters from the message
        if message is None:
            return
        try:
            self.message = message
            self.id = message['id']
            self.path = message['path']
            self.method = message['method']
            self.params_body = message['parameters']['body']
            self.params_path = message['parameters']['path']
            self.params_query = message['parameters']['query']
            self.token = message['token']
        except KeyError as exception:
            raise exceptions.MissingRequestParameterError(exception)
        # Parse the path and import the appropriate module
        try:
            self.path = message['path'].strip('/')
            module_base = 'opendc.api.{}.endpoint'
            # e.g. 'projects/{id}' -> 'projects.id' so it maps onto a package path.
            module_path = self.path.replace('{', '').replace('}', '').replace('/', '.')
            self.module = importlib.import_module(module_base.format(module_path))
        except ImportError as e:
            print(e)
            raise exceptions.UnimplementedEndpointError('Unimplemented endpoint: {}.'.format(self.path))
        # Check the method
        if self.method not in ['POST', 'GET', 'PUT', 'PATCH', 'DELETE']:
            raise exceptions.UnsupportedMethodError('Non-rest method: {}'.format(self.method))
        if not hasattr(self.module, self.method):
            raise exceptions.UnsupportedMethodError('Unimplemented method at endpoint {}: {}'.format(
                self.path, self.method))
        # Verify the user
        if "OPENDC_FLASK_TESTING" in os.environ:
            # Test runs skip OAuth entirely and act as a fixed 'test' user.
            self.google_id = 'test'
            return
        try:
            self.google_id = self._verify_token(self.token)
        except crypt.AppIdentityError as e:
            raise exceptions.AuthorizationTokenError(e)
    def check_required_parameters(self, **kwargs):
        """Raise an error if a parameter is missing or of the wrong type."""
        try:
            parameter_checker.check(self, **kwargs)
        except exceptions.ParameterError as e:
            # Surface validation problems to the client as a 400 response.
            raise ClientError(Response(400, str(e)))
    def process(self):
        """Process the Request and return a Response."""
        method = getattr(self.module, self.method)
        try:
            response = method(self)
        except ClientError as e:
            # Stamp the request id on the error response so the client can
            # correlate it with the originating message.
            e.response.id = self.id
            return e.response
        response.id = self.id
        return response
    def to_JSON(self):
        """Return a JSON representation of this Request.

        Note: mutates self.message, scrubbing the id and auth token before
        serialization.
        """
        self.message['id'] = 0
        self.message['token'] = None
        return json.dumps(self.message)
    @staticmethod
    def _verify_token(token):
        """Return the ID of the signed-in user.
        Or throw an Exception if the token is invalid.
        """
        try:
            id_info = client.verify_id_token(token, os.environ['OPENDC_OAUTH_CLIENT_ID'])
        except Exception as e:
            print(e)
            raise crypt.AppIdentityError('Exception caught trying to verify ID token: {}'.format(e))
        # Reject tokens minted for another OAuth client or another issuer.
        if id_info['aud'] != os.environ['OPENDC_OAUTH_CLIENT_ID']:
            raise crypt.AppIdentityError('Unrecognized client.')
        if id_info['iss'] not in ['accounts.google.com', 'https://accounts.google.com']:
            raise crypt.AppIdentityError('Wrong issuer.')
        return id_info['sub']
class Response:
    """Maps a REST-style status/content pair back onto a websocket payload."""

    def __init__(self, status_code, status_description, content=None):
        """Create a response; the id is filled in later by Request.process."""
        self.id = 0
        self.status = {'code': status_code, 'description': status_description}
        self.content = content

    def to_JSON(self):
        """Serialize id, status and (when present) content as a JSON string."""
        serializable = {'id': self.id, 'status': self.status}
        if self.content is not None:
            serializable['content'] = self.content
        # default=str keeps non-JSON-native values (e.g. datetimes) from
        # raising during serialization.
        return json.dumps(serializable, default=str)
| 30.225352 | 104 | 0.608574 | import importlib
import json
import os
from oauth2client import client, crypt
from opendc.util import exceptions, parameter_checker
from opendc.util.exceptions import ClientError
class Request:
"""WebSocket message to REST request mapping."""
def __init__(self, message=None):
""""Initialize a Request from a socket message."""
# Get the Request parameters from the message
if message is None:
return
try:
self.message = message
self.id = message['id']
self.path = message['path']
self.method = message['method']
self.params_body = message['parameters']['body']
self.params_path = message['parameters']['path']
self.params_query = message['parameters']['query']
self.token = message['token']
except KeyError as exception:
raise exceptions.MissingRequestParameterError(exception)
# Parse the path and import the appropriate module
try:
self.path = message['path'].strip('/')
module_base = 'opendc.api.{}.endpoint'
module_path = self.path.replace('{', '').replace('}', '').replace('/', '.')
self.module = importlib.import_module(module_base.format(module_path))
except ImportError as e:
print(e)
raise exceptions.UnimplementedEndpointError('Unimplemented endpoint: {}.'.format(self.path))
# Check the method
if self.method not in ['POST', 'GET', 'PUT', 'PATCH', 'DELETE']:
raise exceptions.UnsupportedMethodError('Non-rest method: {}'.format(self.method))
if not hasattr(self.module, self.method):
raise exceptions.UnsupportedMethodError('Unimplemented method at endpoint {}: {}'.format(
self.path, self.method))
# Verify the user
if "OPENDC_FLASK_TESTING" in os.environ:
self.google_id = 'test'
return
try:
self.google_id = self._verify_token(self.token)
except crypt.AppIdentityError as e:
raise exceptions.AuthorizationTokenError(e)
def check_required_parameters(self, **kwargs):
"""Raise an error if a parameter is missing or of the wrong type."""
try:
parameter_checker.check(self, **kwargs)
except exceptions.ParameterError as e:
raise ClientError(Response(400, str(e)))
def process(self):
"""Process the Request and return a Response."""
method = getattr(self.module, self.method)
try:
response = method(self)
except ClientError as e:
e.response.id = self.id
return e.response
response.id = self.id
return response
def to_JSON(self):
"""Return a JSON representation of this Request"""
self.message['id'] = 0
self.message['token'] = None
return json.dumps(self.message)
@staticmethod
def _verify_token(token):
"""Return the ID of the signed-in user.
Or throw an Exception if the token is invalid.
"""
try:
id_info = client.verify_id_token(token, os.environ['OPENDC_OAUTH_CLIENT_ID'])
except Exception as e:
print(e)
raise crypt.AppIdentityError('Exception caught trying to verify ID token: {}'.format(e))
if id_info['aud'] != os.environ['OPENDC_OAUTH_CLIENT_ID']:
raise crypt.AppIdentityError('Unrecognized client.')
if id_info['iss'] not in ['accounts.google.com', 'https://accounts.google.com']:
raise crypt.AppIdentityError('Wrong issuer.')
return id_info['sub']
class Response:
"""Response to websocket mapping"""
def __init__(self, status_code, status_description, content=None):
"""Initialize a new Response."""
self.id = 0
self.status = {'code': status_code, 'description': status_description}
self.content = content
def to_JSON(self):
""""Return a JSON representation of this Response"""
data = {'id': self.id, 'status': self.status}
if self.content is not None:
data['content'] = self.content
return json.dumps(data, default=str)
| 0 | 0 | 0 |
aee06431ad2ec6bcfc0d5ab724b78f72227eea3a | 49,614 | py | Python | Scripts/simulation/statistics/ranked_statistic.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/statistics/ranked_statistic.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/statistics/ranked_statistic.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\statistics\ranked_statistic.py
# Compiled at: 2020-08-11 17:51:45
# Size of source mod 2**32: 58267 bytes
from protocolbuffers import SimObjectAttributes_pb2 as protocols, Commodities_pb2
import contextlib, operator
from bucks.bucks_enums import BucksType
from bucks.bucks_utils import BucksUtils
from distributor.shared_messages import IconInfoData
from event_testing.resolver import SingleSimResolver
from event_testing.test_events import TestEvent
from interactions.utils.loot import LootActions
from interactions.utils.tunable_icon import TunableIcon
from sims4.localization import TunableLocalizedString, TunableLocalizedStringFactory
from sims4.math import Threshold
from sims4.tuning.instances import HashedTunedInstanceMetaclass
from sims4.tuning.tunable import HasTunableReference, OptionalTunable, TunableList, Tunable, TunableMapping, TunableTuple, TunableEnumEntry, TunableResourceKey, TunableRange, TunableReference, TunableColor
from sims4.tuning.tunable_base import ExportModes, GroupNames
from sims4.utils import constproperty, classproperty, flexmethod
from singletons import DEFAULT
from statistics.commodity_messages import send_sim_ranked_stat_update_message, send_sim_ranked_stat_change_rank_change_update_message
from statistics.progressive_statistic_callback_mixin import ProgressiveStatisticCallbackMixin
from statistics.statistic_enums import StatisticLockAction
from ui.ui_dialog import UiDialogResponse
from ui.ui_dialog_notification import UiDialogNotification
import event_testing, services, sims4.log, sims4.resources, statistics, tag, telemetry_helper, ui.screen_slam
logger = sims4.log.Logger('RankedStatistic', default_owner='rfleig')
TELEMETRY_GROUP_RANKED_STAT = 'RKST'
TELEMETRY_HOOK_RANKED_STAT_LEVEL_CHANGE = 'LEVE'
TELEMETRY_FIELD_RANKED_STAT_TYPE = 'type'
TELEMETRY_FIELD_RANKED_STAT_LEVEL = 'leve'
ranked_stat_telemetry_writer = sims4.telemetry.TelemetryWriter(TELEMETRY_GROUP_RANKED_STAT)
L. 786 0 LOAD_CONST 0
2 STORE_FAST 'batch_rank_levels'
L. 787 4 SETUP_LOOP 200 'to 200'
6_0 COME_FROM 184 '184'
6 LOAD_FAST 'old_level'
8 LOAD_FAST 'new_level'
10 COMPARE_OP <
12 POP_JUMP_IF_FALSE 198 'to 198'
L. 788 14 LOAD_FAST 'old_level'
16 LOAD_CONST 1
18 INPLACE_ADD
20 STORE_FAST 'old_level'
L. 790 22 LOAD_FAST 'self'
24 LOAD_ATTR event_data
26 LOAD_METHOD get
28 LOAD_FAST 'old_level'
30 CALL_METHOD_1 1 '1 positional argument'
32 STORE_FAST 'event_data'
L. 791 34 LOAD_FAST 'event_data'
36 LOAD_CONST None
38 COMPARE_OP is-not
40 POP_JUMP_IF_FALSE 172 'to 172'
L. 792 42 LOAD_FAST 'self'
44 LOAD_ATTR tracker
46 LOAD_ATTR owner
48 LOAD_ATTR is_simulating
50 POP_JUMP_IF_FALSE 158 'to 158'
L. 793 52 LOAD_GLOBAL SingleSimResolver
54 LOAD_FAST 'self'
56 LOAD_ATTR tracker
58 LOAD_ATTR owner
60 CALL_FUNCTION_1 1 '1 positional argument'
62 STORE_FAST 'resolver'
L. 794 64 LOAD_FAST 'old_level'
66 LOAD_FAST 'self'
68 LOAD_ATTR highest_level
70 COMPARE_OP >
72 STORE_FAST 'is_new_level'
L. 795 74 LOAD_FAST 'is_new_level'
76 POP_JUMP_IF_FALSE 110 'to 110'
L. 797 78 SETUP_LOOP 104 'to 104'
80 LOAD_FAST 'event_data'
82 LOAD_ATTR loot
84 GET_ITER
86 FOR_ITER 102 'to 102'
88 STORE_FAST 'loot'
L. 798 90 LOAD_FAST 'loot'
92 LOAD_METHOD apply_to_resolver
94 LOAD_FAST 'resolver'
96 CALL_METHOD_1 1 '1 positional argument'
98 POP_TOP
100 JUMP_BACK 86 'to 86'
102 POP_BLOCK
104_0 COME_FROM_LOOP 78 '78'
L. 801 104 LOAD_FAST 'old_level'
106 LOAD_FAST 'self'
108 STORE_ATTR highest_level
110_0 COME_FROM 76 '76'
L. 802 110 LOAD_FAST 'event_data'
112 LOAD_ATTR rank_up
114 POP_JUMP_IF_FALSE 130 'to 130'
L. 803 116 LOAD_FAST 'self'
118 LOAD_ATTR increase_rank_level
120 LOAD_FAST 'is_new_level'
122 LOAD_FAST 'from_add'
124 LOAD_CONST ('new_rank', 'from_add')
126 CALL_FUNCTION_KW_2 2 '2 total positional and keyword args'
128 POP_TOP
130_0 COME_FROM 114 '114'
L. 804 130 SETUP_LOOP 172 'to 172'
132 LOAD_FAST 'event_data'
134 LOAD_ATTR loot_always
136 GET_ITER
138 FOR_ITER 154 'to 154'
140 STORE_FAST 'loot'
L. 805 142 LOAD_FAST 'loot'
144 LOAD_METHOD apply_to_resolver
146 LOAD_FAST 'resolver'
148 CALL_METHOD_1 1 '1 positional argument'
150 POP_TOP
152 JUMP_BACK 138 'to 138'
154 POP_BLOCK
156 JUMP_FORWARD 172 'to 172'
158_0 COME_FROM 50 '50'
L. 806 158 LOAD_FAST 'event_data'
160 LOAD_ATTR rank_up
162 POP_JUMP_IF_FALSE 172 'to 172'
L. 807 164 LOAD_FAST 'batch_rank_levels'
166 LOAD_CONST 1
168 INPLACE_ADD
170 STORE_FAST 'batch_rank_levels'
172_0 COME_FROM 162 '162'
172_1 COME_FROM 156 '156'
172_2 COME_FROM_LOOP 130 '130'
172_3 COME_FROM 40 '40'
L. 813 172 LOAD_FAST 'self'
174 LOAD_ATTR tracker
176 LOAD_ATTR owner
178 LOAD_ATTR is_npc
180 POP_JUMP_IF_FALSE 186 'to 186'
182 LOAD_FAST 'from_add'
184 POP_JUMP_IF_TRUE 6 'to 6'
186_0 COME_FROM 180 '180'
L. 816 186 LOAD_FAST 'self'
188 LOAD_METHOD _handle_level_change_telemetry
190 LOAD_FAST 'old_level'
192 CALL_METHOD_1 1 '1 positional argument'
194 POP_TOP
196 JUMP_BACK 6 'to 6'
198_0 COME_FROM 12 '12'
198 POP_BLOCK
200_0 COME_FROM_LOOP 4 '4'
L. 818 200 LOAD_FAST 'batch_rank_levels'
202 LOAD_CONST 0
204 COMPARE_OP >
206 POP_JUMP_IF_FALSE 220 'to 220'
L. 819 208 LOAD_FAST 'self'
210 LOAD_METHOD increase_rank_levels
212 LOAD_FAST 'batch_rank_levels'
214 CALL_METHOD_1 1 '1 positional argument'
216 POP_TOP
218 JUMP_FORWARD 232 'to 232'
220_0 COME_FROM 206 '206'
L. 823 220 LOAD_FAST 'self'
222 LOAD_ATTR create_and_send_commodity_update_msg
224 LOAD_CONST False
226 LOAD_CONST ('is_rate_change',)
228 CALL_FUNCTION_KW_1 1 '1 total positional and keyword args'
230 POP_TOP
232_0 COME_FROM 218 '218'
Parse error at or near `COME_FROM_LOOP' instruction at offset 172_2
@contextlib.contextmanager
@sims4.utils.classproperty
@sims4.utils.classproperty
@sims4.utils.classproperty
@sims4.utils.classproperty
@flexmethod
@constproperty
@classmethod
@classmethod
@classmethod
@flexmethod
@classproperty
@classmethod
@classmethod | 57.892649 | 781 | 0.617023 | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\statistics\ranked_statistic.py
# Compiled at: 2020-08-11 17:51:45
# Size of source mod 2**32: 58267 bytes
from protocolbuffers import SimObjectAttributes_pb2 as protocols, Commodities_pb2
import contextlib, operator
from bucks.bucks_enums import BucksType
from bucks.bucks_utils import BucksUtils
from distributor.shared_messages import IconInfoData
from event_testing.resolver import SingleSimResolver
from event_testing.test_events import TestEvent
from interactions.utils.loot import LootActions
from interactions.utils.tunable_icon import TunableIcon
from sims4.localization import TunableLocalizedString, TunableLocalizedStringFactory
from sims4.math import Threshold
from sims4.tuning.instances import HashedTunedInstanceMetaclass
from sims4.tuning.tunable import HasTunableReference, OptionalTunable, TunableList, Tunable, TunableMapping, TunableTuple, TunableEnumEntry, TunableResourceKey, TunableRange, TunableReference, TunableColor
from sims4.tuning.tunable_base import ExportModes, GroupNames
from sims4.utils import constproperty, classproperty, flexmethod
from singletons import DEFAULT
from statistics.commodity_messages import send_sim_ranked_stat_update_message, send_sim_ranked_stat_change_rank_change_update_message
from statistics.progressive_statistic_callback_mixin import ProgressiveStatisticCallbackMixin
from statistics.statistic_enums import StatisticLockAction
from ui.ui_dialog import UiDialogResponse
from ui.ui_dialog_notification import UiDialogNotification
import event_testing, services, sims4.log, sims4.resources, statistics, tag, telemetry_helper, ui.screen_slam
logger = sims4.log.Logger('RankedStatistic', default_owner='rfleig')
TELEMETRY_GROUP_RANKED_STAT = 'RKST'
TELEMETRY_HOOK_RANKED_STAT_LEVEL_CHANGE = 'LEVE'
TELEMETRY_FIELD_RANKED_STAT_TYPE = 'type'
TELEMETRY_FIELD_RANKED_STAT_LEVEL = 'leve'
ranked_stat_telemetry_writer = sims4.telemetry.TelemetryWriter(TELEMETRY_GROUP_RANKED_STAT)
class RankedStatistic(HasTunableReference, ProgressiveStatisticCallbackMixin, statistics.continuous_statistic_tuning.TunedContinuousStatistic, metaclass=HashedTunedInstanceMetaclass, manager=services.get_instance_manager(sims4.resources.Types.STATISTIC)):
@classmethod
def _verify_tuning_callback(cls):
super()._verify_tuning_callback()
ranks_tuned = [level_data for level_data in cls.event_data.values() if level_data.rank_up]
ranks_needed = len(ranks_tuned) + 1
actual_ranks = len(cls.rank_tuning)
tuned_rank_up_notifications = len(cls.rank_up_notification_tuning)
tuned_rank_down_notifications = len(cls.rank_down_notification_tuning)
if actual_ranks != ranks_needed:
logger.error('{} ranks have been enabled, but there is tuning for {} ranks in the rank_tuning. Please double check the tuning for {}', ranks_needed, actual_ranks, cls)
if actual_ranks != tuned_rank_up_notifications:
logger.error('There are {} ranks tuned but {} rank up notifications tuned. These need to be the same. Please double check the tuning for {}', actual_ranks, tuned_rank_up_notifications, cls)
if tuned_rank_down_notifications > 0:
if actual_ranks != tuned_rank_down_notifications:
logger.error('There are {} ranks tuned but {} rank down notifications tuned. These need to be the same. Please double check the tuning for {}', actual_ranks, tuned_rank_down_notifications, cls)
INSTANCE_TUNABLES = {'stat_name':TunableLocalizedString(description='\n Localized name of this statistic.\n ',
allow_none=True),
'event_intervals':TunableList(description='\n The level boundaries for an event, specified as a delta from the\n previous value.\n ',
tunable=Tunable(description='\n Points required to reach this level.\n ',
tunable_type=int,
default=0),
export_modes=ExportModes.All),
'event_data':TunableMapping(description='\n The data associated with a specific tuned event. \n \n The Key is the event number as tuned in the event intervals.\n \n The value is a list of loots to apply when the event occurs and an\n bool for whether or not to rank up the stat. \n ',
key_type=int,
value_type=TunableTuple(description='\n The data associated with a tuned event from event_intervals.\n ',
rank_up=Tunable(description="\n If checked then this event will cause the statistic to rank\n up and all that entails. Currently that will increment\n the rank count.\n \n There should be a rank up entry for each of the levels \n tuned, except the initial rank. We assume that you don't \n need to rank into the initial rank. This means you will \n need one more level tuned than number of rank up events\n found in this list.\n ",
tunable_type=bool,
default=False),
loot=TunableList(description='\n A list of loots to apply when this event happens. This loot\n is only applied the first time you reach a specific level.\n If you want the loot applied every time you reach a level\n (for instance after you decay to a previous level and then\n regain a level) please use the loot_always tuning.\n ',
tunable=TunableReference(description='\n The loot to apply.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.ACTION)),
class_restrictions=('LootActions', 'RandomWeightedLoot'),
pack_safe=True)),
tooltip=TunableLocalizedStringFactory(description='\n The tooltip to display in the UI for each of the event\n lines. This is to be used for telling the user what loot \n they are going to get at an individual event.\n '),
level_down_loot=TunableList(description='\n A list of loots to apply when the Sim loses enough points \n to level down.\n ',
tunable=LootActions.TunableReference(pack_safe=True)),
tests=event_testing.tests.TunableTestSet(description="\n Tests to run when reaching this level. If the tests don't \n pass then the value will be set back to min points for \n the rank before it. This means that the Sim won't be able\n to make any progress towards the rank with the failed\n tests.\n ",
export_modes=(ExportModes.ServerXML)),
loot_always=TunableList(description='\n This loot is always awarded on level up, regardless of \n whether or not this level has already been achieved or not.\n \n If you want the loot to only be applied the first time you\n reach a certain level then please use the loot tuning.\n ',
tunable=TunableReference(description='\n The loot to award on level up.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.ACTION)),
class_restrictions=('LootActions', 'RandomWeightedLoot'),
pack_safe=True)),
loot_always_on_load=TunableList(description='\n This loot is always awarded when a sim loads with this\n level.\n ',
tunable=LootActions.TunableReference(pack_safe=True)),
export_class_name='EventDataTuple'),
tuple_name='TunableEventData',
export_modes=ExportModes.All),
'initial_rank':Tunable(description='\n The offset of the initial rank for this stat in UI.\n \n The use case of initial rank is if the display of the stat\n in UI needs to start with an initial fill (e.g. Occult Tracker),\n or if the fill first starts as empty (e.g. Fame). Discuss with UI\n what is required.\n ',
tunable_type=int,
default=1,
export_modes=ExportModes.All,
tuning_group=GroupNames.UI),
'rank_tuning':TunableMapping(description='\n This is the tuning that is associated with a specific rank level \n instead of each individual event level. When the rank has increased \n the matching information will be retrieved from here and used.\n \n There needs to be an equal number of ranks tuned to match all of \n the rank up events in event data plus an extra one for the \n rank you start out on initially.\n ',
key_type=int,
value_type=TunableTuple(description='\n A tuple of all the data for each Rank associated wit this\n ranked statistic.\n ',
rank_name=TunableLocalizedString(description="\n The rank's normal name.\n "),
icon=OptionalTunable(description='\n If enabled then the Rank Statistic will have an icon \n associated with this Rank.\n ',
tunable=TunableResourceKey(description='\n Icon to be displayed for the rank.\n ',
resource_types=(sims4.resources.CompoundTypes.IMAGE)),
enabled_by_default=True),
rank_description=OptionalTunable(description='\n When enabled this string will be used as the description\n for the rank.\n ',
tunable=TunableLocalizedString(description="\n The rank's description.\n ")),
rank_short_name=OptionalTunable(description='\n When enabled this string will be used as an alternate \n short name for the rank.\n ',
tunable=TunableLocalizedString(description="\n The rank's short name.\n ")),
rank_color=TunableColor.TunableColorRGBA(description='\n Tunable color tint provided by the rank.\n ',
export_modes=(
ExportModes.ClientBinary,),
tuning_group=(GroupNames.UI)),
hide_in_ui=Tunable(description='\n If checked, this rank will not be shown in some places in the UI (XP bars, Relationship tooltip, Gallery)\n ',
tunable_type=bool,
default=False),
export_class_name='RankDataTuple'),
tuple_name='TunableRankData',
export_modes=ExportModes.All),
'rank_down_notification_tuning':TunableMapping(description='\n A mapping of Rank to tuning needed to display all the notifications\n when a Sim ranks down. \n \n The number of notifications tuned must match the number of items\n in rank_tuning.\n ',
key_type=int,
value_type=TunableTuple(description='\n A Tuple containing both the rank down screen slam and the rank\n down notification to display.\n ',
show_notification_tests=event_testing.tests.TunableTestSet(description='\n Tests that must be true when the we want to show notification.\n '),
rank_down_screen_slam=OptionalTunable(description='\n Screen slam to show when Sim goes down to this rank level.\n Localization Tokens: Sim - {0.SimFirstName}, Rank Name - \n {1.String}, Rank Number - {2.Number}\n ',
tunable=(ui.screen_slam.TunableScreenSlamSnippet())),
rank_down_notification=OptionalTunable(description='\n The notification to display when the Sim obtains this\n rank. The text will be provided two tokens: the Sim owning\n the stat and a number representing the 1-based rank\n level.\n ',
tunable=UiDialogNotification.TunableFactory(locked_args={'text_tokens':DEFAULT,
'icon':None,
'secondary_icon':None})))),
'rank_up_notification_tuning':TunableMapping(description='\n A mapping of Rank to tuning needed to display all the notifications\n when a Sim ranks up. \n \n The number of notifications tuned must match the number of items\n in rank_tuning.\n ',
key_type=int,
value_type=TunableTuple(description='\n A Tuple containing both the rank up screen slam and the rank\n up notification to display.\n ',
show_notification_tests=event_testing.tests.TunableTestSet(description='\n Tests that must be true when the we want to show notification.\n '),
rank_up_screen_slam=OptionalTunable(description='\n Screen slam to show when reaches this rank level.\n Localization Tokens: Sim - {0.SimFirstName}, Rank Name - \n {1.String}, Rank Number - {2.Number}\n \n This will only happen the first time a rank is reached.\n ',
tunable=(ui.screen_slam.TunableScreenSlamSnippet())),
rank_up_notification=OptionalTunable(description='\n The notification to display when the Sim obtains this\n rank. The text will be provided two tokens: the Sim owning\n the stat and a number representing the 1-based rank\n level.\n \n This will only happen the first time a rank is reached. If\n you want to show a display on subsequent rank ups you can \n tune the re_rank_up_notifcation.\n ',
tunable=UiDialogNotification.TunableFactory(locked_args={'text_tokens':DEFAULT,
'icon':None,
'secondary_icon':None})),
re_rank_up_notification=OptionalTunable(description='\n The notification to display when the Sim obtains this rank\n every time other than the first time. For instance if the\n Sim achieves rank 3, drops down to rank 2 because of decay,\n and then re-achieves rank 3, that is when this dialog will\n be displayed.\n \n If you want this dialog to be displayed the first time the\n Sim reaches a rank please tune rank_up_notification instead.\n ',
tunable=UiDialogNotification.TunableFactory(locked_args={'text_tokens':DEFAULT,
'icon':None,
'secondary_icon':None})))),
'tags':TunableList(description='\n The associated categories of the ranked statistic.\n ',
tunable=TunableEnumEntry(tunable_type=(tag.Tag),
default=(tag.Tag.INVALID),
pack_safe=True)),
'icon':TunableIcon(description="\n The ranked stat's icon.\n ",
allow_none=True,
export_modes=ExportModes.All),
'initial_loot':TunableList(description='\n A list of loots to apply when the Sim first receives this ranked\n statistic.\n ',
tunable=LootActions.TunableReference(pack_safe=True)),
'min_decay_per_highest_level_achieved':TunableMapping(description='\n A mapping of highest level reached to the absolute minimum \n that this Ranked Stat is allowed to decay to in ranks.\n ',
key_type=int,
value_type=TunableRange(description='\n The lowest level this stat can decay to based on the associated\n highest level reached.\n ',
tunable_type=int,
minimum=1,
default=1)),
'associated_bucks_types':TunableList(description='\n A list of bucks types that are associated with this ranked stat.\n These bucks types may have tuned data that is affected by ranking\n up/down.\n ',
tunable=TunableEnumEntry(description='\n A buck type that is associated with this ranked stat.\n ',
tunable_type=BucksType,
default=(BucksType.INVALID)),
unique_entries=True,
export_modes=ExportModes.All),
'zero_out_on_lock':Tunable(description='\n If checked, when this ranked stat is locked it will zero out\n the value, highest_level, and bucks.\n ',
tunable_type=bool,
default=True),
'headline':OptionalTunable(description='\n If enabled when this relationship track updates we will display\n a headline update to the UI.\n ',
tunable=TunableReference(description='\n The headline that we want to send down.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.HEADLINE))),
tuning_group=GroupNames.UI),
'send_stat_update_for_npcs':Tunable(description="\n If checked then whenever we attempt to send the ranked stat update\n message it will be sent, even if the Sim is an NPC.\n \n NOTE: We don't want to mark very many of the stats like this. This \n is being done to make sure that Fame gets sent so we don't have\n to request Fame when building the tooltip for sims which could be\n really slow.\n ",
tunable_type=bool,
default=False),
'center_bar_tooltip':Tunable(description='\n If true, always put motive panel ranked stat bar tooltip at the center.\n If false, put tooltip on each increment mark instead.\n ',
tunable_type=bool,
default=False,
export_modes=ExportModes.All),
'visible':Tunable(description='\n Whether or not statistic should be sent to client.\n \n NOTE: Please work with your UI engineering partner to determine if this \n should be True. If False, for performance reasons, \n the stat will be removed from the sim if their\n current value matches the default convergence value. \n ',
tunable_type=bool,
default=False,
export_modes=ExportModes.All),
'rank_down_inclusive':Tunable(description='\n If True, rank-down will occur when the stat value hits the\n threshold boundary between ranks. Otherwise, rank down will use the\n default behavior and rank down once the threshold is crossed.\n \n For example: a ranked stat has two levels, level 1 with a range of 0-10, \n level 2 with a range of 10-20, and the current value and level are 15 and 2.\n If the stat was decremented by 5, setting the value to exactly the\n threshold boundary of 10, inclusive rules will calculate the new level as 1,\n whereas exclusive rules will calculate the level as 2. Exclusive rank-downs are\n the default behavior.\n ',
tunable_type=bool,
default=False),
'zero_point':Tunable(description='\n The value of the statistic that represents no progress. Values less\n than the zero-point value represent negative progress. Values greater\n than the zero-point value represent positive progress.\n ',
tunable_type=int,
default=1,
export_modes=ExportModes.All,
tuning_group=GroupNames.UI),
'display_updates_from_add':Tunable(description="\n If True, any rank updates that occur when setting the initial\n value will be sent to UI. If False, only changes in the\n stat's value from its initial value will be displayed.\n ",
tunable_type=bool,
default=True,
tuning_group=GroupNames.UI),
'bar_color_override':TunableColor.TunableColorRGBA(description='\n Tunable color tint on the progress bar.\n ',
export_modes=(
ExportModes.ClientBinary,),
tuning_group=GroupNames.UI),
'starting_rank_display_value':Tunable(description="\n The rank of the stat when it is first added. Used for\n display before the stat has been initialized. \n \n The starting rank is derived from the tuned event\n intervals and the threshold that corresponds to\n the stat's initial value.\n ",
tunable_type=int,
default=1,
export_modes=ExportModes.All,
tuning_group=GroupNames.UI)}
REMOVE_INSTANCE_TUNABLES = ('min_value_tuning', 'max_value_tuning')
def __init__(self, tracker):
self._rank_level = self.initial_rank
self._inclusive_rank_threshold = False
self.highest_level = 0
super().__init__(tracker, self.initial_value)
self._current_event_level = 0
self.previous_event_level = 0
self._notifications_disabled = False
self._initial_loots_awarded = False
self._suppress_level_telemetry = False
@classmethod
def _verify_tuning_callback(cls):
initial_value = cls.initial_value
starting_rank = 1
point_value = 0
for level, level_threshold in enumerate(cls.get_level_list()):
level += 1
point_value += level_threshold
if not point_value > initial_value:
if point_value == initial_value:
if level_threshold == 0:
break
if cls.event_data[level].rank_up:
starting_rank += 1
if cls.starting_rank_display_value != starting_rank:
logger.error(" {}: 'starting_rank_display_value' is {} and should be {}.", cls.__name__, cls.starting_rank_display_value, starting_rank)
@constproperty
def is_ranked():
return True
@property
def rank_level(self):
return self._rank_level
@property
def process_non_selectable_sim(self):
return True
@rank_level.setter
def rank_level(self, value):
self._rank_level = value
services.get_event_manager().process_event((TestEvent.RankedStatisticChange), sim_info=(self.tracker.owner.sim_info))
@property
def highest_rank_achieved(self):
rank_level = self.initial_rank
for i in range(1, self.highest_level + 1):
if self.event_data.get(i).rank_up:
rank_level += 1
return rank_level
@property
def is_visible(self):
return self.tracker is None or self.tracker.owner.is_sim or False
return self.visible
def increase_rank_level(self, new_rank=True, from_add=False):
self.rank_level += 1
self._on_rank_up(new_rank=new_rank, from_add=from_add)
def increase_rank_levels(self, levels):
start_level = self.rank_level
self.rank_level = start_level + levels
self.send_rank_change_update_message(start_level, start_level + levels)
def decrease_rank_level(self):
self.rank_level = max(self.rank_level - 1, 0)
self._on_rank_down()
if not self.tracker.owner.is_npc:
self._handle_level_change_telemetry(self.rank_level)
def _on_rank_up(self, new_rank=True, from_add=False):
current_rank = self.rank_level
if from_add:
if self.display_updates_from_add:
self.send_rank_change_update_message(current_rank - 1, current_rank)
sim_info = self.tracker.owner.sim_info
rank_data = self.rank_tuning.get(current_rank)
rank_up_data = self.rank_up_notification_tuning.get(current_rank)
if rank_data is None:
logger.error('Sim {}: {} is trying to rank up to level {} but there is no rank tuning.', sim_info, self, current_rank)
return
if not from_add:
if sim_info.is_selectable and rank_up_data is not None and self.can_show_notification(rank_up_data):
icon_override = None if rank_data.icon is None else IconInfoData(icon_resource=(rank_data.icon))
if new_rank:
self._show_initial_rank_up_notifications(sim_info, current_rank, rank_data, rank_up_data, icon_override)
else:
self._show_re_rank_up_notifications(sim_info, current_rank, rank_data, rank_up_data, icon_override)
def _show_initial_rank_up_notifications(self, sim_info, current_rank, rank_data, rank_up_data, icon_override):
if rank_up_data.rank_up_notification is not None:
notification = rank_up_data.rank_up_notification(sim_info, resolver=(SingleSimResolver(sim_info)))
notification.show_dialog(icon_override=icon_override, secondary_icon_override=IconInfoData(obj_instance=sim_info),
additional_tokens=(
current_rank,))
if rank_up_data.rank_up_screen_slam is not None:
rank_up_data.rank_up_screen_slam.send_screen_slam_message(sim_info, sim_info, rank_data.rank_name, current_rank)
def _show_re_rank_up_notifications(self, sim_info, current_rank, rank_data, rank_up_data, icon_override):
if rank_up_data.re_rank_up_notification is not None:
notification = rank_up_data.re_rank_up_notification(sim_info, resolver=(SingleSimResolver(sim_info)))
notification.show_dialog(icon_override=icon_override, secondary_icon_override=IconInfoData(obj_instance=sim_info),
additional_tokens=(
current_rank,))
def _on_rank_down(self):
current_rank = self.rank_level
self.send_rank_change_update_message(current_rank + 1, current_rank)
sim_info = self.tracker.owner.sim_info
rank_data = self.rank_tuning.get(current_rank)
rank_down_data = self.rank_down_notification_tuning.get(current_rank)
if rank_data is None:
logger.error('Sim {}: {} is trying to rank down to level {} but there is no rank tuning.', sim_info, self, current_rank)
return
elif sim_info.is_selectable:
if rank_down_data is not None:
if self.can_show_notification(rank_down_data):
if rank_down_data.rank_down_notification is not None:
notification = rank_down_data.rank_down_notification(sim_info, resolver=(SingleSimResolver(sim_info)))
icon_override = None if rank_data.icon is None else IconInfoData(icon_resource=(rank_data.icon))
notification.show_dialog(icon_override=icon_override, secondary_icon_override=IconInfoData(obj_instance=sim_info),
additional_tokens=(
current_rank,))
if rank_down_data.rank_down_screen_slam is not None:
rank_down_data.rank_down_screen_slam.send_screen_slam_message(sim_info, sim_info, rank_data.rank_name, current_rank)
for bucks_type in self.associated_bucks_types:
bucks_tracker = BucksUtils.get_tracker_for_bucks_type(bucks_type, owner_id=(self.tracker.owner.id))
bucks_tracker.validate_perks(bucks_type, self.rank_level)
def on_add(self):
super().on_add()
self.tracker.owner.sim_info.on_add_ranked_statistic()
self.on_stat_event((self.highest_level), (self.get_user_value()), from_add=True)
self.previous_event_level = self.get_user_value()
if self.tracker.owner.is_simulating:
self.apply_initial_loot()
@classmethod
def get_level_list(cls):
return list(cls.event_intervals)
@classmethod
def get_level_threshold(cls, level):
return sum(cls.get_level_list()[:level])
@flexmethod
def _get_level_calculation_function(cls, inst):
if inst is not None:
if inst._inclusive_rank_threshold:
return lambda current_value: current_value <= 0
return lambda current_value: current_value < 0
def _reset_rank_threshold_inclusivity(self):
self._inclusive_rank_threshold = False
def on_initial_startup(self):
super().on_initial_startup()
self.decay_enabled = self.tracker.owner.is_selectable and not self.tracker.owner.is_locked(self)
@staticmethod
def _callback_handler(stat_inst):
stat_inst._reset_rank_threshold_inclusivity()
new_level = stat_inst.get_user_value()
stat_inst.on_stat_event(stat_inst.previous_event_level, new_level)
stat_inst.previous_event_level = new_level
stat_inst.refresh_threshold_callback()
def on_stat_event--- This code section failed: ---
L. 786 0 LOAD_CONST 0
2 STORE_FAST 'batch_rank_levels'
L. 787 4 SETUP_LOOP 200 'to 200'
6_0 COME_FROM 184 '184'
6 LOAD_FAST 'old_level'
8 LOAD_FAST 'new_level'
10 COMPARE_OP <
12 POP_JUMP_IF_FALSE 198 'to 198'
L. 788 14 LOAD_FAST 'old_level'
16 LOAD_CONST 1
18 INPLACE_ADD
20 STORE_FAST 'old_level'
L. 790 22 LOAD_FAST 'self'
24 LOAD_ATTR event_data
26 LOAD_METHOD get
28 LOAD_FAST 'old_level'
30 CALL_METHOD_1 1 '1 positional argument'
32 STORE_FAST 'event_data'
L. 791 34 LOAD_FAST 'event_data'
36 LOAD_CONST None
38 COMPARE_OP is-not
40 POP_JUMP_IF_FALSE 172 'to 172'
L. 792 42 LOAD_FAST 'self'
44 LOAD_ATTR tracker
46 LOAD_ATTR owner
48 LOAD_ATTR is_simulating
50 POP_JUMP_IF_FALSE 158 'to 158'
L. 793 52 LOAD_GLOBAL SingleSimResolver
54 LOAD_FAST 'self'
56 LOAD_ATTR tracker
58 LOAD_ATTR owner
60 CALL_FUNCTION_1 1 '1 positional argument'
62 STORE_FAST 'resolver'
L. 794 64 LOAD_FAST 'old_level'
66 LOAD_FAST 'self'
68 LOAD_ATTR highest_level
70 COMPARE_OP >
72 STORE_FAST 'is_new_level'
L. 795 74 LOAD_FAST 'is_new_level'
76 POP_JUMP_IF_FALSE 110 'to 110'
L. 797 78 SETUP_LOOP 104 'to 104'
80 LOAD_FAST 'event_data'
82 LOAD_ATTR loot
84 GET_ITER
86 FOR_ITER 102 'to 102'
88 STORE_FAST 'loot'
L. 798 90 LOAD_FAST 'loot'
92 LOAD_METHOD apply_to_resolver
94 LOAD_FAST 'resolver'
96 CALL_METHOD_1 1 '1 positional argument'
98 POP_TOP
100 JUMP_BACK 86 'to 86'
102 POP_BLOCK
104_0 COME_FROM_LOOP 78 '78'
L. 801 104 LOAD_FAST 'old_level'
106 LOAD_FAST 'self'
108 STORE_ATTR highest_level
110_0 COME_FROM 76 '76'
L. 802 110 LOAD_FAST 'event_data'
112 LOAD_ATTR rank_up
114 POP_JUMP_IF_FALSE 130 'to 130'
L. 803 116 LOAD_FAST 'self'
118 LOAD_ATTR increase_rank_level
120 LOAD_FAST 'is_new_level'
122 LOAD_FAST 'from_add'
124 LOAD_CONST ('new_rank', 'from_add')
126 CALL_FUNCTION_KW_2 2 '2 total positional and keyword args'
128 POP_TOP
130_0 COME_FROM 114 '114'
L. 804 130 SETUP_LOOP 172 'to 172'
132 LOAD_FAST 'event_data'
134 LOAD_ATTR loot_always
136 GET_ITER
138 FOR_ITER 154 'to 154'
140 STORE_FAST 'loot'
L. 805 142 LOAD_FAST 'loot'
144 LOAD_METHOD apply_to_resolver
146 LOAD_FAST 'resolver'
148 CALL_METHOD_1 1 '1 positional argument'
150 POP_TOP
152 JUMP_BACK 138 'to 138'
154 POP_BLOCK
156 JUMP_FORWARD 172 'to 172'
158_0 COME_FROM 50 '50'
L. 806 158 LOAD_FAST 'event_data'
160 LOAD_ATTR rank_up
162 POP_JUMP_IF_FALSE 172 'to 172'
L. 807 164 LOAD_FAST 'batch_rank_levels'
166 LOAD_CONST 1
168 INPLACE_ADD
170 STORE_FAST 'batch_rank_levels'
172_0 COME_FROM 162 '162'
172_1 COME_FROM 156 '156'
172_2 COME_FROM_LOOP 130 '130'
172_3 COME_FROM 40 '40'
L. 813 172 LOAD_FAST 'self'
174 LOAD_ATTR tracker
176 LOAD_ATTR owner
178 LOAD_ATTR is_npc
180 POP_JUMP_IF_FALSE 186 'to 186'
182 LOAD_FAST 'from_add'
184 POP_JUMP_IF_TRUE 6 'to 6'
186_0 COME_FROM 180 '180'
L. 816 186 LOAD_FAST 'self'
188 LOAD_METHOD _handle_level_change_telemetry
190 LOAD_FAST 'old_level'
192 CALL_METHOD_1 1 '1 positional argument'
194 POP_TOP
196 JUMP_BACK 6 'to 6'
198_0 COME_FROM 12 '12'
198 POP_BLOCK
200_0 COME_FROM_LOOP 4 '4'
L. 818 200 LOAD_FAST 'batch_rank_levels'
202 LOAD_CONST 0
204 COMPARE_OP >
206 POP_JUMP_IF_FALSE 220 'to 220'
L. 819 208 LOAD_FAST 'self'
210 LOAD_METHOD increase_rank_levels
212 LOAD_FAST 'batch_rank_levels'
214 CALL_METHOD_1 1 '1 positional argument'
216 POP_TOP
218 JUMP_FORWARD 232 'to 232'
220_0 COME_FROM 206 '206'
L. 823 220 LOAD_FAST 'self'
222 LOAD_ATTR create_and_send_commodity_update_msg
224 LOAD_CONST False
226 LOAD_CONST ('is_rate_change',)
228 CALL_FUNCTION_KW_1 1 '1 total positional and keyword args'
230 POP_TOP
232_0 COME_FROM 218 '218'
Parse error at or near `COME_FROM_LOOP' instruction at offset 172_2
@contextlib.contextmanager
def suppress_level_up_telemetry(self):
    """Context manager that disables level-change telemetry while its body runs.

    Re-entrant: if telemetry is already suppressed, it simply yields without
    touching the flag, so nested uses do not clear suppression early.
    """
    if self._suppress_level_telemetry:
        yield
    else:
        self._suppress_level_telemetry = True
        try:
            yield
        finally:
            self._suppress_level_telemetry = False
def _handle_level_change_telemetry(self, level):
    """Write the ranked-stat level-change telemetry hook, unless suppressed."""
    if self._suppress_level_telemetry:
        return
    with telemetry_helper.begin_hook(ranked_stat_telemetry_writer, TELEMETRY_HOOK_RANKED_STAT_LEVEL_CHANGE, sim_info=(self._tracker._owner)) as (hook):
        hook.write_guid(TELEMETRY_FIELD_RANKED_STAT_TYPE, self.guid64)
        hook.write_int(TELEMETRY_FIELD_RANKED_STAT_LEVEL, level)
@sims4.utils.classproperty
def max_value(cls):
    """Maximum raw point value of this ranked statistic."""
    return cls.get_max_skill_value()
@sims4.utils.classproperty
def min_value(cls):
    """Minimum raw point value; ranked statistics never go below zero."""
    return 0
@sims4.utils.classproperty
def best_value(cls):
    """The 'best' value of a ranked statistic is its maximum."""
    return cls.max_value
@sims4.utils.classproperty
def max_rank(cls):
    """Rank that is reached at the statistic's maximum value."""
    _, highest_rank = cls.calculate_level_and_rank(cls.max_value)
    return highest_rank
@flexmethod
def convert_to_user_value(cls, inst, value):
    """Convert raw points into the user-facing level index.

    Walks the per-level threshold list, subtracting each threshold; the level
    calculation function decides when the remaining value is 'inside' a level.
    """
    thresholds = cls.get_level_list()
    if not thresholds:
        return 0
    inst_or_cls = inst if inst is not None else cls
    level_fnc = inst_or_cls._get_level_calculation_function()
    remaining = value
    for level, level_threshold in enumerate(thresholds):
        remaining -= level_threshold
        if level_fnc(remaining):
            return level
    # Value exceeds every threshold: one past the last tested level.
    return level + 1
def can_show_notification(self, rank_data):
    """Return True when notifications are enabled and rank_data's show tests (if any) pass."""
    if self._notifications_disabled:
        return False
    if rank_data is not None and rank_data.show_notification_tests is not None:
        resolver = event_testing.resolver.SingleSimResolver(self.tracker.owner)
        if not rank_data.show_notification_tests.run_tests(resolver):
            return False
    return True
def _get_next_level_threshold(self):
    """Threshold for the next level: strict (>) while the current rank boundary is inclusive, else >=."""
    comparison = operator.gt if self._inclusive_rank_threshold else operator.ge
    return Threshold(self._get_next_level_bound(), comparison)
def set_value(self, value, *args, from_load=False, interaction=None, **kwargs):
    """Set the raw value, clamping to the decay floor and handling level-down + events.

    NOTE(review): indentation reconstructed from a de-indented decompiled dump —
    verify the nesting of the post-set bookkeeping against the original bytecode.
    """
    old_points = self.get_value()
    old_user_value = self.get_user_value()
    value_to_set = value
    if not from_load:
        # Validate against per-level tests and never drop below the decay floor.
        value_to_set = self._get_valid_value(value, old_user_value)
        minimum_level = self._get_minimum_decay_level()
        value_to_set = max(value_to_set, minimum_level)
    (super().set_value)(value_to_set, *args, from_load=from_load, interaction=interaction, **kwargs)
    new_user_value = self.get_user_value()
    if not from_load:
        if value < old_points:
            if self.rank_down_inclusive:
                if value == self.get_level_threshold(new_user_value):
                    self._inclusive_rank_threshold = True
                    new_user_value = self.get_user_value()
            self._handle_level_down(old_user_value, new_user_value)
        sim_info = self._tracker._owner
        new_points = self.get_value()
        stat_type = self.stat_type
        if old_points == self.initial_value or old_points != new_points:
            services.get_event_manager().process_event((TestEvent.StatValueUpdate), sim_info=sim_info,
                statistic=stat_type,
                custom_keys=(stat_type,))
    self.send_commodity_progress_msg(is_rate_change=False)
    self.send_change_update_message((value - old_points), from_load=from_load)
    self.previous_event_level = new_user_value
    self.refresh_threshold_callback()
def _update_value(self):
    """Run the base-class value update with the decay floor, then handle level-down and value events."""
    minimum_decay = self._get_minimum_decay_level()
    old_value = self._value
    old_user_value = self.convert_to_user_value(self._value)
    super()._update_value(minimum_decay_value=minimum_decay)
    new_value = self._value
    new_user_value = self.convert_to_user_value(self._value)
    self._handle_level_down(old_user_value, new_user_value)
    if old_user_value > new_user_value:
        self.previous_event_level = new_user_value
        self.refresh_threshold_callback()
    stat_type = self.stat_type
    if new_value > old_value:
        sim_info = self._tracker._owner if self._tracker is not None else None
        services.get_event_manager().process_event((TestEvent.StatValueUpdate), sim_info=sim_info,
            statistic=stat_type,
            custom_keys=(stat_type,))
def _get_minimum_decay_level(self):
    """Point floor that decay may not cross, derived from the highest level ever reached."""
    min_rank = self.min_decay_per_highest_level_achieved.get(self.highest_level, None)
    if min_rank is None:
        return 0
    return self.points_to_rank(min_rank)
def _handle_level_down(self, old_value, new_value):
    """Apply level-down loot (and rank decreases) for every level lost between old and new."""
    while new_value < old_value:
        event_data = self.event_data.get(old_value)
        if event_data is not None:
            resolver = SingleSimResolver(self.tracker.owner)
            for loot in event_data.level_down_loot:
                loot.apply_to_resolver(resolver)
            if event_data.rank_up:
                # Losing a rank-up level also drops the rank.
                self.decrease_rank_level()
        old_value -= 1
def get_next_rank_level(self):
    """User-level index of the next level that awards a rank-up (or the current level at the cap)."""
    current_value = self.get_user_value()
    index = current_value + 1
    if index > len(self.event_data):
        return current_value
    while not self.event_data[index].rank_up:
        if index == len(self.event_data):
            break
        index += 1
    return index
@constproperty
def remove_on_convergence():
    """Ranked statistics are never removed when their value converges."""
    return False
def send_commodity_progress_msg(self, is_rate_change=True):
    """Thin wrapper: build and send the progress-update message to the client."""
    self.create_and_send_commodity_update_msg(is_rate_change=is_rate_change)
@classmethod
def points_to_level(cls, event_level):
    """Total points required to reach event_level (sum of the per-level thresholds below it)."""
    level_list = cls.get_level_list()
    running_sum = 0
    level = 0
    while level < len(level_list) and level < event_level:
        running_sum += level_list[level]
        level += 1
    return running_sum
@classmethod
def points_to_rank(cls, rank_level):
    """Total points required to reach rank_level, accumulating level thresholds until that rank.

    NOTE(review): nesting reconstructed from a de-indented dump — confirm that the
    threshold is only accumulated while the target rank has not been reached.
    """
    rank = cls.initial_rank
    level = 0
    running_sum = 0
    level_list = cls.get_level_list()
    while rank < rank_level and level < len(level_list):
        event_data = cls.event_data.get(level)
        if event_data is not None:
            if cls.event_data[level].rank_up:
                rank += 1
        if rank < rank_level:
            running_sum += level_list[level]
        level += 1
    return running_sum
def points_to_current_rank(self):
    """Points required to reach this statistic's current rank level."""
    return self.points_to_rank(self.rank_level)
def create_and_send_commodity_update_msg(self, is_rate_change=True, allow_npc=False, from_add=False):
    """Build a RankedStatisticProgressUpdate for this stat and send it to the owner's client.

    `is_rate_change` and `from_add` are part of the shared update-message
    interface; they are not consumed in this implementation.
    """
    msg = Commodities_pb2.RankedStatisticProgressUpdate()
    msg.stat_id = self.guid64
    msg.change_rate = self.get_change_rate()
    msg.rank = self.rank_level
    # Points earned within the current rank; never negative on the wire.
    points_over_rank = self.get_value() - self.points_to_current_rank()
    msg.curr_rank_points = int(points_over_rank) if points_over_rank > 0 else 0
    send_sim_ranked_stat_update_message((self.tracker.owner), msg, allow_npc=(allow_npc or self.send_stat_update_for_npcs))
@classmethod
def send_commodity_update_message(cls, sim_info, old_value, new_value):
    """Send a progress update for sim_info's instance of this statistic, if one exists.

    `old_value`/`new_value` are accepted for interface compatibility but unused here.
    """
    commodity_tracker = sim_info.commodity_tracker
    if commodity_tracker is None:
        return
    stat_instance = commodity_tracker.get_statistic(cls)
    if stat_instance is None:
        return
    stat_instance.create_and_send_commodity_update_msg(is_rate_change=True)
def send_change_update_message(self, amount, from_load=False):
    """Show the headline delta message for selectable sims; skipped on load or without a headline."""
    if from_load or self.headline is None:
        return
    sim = self.tracker.owner
    if sim.is_selectable:
        self.headline.send_headline_message(sim, amount)
def send_rank_change_update_message(self, previous_rank, current_rank):
    """Notify the client that the rank changed, then refresh the progress display."""
    msg = Commodities_pb2.RankedStatisticRankChangedUpdate()
    msg.stat_id = self.guid64
    msg.previous_rank = previous_rank
    msg.current_rank = current_rank
    send_sim_ranked_stat_change_rank_change_update_message(self.tracker.owner, msg)
    self.send_commodity_progress_msg()
def on_sim_ready_to_simulate(self):
    """On sim load-in: re-apply the current level's always-on-load loot, then initial loot."""
    level = self.get_user_value()
    event_data = self.event_data.get(level)
    if event_data is not None:
        resolver = SingleSimResolver(self.tracker.owner)
        for loot in event_data.loot_always_on_load:
            loot.apply_to_resolver(resolver)
    self.apply_initial_loot()
def apply_initial_loot(self):
    """Apply the one-time initial loot, at most once per statistic instance."""
    if not self.initial_loot or self._initial_loots_awarded:
        return
    resolver = SingleSimResolver(self.tracker.owner)
    for loot in self.initial_loot:
        loot.apply_to_resolver(resolver)
    self._initial_loots_awarded = True
def _get_valid_value(self, value, old_score):
    """Validate a prospective value against per-level gating tests when gaining levels.

    NOTE(review): reconstructed from a de-indented decompiled dump — the early
    `return points` nesting should be checked against the original bytecode.
    """
    new_score = self.convert_to_user_value(value)
    if old_score <= new_score:
        resolver = SingleSimResolver(self.tracker.owner)
        while old_score <= new_score:
            old_score += 1
            event_data = self.event_data.get(old_score)
            if event_data is not None:
                points = event_data.tests.run_tests(resolver=resolver) or self.points_to_level(old_score - 1)
                return points
    return value
def on_lock(self, action_on_lock):
    """Lock the statistic; optionally zero it out and reset the associated bucks."""
    # Suppress notifications for the duration of the lock bookkeeping.
    self._notifications_disabled = True
    should_zero_out = self.zero_out_on_lock or action_on_lock == StatisticLockAction.USE_MIN_VALUE_TUNING
    if should_zero_out:
        self.highest_level = 0
    super().on_lock(action_on_lock)
    if should_zero_out:
        self.reset_bucks()
    self._notifications_disabled = False
def reset_bucks(self):
    """Zero out every bucks type associated with this statistic for the owner."""
    for bucks_type in self.associated_bucks_types:
        bucks_tracker = BucksUtils.get_tracker_for_bucks_type(bucks_type, self.tracker.owner.id)
        if bucks_tracker is not None:
            bucks_tracker.try_modify_bucks(bucks_type, -bucks_tracker.get_bucks_amount_for_type(bucks_type))
@flexmethod
def calculate_level_and_rank(cls, inst, value):
    """Return (level, rank) reached at `value`, counting rank-ups along the way.

    NOTE(review): loop nesting reconstructed from a de-indented dump — confirm the
    rank-up accounting runs once per completed level.
    """
    level = 0
    rank = cls.initial_rank
    inst_or_cls = inst if inst is not None else cls
    level_fnc = inst_or_cls._get_level_calculation_function()
    for points_to_next_level in cls.get_level_list():
        value -= points_to_next_level
        if level_fnc(value):
            break
        level += 1
        level_data = cls.event_data.get(level)
        if level_data.rank_up:
            rank += 1
    return (
        level, rank)
def set_level_and_rank(self):
    """Recompute and cache the level and rank from the current raw value."""
    level, rank = self.calculate_level_and_rank(self.get_value())
    self.previous_event_level = level
    self.rank_level = rank
def should_display_delayed_decay_warning(self):
    """Never warn about delayed decay before any level has been reached."""
    if self.highest_level == 0:
        return False
    return super().should_display_delayed_decay_warning()
@classproperty
def valid_for_stat_testing(cls):
    """Ranked statistics may be targeted by stat-based tests."""
    return True
@classmethod
def load_statistic_data(cls, tracker, data):
    """Restore the persisted state for this statistic onto `tracker`'s instance."""
    super().load_statistic_data(tracker, data)
    stat = tracker.get_statistic(cls)
    if stat is None:
        return
    stat._initial_loots_awarded = data.initial_loots_awarded
    stat._inclusive_rank_threshold = data.inclusive_rank_threshold
    stat.set_level_and_rank()
    stat.highest_level = data.highest_level
    stat.load_time_of_last_value_change(data)
    stat.fixup_callbacks_during_load()
@classmethod
def save_for_delayed_active_lod(cls, commodity_proto, commodities, skills, ranked_statistics):
    """Route this proto into the ranked-statistics save list (not commodities/skills)."""
    ranked_statistics.append(commodity_proto)
def get_save_message(self, tracker):
    """Build the RankedStatistic protobuf used to persist this statistic."""
    message = protocols.RankedStatistic()
    message.name_hash = self.guid64
    message.value = self.get_saved_value()
    message.highest_level = self.highest_level
    message.initial_loots_awarded = self._initial_loots_awarded
    message.inclusive_rank_threshold = self._inclusive_rank_threshold
    if self._time_of_last_value_change:
        message.time_of_last_value_change = self._time_of_last_value_change.absolute_ticks()
    return message
def save_statistic(self, commodities, skills, ranked_statistics, tracker):
ranked_statistics.append(self.get_save_message(tracker)) | 20,168 | 18,954 | 981 |
739d41393a77764cb1e6c4ffc79168adc816618e | 280 | py | Python | main.py | umutykaya/yelp_business_search_api | aee604733f69b88c94121db652745a8243cd1e6a | [
"MIT"
] | null | null | null | main.py | umutykaya/yelp_business_search_api | aee604733f69b88c94121db652745a8243cd1e6a | [
"MIT"
] | null | null | null | main.py | umutykaya/yelp_business_search_api | aee604733f69b88c94121db652745a8243cd1e6a | [
"MIT"
] | null | null | null | # Import utils submodule
import api.api
# Call we_need_to_talk() from the api.api submodule; break_up=False selects the non-break-up path.
api.api.we_need_to_talk(break_up=False)
import api
# Create an AppClass instance (the original comment said "MyClass"; the class is AppClass).
my_instance = api.AppClass(value='class attribute value')
# Print out the instance's attribute value
print(my_instance.attribute) | 25.454545 | 57 | 0.807143 | # Import utils submodule
import api.api
# Decide to start seeing other people
api.api.we_need_to_talk(break_up=False)
import api
# Create instance of MyClass
my_instance = api.AppClass(value='class attribute value')
# Print out class attribute value
print(my_instance.attribute) | 0 | 0 | 0 |
785be2e4d3c1bf71b5bc23604da19bfa5ca45d21 | 2,421 | py | Python | autolens/point/fit_point/point_dataset.py | Jammy2211/AutoLens | bc132a21d1a52248f08f198474e29f985e365d85 | [
"MIT"
] | null | null | null | autolens/point/fit_point/point_dataset.py | Jammy2211/AutoLens | bc132a21d1a52248f08f198474e29f985e365d85 | [
"MIT"
] | 10 | 2017-12-22T11:39:33.000Z | 2018-01-30T09:13:16.000Z | autolens/point/fit_point/point_dataset.py | Jammy2211/AutoLens | bc132a21d1a52248f08f198474e29f985e365d85 | [
"MIT"
] | null | null | null | import numba
import autogalaxy as ag
from autolens.point.point_dataset import PointDataset
from autolens.point.point_solver import PointSolver
from autolens.point.fit_point.fluxes import FitFluxes
from autolens.point.fit_point.positions_image import FitPositionsImage
from autolens.point.fit_point.positions_source import FitPositionsSource
from autolens.lens.ray_tracing import Tracer
from autolens import exc
| 32.28 | 89 | 0.615035 | import numba
import autogalaxy as ag
from autolens.point.point_dataset import PointDataset
from autolens.point.point_solver import PointSolver
from autolens.point.fit_point.fluxes import FitFluxes
from autolens.point.fit_point.positions_image import FitPositionsImage
from autolens.point.fit_point.positions_source import FitPositionsSource
from autolens.lens.ray_tracing import Tracer
from autolens import exc
class FitPointDataset:
    """Fit of a point-source dataset: positions (and, when present, fluxes) via a ray-tracer."""

    def __init__(
        self, point_dataset: PointDataset, tracer: Tracer, point_solver: PointSolver
    ):
        """Fit `point_dataset` using `tracer`; image-plane position solving uses `point_solver`."""
        self.point_dataset = point_dataset
        # The point-source profile in the tracer whose name matches this dataset.
        point_profile = tracer.extract_profile(profile_name=point_dataset.name)
        try:
            # PointSourceChi profiles are fitted in the source plane; all others in the image plane.
            if isinstance(point_profile, ag.ps.PointSourceChi):
                self.positions = FitPositionsSource(
                    name=point_dataset.name,
                    positions=point_dataset.positions,
                    noise_map=point_dataset.positions_noise_map,
                    tracer=tracer,
                    point_profile=point_profile,
                )
            else:
                self.positions = FitPositionsImage(
                    name=point_dataset.name,
                    positions=point_dataset.positions,
                    noise_map=point_dataset.positions_noise_map,
                    point_solver=point_solver,
                    tracer=tracer,
                    point_profile=point_profile,
                )
        except exc.PointExtractionException:
            # No matching profile could be extracted: positions do not contribute.
            self.positions = None
        except (AttributeError, numba.errors.TypingError) as e:
            # Numba typing/attribute failures are re-raised as fit errors so the
            # model-fit can discard this sample.
            raise exc.FitException from e
        try:
            self.flux = FitFluxes(
                name=point_dataset.name,
                fluxes=point_dataset.fluxes,
                noise_map=point_dataset.fluxes_noise_map,
                positions=point_dataset.positions,
                tracer=tracer,
            )
        except exc.PointExtractionException:
            # Dataset has no usable fluxes for this profile: flux does not contribute.
            self.flux = None

    @property
    def log_likelihood(self) -> float:
        """Total log likelihood: positions term plus flux term, each 0.0 when absent."""
        log_likelihood_positions = (
            self.positions.log_likelihood if self.positions is not None else 0.0
        )
        log_likelihood_flux = self.flux.log_likelihood if self.flux is not None else 0.0
        return log_likelihood_positions + log_likelihood_flux
| 1,896 | 73 | 24 |
88257a5631f85a303cf369ae7a877f11db6c90b8 | 2,642 | py | Python | pysptools/examples/ex_hematite_v2.py | ctherien/pysptools | fbcd3ecaa7ab27f0158b28b4327537c3e75db160 | [
"Apache-2.0"
] | 35 | 2016-03-20T15:25:07.000Z | 2022-03-29T04:05:56.000Z | pysptools/examples/ex_hematite_v2.py | ctherien/pysptools | fbcd3ecaa7ab27f0158b28b4327537c3e75db160 | [
"Apache-2.0"
] | 12 | 2016-03-24T13:38:52.000Z | 2021-04-06T07:11:19.000Z | pysptools/examples/ex_hematite_v2.py | ctherien/pysptools | fbcd3ecaa7ab27f0158b28b4327537c3e75db160 | [
"Apache-2.0"
] | 14 | 2016-03-21T17:26:46.000Z | 2022-01-18T08:39:27.000Z | """
Plot a quartz class map for a drill core HSI cube.
"""
from __future__ import print_function
import os
import os.path as osp
import matplotlib.pyplot as plt
import numpy as np
import pysptools.util as util
import pysptools.eea as eea
import pysptools.abundance_maps as amp
if __name__ == '__main__':
# Load the cube
data_path = os.environ['PYSPTOOLS_DATA']
home = os.environ['HOME']
result_path = os.path.join(home, 'results')
sample = 'hematite.hdr'
data_file = osp.join(data_path, sample)
data, header = util.load_ENVI_file(data_file)
if osp.exists(result_path) == False:
os.makedirs(result_path)
axes = parse_ENVI_header(header)
# Telops cubes are flipped left-right
# Flipping them again restore the orientation
data = np.fliplr(data)
U = get_endmembers(data, axes, 4, result_path)
amaps = gen_abundance_maps(data, U, result_path)
# EM4 == quartz
quartz = amaps[:,:,3]
plot(quartz, 'spectral', 'quartz', result_path)
# EM1 == background, we use the backgroud to isolate the drill core
# and define the mask
mask = (amaps[:,:,0] < 0.2)
plot(mask, 'spectral', 'mask', result_path)
# Plot the quartz in color and the hematite in gray
plot(np.logical_and(mask == 1, quartz <= 0.001) + quartz, 'spectral', 'hematite+quartz', result_path)
# pixels stat
rock_surface = np.sum(mask)
quartz_surface = np.sum(quartz > 0.16)
print('Some statistics')
print(' Drill core surface (mask) in pixels:', rock_surface)
print(' Quartz surface in pixels:', quartz_surface)
print(' Hematite surface in pixels:', rock_surface - quartz_surface)
| 28.408602 | 106 | 0.641938 | """
Plot a quartz class map for a drill core HSI cube.
"""
from __future__ import print_function
import os
import os.path as osp
import matplotlib.pyplot as plt
import numpy as np
import pysptools.util as util
import pysptools.eea as eea
import pysptools.abundance_maps as amp
def parse_ENVI_header(head):
    """Build the axes description used for plotting from an ENVI header dict.

    Expects `head` to carry 'wavelength' and a two-element 'z plot titles' list.
    """
    titles = head['z plot titles']
    return {
        'wavelength': head['wavelength'],
        'x': 'Wavelength - ' + titles[0],
        'y': titles[1],
    }
def get_endmembers(data, info, q, path):
    """Extract q endmembers from the cube with NFINDR and save the endmember plot under `path`."""
    print('Endmembers extraction with NFINDR')
    extractor = eea.NFINDR()
    endmembers = extractor.extract(data, q, maxit=5, normalize=True, ATGP_init=True)
    extractor.plot(path, axes=info)
    return endmembers
def gen_abundance_maps(data, U, result_path):
    """Compute FCLS abundance maps for endmembers U and save their plots under `result_path`."""
    print('Abundance maps with FCLS')
    solver = amp.FCLS()
    abundances = solver.map(data, U, normalize=True)
    solver.plot(result_path, colorMap='jet')
    return abundances
def plot(image, colormap, desc, path):
    """Render `image` with `colormap` and save it as plot_<desc>.png under `path`."""
    plt.ioff()
    rendered = plt.imshow(image, interpolation='none')
    rendered.set_cmap(colormap)
    plt.colorbar()
    target = osp.join(path, 'plot_{}.png'.format(desc))
    plt.savefig(target)
    plt.clf()
if __name__ == '__main__':
    # Load the cube
    data_path = os.environ['PYSPTOOLS_DATA']
    home = os.environ['HOME']
    result_path = os.path.join(home, 'results')
    sample = 'hematite.hdr'
    data_file = osp.join(data_path, sample)
    data, header = util.load_ENVI_file(data_file)
    if osp.exists(result_path) == False:
        os.makedirs(result_path)
    axes = parse_ENVI_header(header)
    # Telops cubes are flipped left-right;
    # flipping them again restores the orientation.
    data = np.fliplr(data)
    U = get_endmembers(data, axes, 4, result_path)
    amaps = gen_abundance_maps(data, U, result_path)
    # EM4 == quartz
    quartz = amaps[:,:,3]
    plot(quartz, 'spectral', 'quartz', result_path)
    # EM1 == background; we use the background to isolate the drill core
    # and define the mask.
    mask = (amaps[:,:,0] < 0.2)
    plot(mask, 'spectral', 'mask', result_path)
    # Plot the quartz in color and the hematite in gray
    plot(np.logical_and(mask == 1, quartz <= 0.001) + quartz, 'spectral', 'hematite+quartz', result_path)
    # pixels stat
    rock_surface = np.sum(mask)
    # NOTE(review): 0.16 is an empirical abundance threshold for "quartz pixel" — confirm.
    quartz_surface = np.sum(quartz > 0.16)
    print('Some statistics')
    print(' Drill core surface (mask) in pixels:', rock_surface)
    print(' Quartz surface in pixels:', quartz_surface)
    print(' Hematite surface in pixels:', rock_surface - quartz_surface)
| 797 | 0 | 100 |
54026340f4cf4ce095dffe423dda6e49920ea4bc | 546 | py | Python | task/task1.py | SofyaGrobova/lab14 | 7d0e8b8d56acc9c6b86e9e0303b53762f470be3b | [
"MIT"
] | null | null | null | task/task1.py | SofyaGrobova/lab14 | 7d0e8b8d56acc9c6b86e9e0303b53762f470be3b | [
"MIT"
] | null | null | null | task/task1.py | SofyaGrobova/lab14 | 7d0e8b8d56acc9c6b86e9e0303b53762f470be3b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
if __name__ == '__main__':
print(func(5, 3)())
print(func(8, 10, 1)())
print(func(3, 5, 0)())
print(func(2, 2, 1)())
| 21 | 73 | 0.47619 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
def func(a, b, type=0):
    """Return a zero-argument closure describing an area.

    type == 0: triangle area a*b/2; type == 1: rectangle area a*b.
    Any other type: the closure returns None (as in the original).
    """
    def _describe():
        if type == 0:
            area = a * b / 2
            return f'Для значений {a}, {b} площадь треугольника = {area}'
        if type == 1:
            area = a * b
            return f'Для значений {a}, {b} площадь прямоугольника = {area}'
        return None
    return _describe
if __name__ == '__main__':
    # Exercise both branches of func: default type=0 (triangle) and type=1 (rectangle).
    print(func(5, 3)())
    print(func(8, 10, 1)())
    print(func(3, 5, 0)())
    print(func(2, 2, 1)())
| 388 | 0 | 23 |
d8d1c0f3e83aa3b5a43247e7f58ad9de5aa9b287 | 2,477 | py | Python | get_premium/tests.py | BarunBlog/Link_People | 1ffd07bc5b31a715133c99efbbb478efe18d632b | [
"MIT"
] | null | null | null | get_premium/tests.py | BarunBlog/Link_People | 1ffd07bc5b31a715133c99efbbb478efe18d632b | [
"MIT"
] | null | null | null | get_premium/tests.py | BarunBlog/Link_People | 1ffd07bc5b31a715133c99efbbb478efe18d632b | [
"MIT"
] | null | null | null | '''from django.contrib.auth import get_user_model
from django.test import TestCase #an extension of Python’s TestCase
from django.urls import reverse, resolve
from django.test import Client
from .models import PremiumBlog
from .views import (
BlogListView,
BlogDetailView,
)
class CustomUserTests(TestCase):
def test_create_user(self):
User = get_user_model()
user = User.objects.create_user(
username='partho',
email='partho007@gmail.com',
first_name='Partho',
last_name='Bhattacharjee',
country='Bangladesh',
city_or_district='Sylhet'
)
user.set_password('testpass123')
user.save()
self.assertEqual(user.email, 'partho007@gmail.com')
self.assertEqual(user.country, 'Bangladesh')
self.assertEqual(user.city_or_district, 'Sylhet')
self.assertTrue(user.is_active)
self.assertFalse(user.is_staff)
self.assertFalse(user.is_superuser)
class BlogTests(TestCase):
def setUp(self):
c = Client()
c.login(email='partho007@gmail.com', password='testpass123')
url = reverse('blog_list')
self.response = self.client.get(url)
def test_job_post(self):
post = PremiumBlog.objects.create(
Author='Barun',
Title='What is Django?',
Description='Python Framework',
)
self.assertEqual(post.Author, 'Barun')
self.assertEqual(post.Title, 'What is Django?')
self.assertEqual(post.Description, 'Python Framework')
def test_job_list_template(self):
self.assertEqual(self.response.status_code, 200)
self.assertTemplateUsed(self.response, 'premium/blog_list.html')
self.assertContains(self.response, 'Search your blog here')
self.assertNotContains(
self.response, 'Hi there! I should not be on the page.')
def job_detail_view(self):
post = PremiumBlog.objects.create(
Author='Barun',
Title='What is Django?',
Description='Python Framework',
)
response = self.client.get(post.get_absolute_url())
no_response = self.client.get('/jobs/12345/')
self.assertEqual(response.status_code, 200)
self.assertEqual(no_response.status_code, 404)
self.assertContains(response, 'What is Django?')
self.assertTemplateUsed(response, 'premium/blog_detail.html')''' | 30.207317 | 72 | 0.644732 | '''from django.contrib.auth import get_user_model
from django.test import TestCase #an extension of Python’s TestCase
from django.urls import reverse, resolve
from django.test import Client
from .models import PremiumBlog
from .views import (
BlogListView,
BlogDetailView,
)
class CustomUserTests(TestCase):
def test_create_user(self):
User = get_user_model()
user = User.objects.create_user(
username='partho',
email='partho007@gmail.com',
first_name='Partho',
last_name='Bhattacharjee',
country='Bangladesh',
city_or_district='Sylhet'
)
user.set_password('testpass123')
user.save()
self.assertEqual(user.email, 'partho007@gmail.com')
self.assertEqual(user.country, 'Bangladesh')
self.assertEqual(user.city_or_district, 'Sylhet')
self.assertTrue(user.is_active)
self.assertFalse(user.is_staff)
self.assertFalse(user.is_superuser)
class BlogTests(TestCase):
def setUp(self):
c = Client()
c.login(email='partho007@gmail.com', password='testpass123')
url = reverse('blog_list')
self.response = self.client.get(url)
def test_job_post(self):
post = PremiumBlog.objects.create(
Author='Barun',
Title='What is Django?',
Description='Python Framework',
)
self.assertEqual(post.Author, 'Barun')
self.assertEqual(post.Title, 'What is Django?')
self.assertEqual(post.Description, 'Python Framework')
def test_job_list_template(self):
self.assertEqual(self.response.status_code, 200)
self.assertTemplateUsed(self.response, 'premium/blog_list.html')
self.assertContains(self.response, 'Search your blog here')
self.assertNotContains(
self.response, 'Hi there! I should not be on the page.')
def job_detail_view(self):
post = PremiumBlog.objects.create(
Author='Barun',
Title='What is Django?',
Description='Python Framework',
)
response = self.client.get(post.get_absolute_url())
no_response = self.client.get('/jobs/12345/')
self.assertEqual(response.status_code, 200)
self.assertEqual(no_response.status_code, 404)
self.assertContains(response, 'What is Django?')
self.assertTemplateUsed(response, 'premium/blog_detail.html')''' | 0 | 0 | 0 |
406e78c5faea77fb778787c685d5a2bd4b6c7a1d | 4,208 | py | Python | acme/utils/observers/action_metrics_test.py | wookayin/acme | 71b2ab8577a118c103718f034fa62c5ad2c0fd97 | [
"Apache-2.0"
] | null | null | null | acme/utils/observers/action_metrics_test.py | wookayin/acme | 71b2ab8577a118c103718f034fa62c5ad2c0fd97 | [
"Apache-2.0"
] | null | null | null | acme/utils/observers/action_metrics_test.py | wookayin/acme | 71b2ab8577a118c103718f034fa62c5ad2c0fd97 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for action_metrics_observers."""
from acme import specs
from acme.testing import fakes
from acme.utils.observers import action_metrics
import dm_env
import numpy as np
from absl.testing import absltest
_FAKE_ENV = _make_fake_env()
_TIMESTEP = _FAKE_ENV.reset()
if __name__ == '__main__':
absltest.main()
| 32.875 | 77 | 0.643774 | # Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for action_metrics_observers."""
from acme import specs
from acme.testing import fakes
from acme.utils.observers import action_metrics
import dm_env
import numpy as np
from absl.testing import absltest
def _make_fake_env() -> dm_env.Environment:
  """Build a fake fixed-length environment with a 1-D bounded continuous action."""
  observations = specs.Array(shape=(10, 5), dtype=np.float32)
  actions = specs.BoundedArray(
      shape=(1,), dtype=np.float32, minimum=-100., maximum=100.)
  rewards = specs.Array(shape=(), dtype=np.float32)
  discounts = specs.BoundedArray(
      shape=(), dtype=np.float32, minimum=0., maximum=1.)
  env_spec = specs.EnvironmentSpec(
      observations=observations,
      actions=actions,
      rewards=rewards,
      discounts=discounts,
  )
  return fakes.Environment(env_spec, episode_length=10)
# Shared fixtures: one fake environment and its reset timestep, reused by every test below.
_FAKE_ENV = _make_fake_env()
_TIMESTEP = _FAKE_ENV.reset()
class ActionMetricsTest(absltest.TestCase):
  """Unit tests for action_metrics.ContinuousActionObserver."""

  def test_observe_nothing(self):
    """A fresh observer reports no metrics."""
    observer = action_metrics.ContinuousActionObserver()
    self.assertEqual({}, observer.get_metrics())
  def test_observe_first(self):
    """observe_first alone (no actions yet) yields no metrics."""
    observer = action_metrics.ContinuousActionObserver()
    observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
    self.assertEqual({}, observer.get_metrics())
  def test_observe_single_step(self):
    """One observed action: max/min/mean/median all equal that action."""
    observer = action_metrics.ContinuousActionObserver()
    observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
    observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([1]))
    self.assertEqual(
        {
            'action[0]_max': 1,
            'action[0]_min': 1,
            'action[0]_mean': 1,
            'action[0]_p50': 1,
        },
        observer.get_metrics(),
    )
  def test_observe_multiple_step(self):
    """Aggregates over several actions (1, 4, 5)."""
    observer = action_metrics.ContinuousActionObserver()
    observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
    observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([1]))
    observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([4]))
    observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([5]))
    self.assertEqual(
        {
            'action[0]_max': 5,
            'action[0]_min': 1,
            'action[0]_mean': 10 / 3,
            'action[0]_p50': 4,
        },
        observer.get_metrics(),
    )
  def test_observe_zero_dimensions(self):
    """A scalar (0-d) action produces metric keys with an empty index."""
    observer = action_metrics.ContinuousActionObserver()
    observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
    observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array(1))
    self.assertEqual(
        {
            'action[]_max': 1,
            'action[]_min': 1,
            'action[]_mean': 1,
            'action[]_p50': 1,
        },
        observer.get_metrics(),
    )
  def test_observe_multiple_dimensions(self):
    """A 2x2 action produces one metric group per element index."""
    observer = action_metrics.ContinuousActionObserver()
    observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
    observer.observe(
        env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([[1, 2], [3, 4]]))
    np.testing.assert_equal(
        {
            'action[0, 0]_max': 1,
            'action[0, 0]_min': 1,
            'action[0, 0]_mean': 1,
            'action[0, 0]_p50': 1,
            'action[0, 1]_max': 2,
            'action[0, 1]_min': 2,
            'action[0, 1]_mean': 2,
            'action[0, 1]_p50': 2,
            'action[1, 0]_max': 3,
            'action[1, 0]_min': 3,
            'action[1, 0]_mean': 3,
            'action[1, 0]_p50': 3,
            'action[1, 1]_max': 4,
            'action[1, 1]_min': 4,
            'action[1, 1]_mean': 4,
            'action[1, 1]_p50': 4,
        },
        observer.get_metrics(),
    )
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
| 3,050 | 22 | 196 |
03e5c1084cdb72e0eb5d8f98d04c7100f12e582e | 31,595 | py | Python | SMPyBandits/Policies/DoublingTrickWrapper.py | balbok0/SMPyBandits | c8ff765687989e0c20ab42c2e2e1d8440923225b | [
"MIT"
] | 309 | 2018-03-03T22:07:59.000Z | 2022-03-26T08:15:58.000Z | Policies/DoublingTrickWrapper.py | 98k-bot/SMPyBandits | 35e675bde29dafbec68288fcfcd14ef3b0f058b2 | [
"MIT"
] | 125 | 2018-02-27T22:54:03.000Z | 2021-11-05T10:50:15.000Z | Policies/DoublingTrickWrapper.py | 98k-bot/SMPyBandits | 35e675bde29dafbec68288fcfcd14ef3b0f058b2 | [
"MIT"
] | 60 | 2018-04-30T20:54:24.000Z | 2022-02-21T22:41:46.000Z | # -*- coding: utf-8 -*-
r""" A policy that acts as a wrapper on another policy `P`, assumed to be *horizon dependent* (has to known :math:`T`), by implementing a "doubling trick":
- starts to assume that :math:`T=T_0=1000`, and run the policy :math:`P(T_0)`, from :math:`t=1` to :math:`t=T_0`,
- if :math:`t > T_0`, then the "doubling trick" is performed, by either re-initializing or just changing the parameter `horizon` of the policy P, for instance with :math:`T_2 = 10 \times T_0`,
- and keep doing this until :math:`t = T`.
.. note::
This is implemented in a very generic way, with simply a function `next_horizon(horizon)` that gives the next horizon to try when crossing the current guess.
It can be a simple linear function (`next_horizon(horizon) = horizon + 100`), a geometric growth to have the "real" doubling trick (`next_horizon(horizon) = horizon * 10`), or even functions growing exponentially fast (`next_horizon(horizon) = horizon ** 1.1`, `next_horizon(horizon) = horizon ** 1.5`, `next_horizon(horizon) = horizon ** 2`).
.. note::
My guess is that this "doubling trick" wrapping policy can only be efficient (for stochastic problems) if:
- the underlying policy `P` is a very efficient horizon-dependent algorithm, e.g., the :class:`Policies.ApproximatedFHGittins`,
- the growth function `next_horizon` is growing faster than any geometric rate, so that the number of refresh is :math:`o(\log T)` and not :math:`O(\log T)`.
.. seealso::
Reference: [[What the Doubling Trick Can or Can't Do for Multi-Armed Bandits, Lilian Besson and Emilie Kaufmann, 2018]](https://hal.inria.fr/hal-01736357), to be presented soon.
.. warning::
    Interface: If `FULL_RESTART=False` (default), the underlying algorithm is *not* re-created at every breakpoint;
    instead, only its attribute `horizon` (or `_horizon`) is updated. Be sure that this is enough to really
change the internal value used by the policy. Some policy use T only once to compute others parameters,
which should be updated as well. A manual implementation of the `__setattr__` method can help.
"""
from __future__ import division, print_function # Python 2 compatibility
__author__ = "Lilian Besson"
__version__ = "0.9"
import numpy as np
try:
from .BaseWrapperPolicy import BaseWrapperPolicy
from .UCBH import UCBH
except ImportError:
from BaseWrapperPolicy import BaseWrapperPolicy
from UCBH import UCBH
try:
from .usenumba import jit # Import numba.jit or a dummy jit(f)=f
except (ValueError, ImportError, SystemError):
from usenumba import jit # Import numba.jit or a dummy jit(f)=f
#: Default horizon-dependent policy
default_horizonDependent_policy = UCBH
#: Default constant to know what to do when restarting the underlying policy with a new horizon parameter.
#:
#: - `True` means that a new policy, initialized from scratch, will be created at every breakpoint.
#: - `False` means that the same policy object is used but just its attribute `horizon` is updated (default).
FULL_RESTART = True
FULL_RESTART = False
# NOTE: throughout this module, repeated assignments to the same constant keep the
# alternative values visible; only the *last* assignment is effective (here False).
#: Default horizon, used for the first step.
DEFAULT_FIRST_HORIZON = 200
#: Default stepsize for the arithmetic horizon progression.
ARITHMETIC_STEP = 10 * DEFAULT_FIRST_HORIZON
ARITHMETIC_STEP = 1 * DEFAULT_FIRST_HORIZON
@jit
def next_horizon__arithmetic(i, horizon):
    r""" The arithmetic horizon progression function:
    .. math::
       T &\mapsto T + 100,\\
       T_i &:= T_0 + 100 \times i.
    """
    # `i` (the breakpoint index) is unused here: all growth functions share the
    # uniform signature (i, horizon).  The actual step is ARITHMETIC_STEP (the
    # "+ 100" in the formula above is only illustrative).
    return horizon + ARITHMETIC_STEP
# Second assignment wins; the LaTeX formula is what plot legends display.
next_horizon__arithmetic.__latex_name__ = "arithm"
next_horizon__arithmetic.__latex_name__ = r"$T_i = {} + {} \times i$".format(DEFAULT_FIRST_HORIZON, ARITHMETIC_STEP)
#: Default multiplicative constant for the geometric horizon progression.
GEOMETRIC_STEP = 2
@jit
def next_horizon__geometric(i, horizon):
    r""" The geometric horizon progression function:
    .. math::
       T &\mapsto T \times 2,\\
       T_i &:= T_0 2^i.
    """
    # `i` is unused: the next guess only depends on the current one (uniform signature).
    return horizon * GEOMETRIC_STEP
# Second assignment wins; the LaTeX formula is what plot legends display.
next_horizon__geometric.__latex_name__ = "geom"
next_horizon__geometric.__latex_name__ = r"$T_i = {} \times {}^i$".format(DEFAULT_FIRST_HORIZON, GEOMETRIC_STEP)
#: Default exponential constant for the exponential horizon progression.
EXPONENTIAL_STEP = 1.5
@jit
def next_horizon__exponential(i, horizon):
    r""" The exponential horizon progression function:
    .. math::
       T &\mapsto \left\lfloor T^{1.5} \right\rfloor,\\
       T_i &:= \left\lfloor T_0^{1.5^i} \right\rfloor.
    """
    # `i` is unused (uniform signature); ** 1.5 yields a float, so floor back to int.
    return int(np.floor(horizon ** EXPONENTIAL_STEP))
# Second assignment wins; the LaTeX formula is what plot legends display.
next_horizon__exponential.__latex_name__ = "exp"
next_horizon__exponential.__latex_name__ = r"$T_i = {}^{}$".format(DEFAULT_FIRST_HORIZON, r"{%.3g^i}" % EXPONENTIAL_STEP)
#: Default exponential constant for the slow exponential horizon progression.
SLOW_EXPONENTIAL_STEP = 1.1
@jit
def next_horizon__exponential_slow(i, horizon):
    r""" The exponential horizon progression function:
    .. math::
       T &\mapsto \left\lfloor T^{1.1} \right\rfloor,\\
       T_i &:= \left\lfloor T_0^{1.1^i} \right\rfloor.
    """
    # `i` is unused (uniform signature); floor the float power back to int.
    return int(np.floor(horizon ** SLOW_EXPONENTIAL_STEP))
# Second assignment wins; the LaTeX formula is what plot legends display.
next_horizon__exponential_slow.__latex_name__ = "slow exp"
next_horizon__exponential_slow.__latex_name__ = r"$T_i = {}^{}$".format(DEFAULT_FIRST_HORIZON, r"{%.3g^i}" % SLOW_EXPONENTIAL_STEP)
#: Default exponential constant for the fast exponential horizon progression.
FAST_EXPONENTIAL_STEP = 2
@jit
def next_horizon__exponential_fast(i, horizon):
    r""" The fast exponential horizon progression function:
    .. math::
       T &\mapsto \lfloor T^{2} \rfloor,\\
       T_i &:= \lfloor T_0^{2^i} \rfloor.
    """
    # Fix: use the module constant instead of a hard-coded 2, so that changing
    # FAST_EXPONENTIAL_STEP actually changes the progression (it was defined but
    # unused before).  Value-identical with the current constant.
    return int(np.floor(horizon ** FAST_EXPONENTIAL_STEP))
# Second assignment wins; the LaTeX formula is what plot legends display.
next_horizon__exponential_fast.__latex_name__ = "fast exp"
next_horizon__exponential_fast.__latex_name__ = r"$T_i = {}^{}$".format(DEFAULT_FIRST_HORIZON, r"{%.3g^i}" % FAST_EXPONENTIAL_STEP)
#: Default constant :math:`\alpha` for the generic exponential sequence.
ALPHA = 2
#: Default constant :math:`\beta` for the generic exponential sequence.
BETA = 2
def next_horizon__exponential_generic(i, horizon):
    r""" The generic exponential horizon progression function:
    .. math:: T_i := \left\lfloor \frac{T_0}{a} a^{b^i} \right\rfloor.
    """
    # Closed form in i: unlike the other growth functions, the current
    # ``horizon`` argument is deliberately ignored (kept for a uniform signature).
    scale = DEFAULT_FIRST_HORIZON / ALPHA
    return int(scale * ALPHA ** (BETA ** i))
next_horizon__exponential_generic.__latex_name__ = r"exp $a={:.3g}$, $b={:.3g}$".format(ALPHA, BETA)
next_horizon__exponential_generic.__latex_name__ = r"$T_i = ({}/{}) {}^{}$".format(DEFAULT_FIRST_HORIZON, ALPHA, ALPHA, r"{%.3g^i}" % BETA)
#: Choose the default horizon growth function (only the last, uncommented, assignment is effective).
# default_next_horizon = next_horizon__arithmetic
# default_next_horizon = next_horizon__geometric
# default_next_horizon = next_horizon__geometric
# default_next_horizon = next_horizon__exponential_fast
default_next_horizon = next_horizon__exponential_slow
# --- Utility function
def breakpoints(next_horizon, first_horizon, horizon, debug=False):
    r""" Compute the successive horizon guesses (breakpoints) obtained by starting from ``first_horizon`` and repeatedly applying the growth function ``next_horizon`` until the true ``horizon`` is reached or exceeded.
    - Also return the gap between the last guess for horizon and the true horizon. This gap should not be too large.
    - Nicely print all the values if ``debug=True``.
    - First examples:
    >>> first_horizon = 1000
    >>> horizon = 30000
    >>> breakpoints(next_horizon__arithmetic, first_horizon, horizon) # doctest: +ELLIPSIS
    ([1000, 1200, 1400, ..., 29800, 30000], 0)
    >>> breakpoints(next_horizon__geometric, first_horizon, horizon)
    ([1000, 2000, 4000, 8000, 16000, 32000], 2000)
    >>> breakpoints(next_horizon__exponential, first_horizon, horizon)
    ([1000, 31622], 1622)
    >>> breakpoints(next_horizon__exponential_slow, first_horizon, horizon)
    ([1000, 1995, 4265, 9838, 24671, 67827], 37827)
    >>> breakpoints(next_horizon__exponential_fast, first_horizon, horizon)
    ([1000, 1000000], 970000)
    - Second examples:
    >>> first_horizon = 5000
    >>> horizon = 1000000
    >>> breakpoints(next_horizon__arithmetic, first_horizon, horizon) # doctest: +ELLIPSIS
    ([5000, 5200, ..., 999600, 999800, 1000000], 0)
    >>> breakpoints(next_horizon__geometric, first_horizon, horizon)
    ([5000, 10000, 20000, 40000, 80000, 160000, 320000, 640000, 1280000], 280000)
    >>> breakpoints(next_horizon__exponential, first_horizon, horizon)
    ([5000, 353553, 210223755], 209223755)
    >>> breakpoints(next_horizon__exponential_slow, first_horizon, horizon)
    ([5000, 11718, 29904, 83811, 260394, 906137, 3572014], 2572014)
    >>> breakpoints(next_horizon__exponential_fast, first_horizon, horizon)
    ([5000, 25000000], 24000000)
    - Third examples:
    >>> first_horizon = 10
    >>> horizon = 1123456
    >>> breakpoints(next_horizon__arithmetic, first_horizon, horizon) # doctest: +ELLIPSIS
    ([10, 210, 410, ..., 1123210, 1123410, 1123610], 154)
    >>> breakpoints(next_horizon__geometric, first_horizon, horizon)
    ([10, 20, 40, 80, 160, 320, 640, 1280, 2560, 5120, 10240, 20480, 40960, 81920, 163840, 327680, 655360, 1310720], 187264)
    >>> breakpoints(next_horizon__exponential, first_horizon, horizon)
    ([10, 31, 172, 2255, 107082, 35040856], 33917400)
    >>> breakpoints(next_horizon__exponential_slow, first_horizon, horizon)
    ([10, 12, 15, 19, 25, 34, 48, 70, 107, 170, 284, 499, 928, 1837, 3895, 8903, 22104, 60106, 180638, 606024, 2294768], 1171312)
    >>> breakpoints(next_horizon__exponential_fast, first_horizon, horizon)
    ([10, 100, 10000, 100000000], 98876544)
    """
    step = 0
    guess = max(first_horizon, 2)  # never start below 2
    restart_times = [guess]
    if debug:
        print("\n\nFor the growth function {}, named '{}', first guess of the horizon = {} and true horizon = {} ...\n ==> The times will be:".format(next_horizon, getattr(next_horizon, '__latex_name__', '?'), first_horizon, horizon))
    while guess < horizon:
        guess = next_horizon(step, guess)
        step += 1
        restart_times.append(guess)
        if debug:
            print(" The {}th breakpoint is {} ...".format(step, guess)) # DEBUG
    assert horizon <= guess, "Error: the last guess for horizon = {} was found smaller than the true horizon = {}...".format(guess, horizon) # DEBUG
    gap = guess - horizon
    if debug:
        print("This last guess for horizon = {} gives a gap = {} against the true horizon {}. Relative difference = {:.3%}...".format(guess, gap, horizon, gap / float(horizon))) # DEBUG
    return restart_times, gap
# --- Experimental code to plot some doubling sequences and
# check numerically some inequalities :
# like controlling a sum Sigma_i=0^n u_i by a constant times to last term u_n
# and controlling the last term u_{L_T} as a function of T.
#: The constant c in front of the function f.
constant_c_for_the_functions_f = 1.0
constant_c_for_the_functions_f = 0.1
constant_c_for_the_functions_f = 0.5
def function_f__for_geometric_sequences(i, c=constant_c_for_the_functions_f):
    r""" For the *geometric* doubling sequences, :math:`f(i) = c \times \log(i)` (and :math:`0` for :math:`i \leq 0`)."""
    return c * np.log(i) if i > 0 else 0.0
def function_f__for_exponential_sequences(i, c=constant_c_for_the_functions_f):
    r""" For the *exponential* doubling sequences, :math:`f(i) = c \times i`."""
    # Linear f gives a doubly-exponential sequence T_i once plugged into Ti_from_f.
    return c * i
def function_f__for_generic_sequences(i, c=constant_c_for_the_functions_f, d=0.5, e=0.0):
    r""" For a certain *generic* family of doubling sequences, :math:`f(i) = c \times i^{d} \times (\log(i))^{e}`.
    - ``d, e = 0, 1`` gives :func:`function_f__for_geometric_sequences`,
    - ``d, e = 1, 0`` gives :func:`function_f__for_exponential_sequences`,
    - ``d, e = 0.5, 0`` gives an intermediate sequence, growing faster than any geometric sequence and slower than any exponential sequence,
    - any other combination has not been studied yet.
    .. warning:: ``d`` should most probably be smaller than 1.
    """
    # Fix (doc only): the second bullet previously claimed d, e = 1, 0 gives the
    # *geometric* function, but c * i**1 is the *exponential* one (f(i) = c * i).
    i = float(i)
    if i <= 0: return 0.0  # f is only defined for i > 0
    if e == 0:
        assert d > 0, "Error: invalid value of d = {} for function_f__for_generic_sequences.".format(d)  # DEBUG
        return c * (i ** d)
    if d == 0:
        assert e > 0, "Error: invalid value of e = {} for function_f__for_generic_sequences.".format(e)  # DEBUG
        return c * ((np.log(i)) ** e)
    return c * (i ** d) * ((np.log(i)) ** e)
#: Value of the parameter :math:`\alpha` for the :func:`Ti_from_f` function.
alpha_for_Ti = 0.1
alpha_for_Ti = 1.0
alpha_for_Ti = 0.5
# (last assignment wins: alpha = 0.5)
def Ti_from_f(f, alpha=alpha_for_Ti, *args, **kwargs):
    r""" For any non-negative and increasing function :math:`f: i \mapsto f(i)`, the corresponding sequence is defined by:
    .. math:: \forall i\in\mathbb{N},\; T_i := \lfloor \exp(\alpha \times \exp(f(i))) \rfloor.
    .. warning:: :math:`f(i)` can need other parameters, see the examples above. They can be given as ``*args`` or ``**kwargs`` to :func:`Ti_from_f`.
    .. warning:: it should be computed otherwise, I should give :math:`i \mapsto \exp(f(i))` instead of :math:`f: i \mapsto f(i)`. I need to try as much as possible to reduce the risk of overflow errors!
    """
    # Fix: the previous version returned an undefined name ``Ti`` (NameError at call
    # time).  Build the closure described by the docstring, mirroring the hard-coded
    # Ti_geometric / Ti_exponential templates below (without their first_horizon offset).
    def Ti(i):
        # WARNING don't forget the floor!
        this_Ti = np.floor(np.exp(alpha * np.exp(f(float(i), *args, **kwargs))))
        # Overflow yields inf/nan: only cast finite values to int.
        if not (np.isinf(this_Ti) or np.isnan(this_Ti)):
            this_Ti = int(this_Ti)
        return this_Ti
    return Ti
def Ti_geometric(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):
    """ Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_geometric_sequences`."""
    # `horizon` is unused: the sequence is a closed form in i (uniform signature).
    f = function_f__for_geometric_sequences
    this_Ti = first_horizon + np.floor(np.exp(alpha * np.exp(f(float(i), *args, **kwargs))))
    # Overflow yields inf/nan: keep the float then (int() would raise), else cast.
    if not (np.isinf(this_Ti) or np.isnan(this_Ti)): this_Ti = int(this_Ti)
    return this_Ti
Ti_geometric.__latex_name__ = r"$f(i)=\log(i)$"
def Ti_exponential(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):
    """ Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_exponential_sequences`."""
    # `horizon` is unused: the sequence is a closed form in i (uniform signature).
    f = function_f__for_exponential_sequences
    this_Ti = first_horizon + np.floor(np.exp(alpha * np.exp(f(float(i), *args, **kwargs))))
    # Overflow yields inf/nan: only cast finite values to int.
    if not (np.isinf(this_Ti) or np.isnan(this_Ti)): this_Ti = int(this_Ti)
    return this_Ti
Ti_exponential.__latex_name__ = r"$f(i)=i$"
def Ti_intermediate_sqrti(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):
    """ Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_intermediate_sequences`."""
    # NOTE(review): function_f__for_intermediate_sequences is not defined in this
    # chunk; per __latex_name__ it should be f(i) = sqrt(i), i.e. the generic family
    # with d=0.5, e=0 -- confirm it exists at module level or this raises NameError.
    f = function_f__for_intermediate_sequences
    this_Ti = first_horizon + np.floor(np.exp(alpha * np.exp(f(float(i), *args, **kwargs))))
    if not (np.isinf(this_Ti) or np.isnan(this_Ti)): this_Ti = int(this_Ti)
    return this_Ti
Ti_intermediate_sqrti.__latex_name__ = r"$f(i)=\sqrt{i}$"
def Ti_intermediate_i13(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):
    """ Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_intermediate2_sequences`."""
    # NOTE(review): helper not defined in this chunk; per __latex_name__ it should be
    # f(i) = i**(1/3) (generic family, d=1/3, e=0) -- confirm it exists at module level.
    f = function_f__for_intermediate2_sequences
    this_Ti = first_horizon + np.floor(np.exp(alpha * np.exp(f(float(i), *args, **kwargs))))
    if not (np.isinf(this_Ti) or np.isnan(this_Ti)): this_Ti = int(this_Ti)
    return this_Ti
Ti_intermediate_i13.__latex_name__ = r"$f(i)=i^{1/3}$"
def Ti_intermediate_i23(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):
    """ Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_intermediate3_sequences`."""
    # NOTE(review): helper not defined in this chunk; per __latex_name__ it should be
    # f(i) = i**(2/3) (generic family, d=2/3, e=0) -- confirm it exists at module level.
    f = function_f__for_intermediate3_sequences
    this_Ti = first_horizon + np.floor(np.exp(alpha * np.exp(f(float(i), *args, **kwargs))))
    if not (np.isinf(this_Ti) or np.isnan(this_Ti)): this_Ti = int(this_Ti)
    return this_Ti
Ti_intermediate_i23.__latex_name__ = r"$f(i)=i^{2/3}$"
def Ti_intermediate_i12_logi12(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):
    """ Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_intermediate4_sequences`."""
    # NOTE(review): helper not defined in this chunk; per __latex_name__ it should be
    # f(i) = sqrt(i * log(i)) (generic family, d=1/2, e=1/2) -- confirm at module level.
    f = function_f__for_intermediate4_sequences
    this_Ti = first_horizon + np.floor(np.exp(alpha * np.exp(f(float(i), *args, **kwargs))))
    if not (np.isinf(this_Ti) or np.isnan(this_Ti)): this_Ti = int(this_Ti)
    return this_Ti
Ti_intermediate_i12_logi12.__latex_name__ = r"$f(i)=\sqrt{i \log(i)}$"
def Ti_intermediate_i_by_logi(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):
    """ Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_intermediate5_sequences`."""
    # NOTE(review): helper not defined in this chunk; per __latex_name__ it should be
    # f(i) = i / log(i).  The index is shifted (i + 1), presumably to avoid the
    # division by log(1) = 0 at i = 1 -- TODO confirm against the real helper.
    f = function_f__for_intermediate5_sequences
    this_Ti = first_horizon + np.floor(np.exp(alpha * np.exp(f(float(i + 1), *args, **kwargs))))
    if not (np.isinf(this_Ti) or np.isnan(this_Ti)): this_Ti = int(this_Ti)
    return this_Ti
Ti_intermediate_i_by_logi.__latex_name__ = r"$f(i)=i / \log(i)$"
def last_term_operator_LT(Ti, max_i=10000):
    r""" For a certain function representing a doubling sequence, :math:`T: i \mapsto T_i`, this :func:`last_term_operator_LT` function returns the function :math:`L: T \mapsto L_T`, defined as:
    .. math:: \forall T\in\mathbb{N},\; L_T := \min\{ i \in\mathbb{N},\; T \leq T_i \}.
    :math:`L_T` is the only integer which satisfies :math:`T_{L_T - 1} < T \leq T_{L_T}`.
    - ``max_i`` bounds the linear search, so a sequence that never reaches ``T`` fails loudly instead of looping forever.
    """
    # Fix: the previous version returned an undefined name ``LT`` (NameError at call
    # time); define the closure it was meant to return.
    def LT(T, above_i=max_i):
        """ Linear search for the first index i with T <= Ti(i)."""
        i = 0
        while Ti(i) < T:
            i += 1
            if i >= above_i:
                raise ValueError("LT(T = {}) was unable to find a i <= {} such that Ti(i) >= T.".format(T, above_i)) # DEBUG
        return i
    return LT
import matplotlib.pyplot as plt
import seaborn as sns
def plot_doubling_sequences(
    i_min=1, i_max=30,
    list_of_f=(
        function_f__for_geometric_sequences,
        function_f__for_intermediate_sequences,
        function_f__for_intermediate2_sequences,
        function_f__for_intermediate3_sequences,
        function_f__for_intermediate4_sequences,
        function_f__for_exponential_sequences,
    ),
    label_of_f=(
        "Geometric doubling (d=0, e=1)",
        "Intermediate doubling (d=1/2, e=0)",
        "Intermediate doubling (d=1/3, e=0)",
        "Intermediate doubling (d=2/3, e=0)",
        "Intermediate doubling (d=1/2, e=1/2)",
        "Exponential doubling (d=1, e=0)",
    ),
    *args, **kwargs
):
    r""" Display a plot to illustrate the values of the :math:`T_i` as a function of :math:`i` for some i.
    - Can accept many functions f (and labels).
    """
    # NOTE(review): the default list_of_f references function_f__for_intermediate*
    # helpers that are not visible in this chunk (defaults are evaluated at def
    # time, so they must exist at module level) -- confirm upstream.
    # Make unique markers
    nb = len(list_of_f)
    allmarkers = ['o', 'D', 'v', 'p', '<', 's', '^', '*', 'h', '>']
    longlist = allmarkers * (1 + int(nb / float(len(allmarkers)))) # Cycle the good number of time
    markers = longlist[:nb] # Truncate
    # Make unique colors
    colors = sns.hls_palette(nb + 1)[:nb]
    fig = plt.figure()
    # plt.hold(True)
    i_s = np.arange(i_min, i_max)
    # now for each function f
    for num_f, (f, la) in enumerate(zip(list_of_f, label_of_f)):
        print("\n\nThe {}th function is referred to as {} and is {}".format(num_f, la, f)) # DEBUG
        # Ti_from_f turns f into the actual sequence i -> T_i.
        Ti = Ti_from_f(f)
        values_of_Ti = np.array([ Ti(i) for i in i_s ])
        plt.plot(i_s, values_of_Ti, label=la, lw=3, ms=3, color=colors[num_f], marker=markers[num_f])
    plt.legend()
    plt.xlabel(r"Value of the time horizon $i = {},...,{}$".format(i_min, i_max))
    plt.title(r"Comparison of the values of $T_i$")
    plt.show()
    return fig
def plot_quality_first_upper_bound(
    Tmin=10, Tmax=int(1e8), nbTs=100,
    gamma=0.0, delta=1.0, # XXX bound in RT <= log(T)
    # gamma=0.5, delta=0.0, # XXX bound in RT <= sqrt(T)
    # gamma=0.5, delta=0.5, # XXX bound in RT <= sqrt(T * log(T))
    # gamma=0.66667, delta=1.0, # XXX another weird bound in RT <= T^2/3 * log(T)
    list_of_f=(
        function_f__for_geometric_sequences,
        function_f__for_intermediate_sequences,
        function_f__for_intermediate2_sequences,
        function_f__for_intermediate3_sequences,
        function_f__for_intermediate4_sequences,
        function_f__for_exponential_sequences,
    ),
    label_of_f=(
        "Geometric doubling (d=0, e=1)",
        "Intermediate doubling (d=1/2, e=0)",
        "Intermediate doubling (d=1/3, e=0)",
        "Intermediate doubling (d=2/3, e=0)",
        "Intermediate doubling (d=1/2, e=1/2)",
        "Exponential doubling (d=1, e=0)",
    ),
    show_Ti_m_Tim1=True,
    # show_Ti_m_Tim1=False, # DEBUG
    *args, **kwargs
):
    r""" Display a plot to compare numerically between the following sum :math:`S` and the upper-bound we hope to have, :math:`T^{\gamma} (\log T)^{\delta}`, as a function of :math:`T` for some values between :math:`T_{\min}` and :math:`T_{\max}`:
    .. math:: S := \sum_{i=0}^{L_T} (T_i - T_{i-1})^{\gamma} (\log (T_i - T_{i-1}))^{\delta}.
    - Can accept many functions f (and labels).
    - Can use :math:`T_i` instead of :math:`T_i - T_{i-1}` if ``show_Ti_m_Tim1=False`` (default is to use the smaller possible bound, with difference of sequence lengths, :math:`T_i - T_{i-1}`).
    .. warning:: This is still ON GOING WORK.
    """
    # NOTE(review): the default list_of_f references function_f__for_intermediate*
    # helpers not visible in this chunk -- confirm they exist at module level.
    # Make unique markers
    nb = len(list_of_f)
    allmarkers = ['o', 'D', 'v', 'p', '<', 's', '^', '*', 'h', '>']
    longlist = allmarkers * (1 + int(nb / float(len(allmarkers)))) # Cycle the good number of time
    markers = longlist[:nb] # Truncate
    # Make unique colors
    colors = sns.hls_palette(nb + 1)[:nb]
    fig = plt.figure()
    # plt.hold(True)
    Ts = np.floor(np.linspace(Tmin, Tmax, num=nbTs))
    the_bound_we_want = (Ts ** gamma) * (np.log(Ts) ** delta)
    # plt.plot(Ts, the_bound_we_want, label=r"$T^{\gamma} (\log T)^{\delta}$", lw=3, ms=3, color=colors[0], marker=markers[0])
    # compute the sequence lengths to use, either T_i or T_i - T_{i-1}
    Ts_for_f = np.copy(Ts)
    if show_Ti_m_Tim1: Ts_for_f[1:] = np.diff(Ts)
    # now for each function f
    for num_f, (f, la) in enumerate(zip(list_of_f, label_of_f)):
        print("\n\nThe {}th function is referred to as {} and is {}".format(num_f, la, f)) # DEBUG
        Ti = Ti_from_f(f)
        LT = last_term_operator_LT(Ti)
        the_sum_we_have = np.zeros_like(Ts_for_f)
        for j, (Tj, dTj) in enumerate(zip(Ts, Ts_for_f)):
            LTj = LT(Tj)
            # NOTE(review): the summand below does not depend on the loop index i,
            # so this is just (LTj + 1) * (dTj**gamma * log(dTj)**delta) -- looks
            # intentional only as a placeholder ("ON GOING WORK" above); confirm.
            the_sum_we_have[j] = sum(
                (dTj ** gamma) * (np.log(dTj) ** delta)
                for i in range(0, LTj + 1)
            )
            print("For j = {}, Tj = {}, dTj = {}, gives LTj = {}, and the value of the sum from i=0 to LTj is = {}.".format(j, Tj, dTj, LTj, the_sum_we_have[j])) # DEBUG
        print("the_sum_we_have =", the_sum_we_have) # DEBUG
        plt.plot(Ts, the_sum_we_have / the_bound_we_want, label=la, lw=3, ms=3, color=colors[num_f], marker=markers[num_f])
    plt.legend()
    plt.xlabel(r"Value of the time horizon $T = {},...,{}$".format(Tmin, Tmax))
    str_of_Tj_or_dTj = "T_i - T_{i-1}" if show_Ti_m_Tim1 else "T_i"
    plt.title(r"Ratio of the sum $\sum_{i=0}^{L_T} (%s)^{\gamma} (\log(%s))^{\delta}$ and the upper-bound $T^{\gamma} \log(T)^{\delta}$, for $\gamma=%.3g$, $\delta=%.3g$." % (str_of_Tj_or_dTj, str_of_Tj_or_dTj, gamma, delta)) # DEBUG
    plt.show()
    return fig
# --- The interesting class
#: If the sequence Ti does not grow enough, artificially increase i until T_inext > T_i
MAX_NB_OF_TRIALS = 500
class DoublingTrickWrapper(BaseWrapperPolicy):
    r""" A policy that acts as a wrapper on another policy `P`, assumed to be *horizon dependent* (has to known :math:`T`), by implementing a "doubling trick".
    - Reference: [[What the Doubling Trick Can or Can't Do for Multi-Armed Bandits, Lilian Besson and Emilie Kaufmann, 2018]](https://hal.inria.fr/hal-01736357), to be presented soon.
    """
    # NOTE(review): __init__ is not visible in this chunk; the attributes used
    # below (_policy, _args, _kwargs, _first_horizon, _next_horizon, full_restart,
    # _i) are presumably initialized there -- confirm upstream.
    # --- pretty printing
    # --- Start game by creating new underlying policy
    def startGame(self):
        """ Initialize the policy for a new game."""
        # Deliberately calls the *grandparent* startGame, skipping
        # BaseWrapperPolicy's own version (see the commented-out line below).
        super(BaseWrapperPolicy, self).startGame()
        # super(DoublingTrickWrapper, self).startGame() # WARNING no
        self._i = 0 # reinitialize this
        self.horizon = self._first_horizon #: Last guess for the horizon
        # Recreate the underlying policy with the first horizon guess; fall back
        # to a horizon-less construction if the policy does not accept `horizon=`.
        try:
            self.policy = self._policy(self.nbArms, horizon=self.horizon, lower=self.lower, amplitude=self.amplitude, *self._args, **self._kwargs)
        except Exception as e:
            print("WARNING: Received exception {} when trying to create the underlying policy... maybe the 'horizon={}' keyword argument was not understood correctly? Retrying without it...".format(e, self.horizon)) # DEBUG
            self.policy = self._policy(self.nbArms, lower=self.lower, amplitude=self.amplitude, *self._args, **self._kwargs)
        # now also start game for the underlying policy
        self.policy.startGame()
    # --- Pass the call to the subpolicy
    def getReward(self, arm, reward):
        """ Pass the reward, as usual, update t and sometimes restart the underlying policy."""
        # print(" - At time t = {}, got a reward = {} from arm {} ...".format(self.t, arm, reward)) # DEBUG
        # super(DoublingTrickWrapper, self).getReward(arm, reward)
        self.t += 1
        self.policy.getReward(arm, reward)
        # Maybe we have to update the horizon?
        if self.t > self.horizon:
            self._i += 1
            new_horizon = self._next_horizon(self._i, self.horizon)
            # XXX <!-- small hack if the sequence is not growing fast enough
            # Keep bumping the index until the guess strictly increases, but at
            # most MAX_NB_OF_TRIALS times to avoid an infinite loop.
            nb_of_trials = 1
            while nb_of_trials < MAX_NB_OF_TRIALS and new_horizon <= self.horizon:
                self._i += 1
                nb_of_trials += 1
                new_horizon = self._next_horizon(self._i, self.horizon)
            # XXX end of small hack -->
            assert new_horizon > self.horizon, "Error: the new_horizon = {} is not > the current horizon = {} ...".format(new_horizon, self.horizon) # DEBUG
            # print(" - At time t = {}, a DoublingTrickWrapper class was running with current horizon T_i = {} and decided to use {} as a new horizon...".format(self.t, self.horizon, new_horizon)) # DEBUG
            self.horizon = new_horizon
            # now we have to update or restart the underlying policy
            if self.full_restart:
                try:
                    self.policy = self._policy(self.nbArms, horizon=self.horizon, lower=self.lower, amplitude=self.amplitude, *self._args, **self._kwargs)
                except Exception as e:
                    # print("Received exception {} when trying to create the underlying policy... maybe the 'horizon={}' keyword argument was not understood correctly? Retrying without it...".format(e, self.horizon)) # DEBUG
                    self.policy = self._policy(self.nbArms, lower=self.lower, amplitude=self.amplitude, *self._args, **self._kwargs)
                # now also start game for the underlying policy
                self.policy.startGame()
                # print(" ==> Fully restarting the underlying policy by creating a new object... Now it is = {} ...".format(self.policy)) # DEBUG
            else:
                # NOTE(review): despite the commented hint below, a '_horizon'
                # fallback is never attempted here -- a failed assignment is
                # silently ignored; confirm this is intended.
                if hasattr(self.policy, 'horizon'):
                    try:
                        self.policy.horizon = self.horizon
                    except AttributeError:
                        pass
                        # print("Warning: unable to update the parameter 'horizon' of the underlying policy {}... Trying '_horizon' ...".format(self.policy)) # DEBUG
                    # print(" ==> Just updating the horizon parameter of the underlying policy... Now it is = {} ...".format(self.policy)) # DEBUG
                # else:
                #     print(" ==> Nothing to do, as the underlying policy DOES NOT have a 'horizon' or '_horizon' parameter that could have been updated... Maybe you are not using a good policy? I suggest UCBH or ApproximatedFHGittins.") # DEBUG
# # --- Debugging
if __name__ == "__main__":
    import sys
    # `python <module>.py plot` draws the experimental comparison plots;
    # a plain run executes the doctests of this module instead.
    if "plot" in sys.argv[1:]:
        plt.ion()
        # plot_doubling_sequences()
        for gamma, delta in [
            (0.0, 1.0), # XXX bound in RT <= log(T)
            (0.5, 0.0), # XXX bound in RT <= sqrt(T)
            (0.5, 0.5), # XXX bound in RT <= sqrt(T * log(T))
            (0.66667, 1.0), # XXX another weird bound in RT <= T^2/3 * log(T)
        ]:
            plot_quality_first_upper_bound(gamma=gamma, delta=delta, show_Ti_m_Tim1=True)
            plot_quality_first_upper_bound(gamma=gamma, delta=delta, show_Ti_m_Tim1=False)
        sys.exit(0)
    # Code for debugging purposes.
    from doctest import testmod
    print("\nTesting automatically all the docstring written in each functions of this module :")
    testmod(verbose=True)
| 47.297904 | 346 | 0.65963 | # -*- coding: utf-8 -*-
r""" A policy that acts as a wrapper on another policy `P`, assumed to be *horizon dependent* (has to known :math:`T`), by implementing a "doubling trick":
- starts to assume that :math:`T=T_0=1000`, and run the policy :math:`P(T_0)`, from :math:`t=1` to :math:`t=T_0`,
- if :math:`t > T_0`, then the "doubling trick" is performed, by either re-initializing or just changing the parameter `horizon` of the policy P, for instance with :math:`T_2 = 10 \times T_0`,
- and keep doing this until :math:`t = T`.
.. note::
This is implemented in a very generic way, with simply a function `next_horizon(horizon)` that gives the next horizon to try when crossing the current guess.
It can be a simple linear function (`next_horizon(horizon) = horizon + 100`), a geometric growth to have the "real" doubling trick (`next_horizon(horizon) = horizon * 10`), or even functions growing exponentially fast (`next_horizon(horizon) = horizon ** 1.1`, `next_horizon(horizon) = horizon ** 1.5`, `next_horizon(horizon) = horizon ** 2`).
.. note::
My guess is that this "doubling trick" wrapping policy can only be efficient (for stochastic problems) if:
- the underlying policy `P` is a very efficient horizon-dependent algorithm, e.g., the :class:`Policies.ApproximatedFHGittins`,
- the growth function `next_horizon` is growing faster than any geometric rate, so that the number of refresh is :math:`o(\log T)` and not :math:`O(\log T)`.
.. seealso::
Reference: [[What the Doubling Trick Can or Can't Do for Multi-Armed Bandits, Lilian Besson and Emilie Kaufmann, 2018]](https://hal.inria.fr/hal-01736357), to be presented soon.
.. warning::
    Interface: If `FULL_RESTART=True`, the underlying algorithm is recreated at every breakpoint;
    if `FULL_RESTART=False` (default), its attribute `horizon` or `_horizon` is instead updated in place. Be sure that this is enough to really
    change the internal value used by the policy. Some policies use T only once to compute other parameters,
    which should be updated as well. A manual implementation of the `__setattr__` method can help.
"""
from __future__ import division, print_function # Python 2 compatibility
__author__ = "Lilian Besson"
__version__ = "0.9"
import numpy as np
try:
from .BaseWrapperPolicy import BaseWrapperPolicy
from .UCBH import UCBH
except ImportError:
from BaseWrapperPolicy import BaseWrapperPolicy
from UCBH import UCBH
try:
from .usenumba import jit # Import numba.jit or a dummy jit(f)=f
except (ValueError, ImportError, SystemError):
from usenumba import jit # Import numba.jit or a dummy jit(f)=f
#: Default horizon-dependent policy
default_horizonDependent_policy = UCBH
#: Default constant to know what to do when restarting the underlying policy with a new horizon parameter.
#:
#: - `True` means that a new policy, initialized from scratch, will be created at every breakpoint.
#: - `False` means that the same policy object is used but just its attribute `horizon` is updated (default).
FULL_RESTART = True
FULL_RESTART = False
#: Default horizon, used for the first step.
DEFAULT_FIRST_HORIZON = 200
#: Default stepsize for the arithmetic horizon progression.
ARITHMETIC_STEP = 10 * DEFAULT_FIRST_HORIZON
ARITHMETIC_STEP = 1 * DEFAULT_FIRST_HORIZON
@jit
def next_horizon__arithmetic(i, horizon):
r""" The arithmetic horizon progression function:
.. math::
T &\mapsto T + 100,\\
T_i &:= T_0 + 100 \times i.
"""
return horizon + ARITHMETIC_STEP
next_horizon__arithmetic.__latex_name__ = "arithm"
next_horizon__arithmetic.__latex_name__ = r"$T_i = {} + {} \times i$".format(DEFAULT_FIRST_HORIZON, ARITHMETIC_STEP)
#: Default multiplicative constant for the geometric horizon progression.
GEOMETRIC_STEP = 2
@jit
def next_horizon__geometric(i, horizon):
r""" The geometric horizon progression function:
.. math::
T &\mapsto T \times 2,\\
T_i &:= T_0 2^i.
"""
return horizon * GEOMETRIC_STEP
next_horizon__geometric.__latex_name__ = "geom"
next_horizon__geometric.__latex_name__ = r"$T_i = {} \times {}^i$".format(DEFAULT_FIRST_HORIZON, GEOMETRIC_STEP)
#: Default exponential constant for the exponential horizon progression.
EXPONENTIAL_STEP = 1.5
@jit
def next_horizon__exponential(i, horizon):
r""" The exponential horizon progression function:
.. math::
T &\mapsto \left\lfloor T^{1.5} \right\rfloor,\\
T_i &:= \left\lfloor T_0^{1.5^i} \right\rfloor.
"""
return int(np.floor(horizon ** EXPONENTIAL_STEP))
next_horizon__exponential.__latex_name__ = "exp"
next_horizon__exponential.__latex_name__ = r"$T_i = {}^{}$".format(DEFAULT_FIRST_HORIZON, r"{%.3g^i}" % EXPONENTIAL_STEP)
#: Default exponential constant for the slow exponential horizon progression.
SLOW_EXPONENTIAL_STEP = 1.1
@jit
def next_horizon__exponential_slow(i, horizon):
r""" The exponential horizon progression function:
.. math::
T &\mapsto \left\lfloor T^{1.1} \right\rfloor,\\
T_i &:= \left\lfloor T_0^{1.1^i} \right\rfloor.
"""
return int(np.floor(horizon ** SLOW_EXPONENTIAL_STEP))
next_horizon__exponential_slow.__latex_name__ = "slow exp"
next_horizon__exponential_slow.__latex_name__ = r"$T_i = {}^{}$".format(DEFAULT_FIRST_HORIZON, r"{%.3g^i}" % SLOW_EXPONENTIAL_STEP)
#: Default exponential constant for the fast exponential horizon progression.
FAST_EXPONENTIAL_STEP = 2
@jit
def next_horizon__exponential_fast(i, horizon):
r""" The exponential horizon progression function:
.. math::
T &\mapsto \lfloor T^{2} \rfloor,\\
T_i &:= \lfloor T_0^{2^i} \rfloor.
"""
return int(np.floor(horizon ** 2))
next_horizon__exponential_fast.__latex_name__ = "fast exp"
next_horizon__exponential_fast.__latex_name__ = r"$T_i = {}^{}$".format(DEFAULT_FIRST_HORIZON, r"{%.3g^i}" % FAST_EXPONENTIAL_STEP)
#: Default constant :math:`\alpha` for the generic exponential sequence.
ALPHA = 2
#: Default constant :math:`\beta` for the generic exponential sequence.
BETA = 2
def next_horizon__exponential_generic(i, horizon):
r""" The generic exponential horizon progression function:
.. math:: T_i := \left\lfloor \frac{T_0}{a} a^{b^i} \right\rfloor.
"""
return int((DEFAULT_FIRST_HORIZON / ALPHA) * ALPHA ** (BETA ** i))
# return int(ALPHA * np.floor(horizon ** BETA))
next_horizon__exponential_generic.__latex_name__ = r"exp $a={:.3g}$, $b={:.3g}$".format(ALPHA, BETA)
next_horizon__exponential_generic.__latex_name__ = r"$T_i = ({}/{}) {}^{}$".format(DEFAULT_FIRST_HORIZON, ALPHA, ALPHA, r"{%.3g^i}" % BETA)
#: Chose the default horizon growth function.
# default_next_horizon = next_horizon__arithmetic
# default_next_horizon = next_horizon__geometric
# default_next_horizon = next_horizon__geometric
# default_next_horizon = next_horizon__exponential_fast
default_next_horizon = next_horizon__exponential_slow
# --- Utility function
def breakpoints(next_horizon, first_horizon, horizon, debug=False):
    r""" Return the list of restart point (breakpoints), if starting from ``first_horizon`` to ``horizon`` with growth function ``next_horizon``.

    - Also return the gap between the last guess for horizon and the true horizon. This gap should not be too large.
    - Nicely print all the values if ``debug=True``.

    - First examples:

    >>> first_horizon = 1000
    >>> horizon = 30000
    >>> breakpoints(next_horizon__arithmetic, first_horizon, horizon)  # doctest: +ELLIPSIS
    ([1000, 1200, 1400, ..., 29800, 30000], 0)
    >>> breakpoints(next_horizon__geometric, first_horizon, horizon)
    ([1000, 2000, 4000, 8000, 16000, 32000], 2000)
    >>> breakpoints(next_horizon__exponential, first_horizon, horizon)
    ([1000, 31622], 1622)
    >>> breakpoints(next_horizon__exponential_slow, first_horizon, horizon)
    ([1000, 1995, 4265, 9838, 24671, 67827], 37827)
    >>> breakpoints(next_horizon__exponential_fast, first_horizon, horizon)
    ([1000, 1000000], 970000)

    - Second examples:

    >>> first_horizon = 5000
    >>> horizon = 1000000
    >>> breakpoints(next_horizon__arithmetic, first_horizon, horizon)  # doctest: +ELLIPSIS
    ([5000, 5200, ..., 999600, 999800, 1000000], 0)
    >>> breakpoints(next_horizon__geometric, first_horizon, horizon)
    ([5000, 10000, 20000, 40000, 80000, 160000, 320000, 640000, 1280000], 280000)
    >>> breakpoints(next_horizon__exponential, first_horizon, horizon)
    ([5000, 353553, 210223755], 209223755)
    >>> breakpoints(next_horizon__exponential_slow, first_horizon, horizon)
    ([5000, 11718, 29904, 83811, 260394, 906137, 3572014], 2572014)
    >>> breakpoints(next_horizon__exponential_fast, first_horizon, horizon)
    ([5000, 25000000], 24000000)

    - Third examples:

    >>> first_horizon = 10
    >>> horizon = 1123456
    >>> breakpoints(next_horizon__arithmetic, first_horizon, horizon)  # doctest: +ELLIPSIS
    ([10, 210, 410, ..., 1123210, 1123410, 1123610], 154)
    >>> breakpoints(next_horizon__geometric, first_horizon, horizon)
    ([10, 20, 40, 80, 160, 320, 640, 1280, 2560, 5120, 10240, 20480, 40960, 81920, 163840, 327680, 655360, 1310720], 187264)
    >>> breakpoints(next_horizon__exponential, first_horizon, horizon)
    ([10, 31, 172, 2255, 107082, 35040856], 33917400)
    >>> breakpoints(next_horizon__exponential_slow, first_horizon, horizon)
    ([10, 12, 15, 19, 25, 34, 48, 70, 107, 170, 284, 499, 928, 1837, 3895, 8903, 22104, 60106, 180638, 606024, 2294768], 1171312)
    >>> breakpoints(next_horizon__exponential_fast, first_horizon, horizon)
    ([10, 100, 10000, 100000000], 98876544)
    """
    # The first guess is clipped to at least 2 (a horizon of 0 or 1 is meaningless).
    nb_restart = 0
    current = max(first_horizon, 2)
    times = [current]
    if debug: print("\n\nFor the growth function {}, named '{}', first guess of the horizon = {} and true horizon = {} ...\n ==> The times will be:".format(next_horizon, getattr(next_horizon, '__latex_name__', '?'), first_horizon, horizon))
    # Grow the guess until it reaches (or overshoots) the true horizon.
    while current < horizon:
        current = next_horizon(nb_restart, current)
        nb_restart += 1
        times.append(current)
        if debug: print(" The {}th breakpoint is {} ...".format(nb_restart, current))  # DEBUG
    assert horizon <= current, "Error: the last guess for horizon = {} was found smaller than the true horizon = {}...".format(current, horizon)  # DEBUG
    gap = current - horizon
    if debug: print("This last guess for horizon = {} gives a gap = {} against the true horizon {}. Relative difference = {:.3%}...".format(current, gap, horizon, gap / float(horizon)))  # DEBUG
    return times, gap
# --- Experimental code to plot some doubling sequences and
# check numerically some inequalities :
# like controlling a sum Sigma_i=0^n u_i by a constant times to last term u_n
# and controlling the last term u_{L_T} as a function of T.

#: The constant c in front of the function f.
# NOTE: the repeated assignments below are experiment toggles; only the last
# one (0.5) is effective, the earlier values are kept for quick switching.
constant_c_for_the_functions_f = 1.0
constant_c_for_the_functions_f = 0.1
constant_c_for_the_functions_f = 0.5
def function_f__for_geometric_sequences(i, c=constant_c_for_the_functions_f):
    r""" For the *geometric* doubling sequences, :math:`f(i) = c \times \log(i)`."""
    # Clip to 0 for non-positive i, where the logarithm is undefined.
    return 0.0 if i <= 0 else c * np.log(i)
def function_f__for_exponential_sequences(i, c=constant_c_for_the_functions_f):
    r""" For the *exponential* doubling sequences, :math:`f(i) = c \times i`."""
    # Purely linear in the restart index i.
    return i * c
def function_f__for_generic_sequences(i, c=constant_c_for_the_functions_f, d=0.5, e=0.0):
    r""" For a certain *generic* family of doubling sequences, :math:`f(i) = c \times i^{d} \times (\log(i))^{e}`.

    - ``d, e = 0, 1`` gives :func:`function_f__for_geometric_sequences`,
    - ``d, e = 1, 0`` gives :func:`function_f__for_exponential_sequences`,
    - ``d, e = 0.5, 0`` gives an intermediate sequence, growing faster than any geometric sequence and slower than any exponential sequence,
    - any other combination has not been studied yet.

    .. warning:: ``d`` should most probably be smaller than 1.
    """
    i = float(i)
    # f is clipped to 0 for non-positive i (log would be undefined there).
    if i <= 0: return 0.0
    if e == 0:
        assert d > 0, "Error: invalid value of d = {} for function_f__for_generic_sequences.".format(d)  # DEBUG
        # Pure power term: skip the (log i)^e factor entirely.
        return c * (i ** d)
    if d == 0:
        assert e > 0, "Error: invalid value of e = {} for function_f__for_generic_sequences.".format(e)  # DEBUG
        # Pure logarithmic term: skip the i^d factor entirely.
        return c * ((np.log(i)) ** e)
    # General case: both factors are present.
    return c * (i ** d) * ((np.log(i)) ** e)
def function_f__for_intermediate_sequences(i):
    r""" Intermediate doubling sequence, :math:`f(i) = c \times i^{1/2}` (generic family with ``d=0.5, e=0``)."""
    return function_f__for_generic_sequences(i, c=constant_c_for_the_functions_f, d=0.5, e=0.0)
def function_f__for_intermediate2_sequences(i):
    r""" Intermediate doubling sequence, :math:`f(i) = c \times i^{0.3333}` (approximately :math:`i^{1/3}`; generic family with ``d=0.3333, e=0``)."""
    return function_f__for_generic_sequences(i, c=constant_c_for_the_functions_f, d=0.3333, e=0.0)
def function_f__for_intermediate3_sequences(i):
    r""" Intermediate doubling sequence, :math:`f(i) = c \times i^{0.6667}` (approximately :math:`i^{2/3}`; generic family with ``d=0.6667, e=0``)."""
    return function_f__for_generic_sequences(i, c=constant_c_for_the_functions_f, d=0.6667, e=0.0)
def function_f__for_intermediate4_sequences(i):
    r""" Intermediate doubling sequence, :math:`f(i) = c \times \sqrt{i \log(i)}` (generic family with ``d=0.5, e=0.5``)."""
    return function_f__for_generic_sequences(i, c=constant_c_for_the_functions_f, d=0.5, e=0.5)
def function_f__for_intermediate5_sequences(i):
    r""" Intermediate doubling sequence, :math:`f(i) = c \times i / \log(i)` (generic family with ``d=1, e=-1``)."""
    return function_f__for_generic_sequences(i, c=constant_c_for_the_functions_f, d=1, e=-1)
#: Value of the parameter :math:`\alpha` for the :func:`Ti_from_f` function.
# NOTE: the repeated assignments are experiment toggles; only the last one
# (0.5) is effective, the earlier values are kept for quick switching.
alpha_for_Ti = 0.1
alpha_for_Ti = 1.0
alpha_for_Ti = 0.5
def Ti_from_f(f, alpha=alpha_for_Ti, *args, **kwargs):
    r""" For any non-negative and increasing function :math:`f: i \mapsto f(i)`, the corresponding sequence is defined by:

    .. math:: \forall i\in\mathbb{N},\; T_i := \lfloor \exp(\alpha \times \exp(f(i))) \rfloor.

    .. warning:: :math:`f(i)` can need other parameters, see the examples above. They can be given as ``*args`` or ``**kwargs`` to :func:`Ti_from_f`.

    .. warning:: it should be computed otherwise, I should give :math:`i \mapsto \exp(f(i))` instead of :math:`f: i \mapsto f(i)`. I need to try as much as possible to reduce the risk of overflow errors!
    """
    def Ti(i):
        # Do not forget the floor!
        value = np.floor(np.exp(alpha * np.exp(f(float(i), *args, **kwargs))))
        # Only convert to a Python int when finite: inf/nan are returned as-is
        # so the caller can detect the overflow.
        if np.isinf(value) or np.isnan(value):
            return value
        return int(value)
    return Ti
def Ti_geometric(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):
    """ Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_geometric_sequences`."""
    # ``horizon`` is unused: it is only accepted to share the growth-function signature.
    inner = function_f__for_geometric_sequences(float(i), *args, **kwargs)
    value = first_horizon + np.floor(np.exp(alpha * np.exp(inner)))
    # Keep inf/nan as floats so overflow stays detectable; otherwise return an int.
    return value if (np.isinf(value) or np.isnan(value)) else int(value)

Ti_geometric.__latex_name__ = r"$f(i)=\log(i)$"
def Ti_exponential(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):
    """ Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_exponential_sequences`."""
    # ``horizon`` is unused: it is only accepted to share the growth-function signature.
    inner = function_f__for_exponential_sequences(float(i), *args, **kwargs)
    value = first_horizon + np.floor(np.exp(alpha * np.exp(inner)))
    # Keep inf/nan as floats so overflow stays detectable; otherwise return an int.
    return value if (np.isinf(value) or np.isnan(value)) else int(value)

Ti_exponential.__latex_name__ = r"$f(i)=i$"
def Ti_intermediate_sqrti(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):
    """ Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_intermediate_sequences`."""
    # ``horizon`` is unused: it is only accepted to share the growth-function signature.
    inner = function_f__for_intermediate_sequences(float(i), *args, **kwargs)
    value = first_horizon + np.floor(np.exp(alpha * np.exp(inner)))
    # Keep inf/nan as floats so overflow stays detectable; otherwise return an int.
    return value if (np.isinf(value) or np.isnan(value)) else int(value)

Ti_intermediate_sqrti.__latex_name__ = r"$f(i)=\sqrt{i}$"
def Ti_intermediate_i13(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):
    """ Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_intermediate2_sequences`."""
    # ``horizon`` is unused: it is only accepted to share the growth-function signature.
    inner = function_f__for_intermediate2_sequences(float(i), *args, **kwargs)
    value = first_horizon + np.floor(np.exp(alpha * np.exp(inner)))
    # Keep inf/nan as floats so overflow stays detectable; otherwise return an int.
    return value if (np.isinf(value) or np.isnan(value)) else int(value)

Ti_intermediate_i13.__latex_name__ = r"$f(i)=i^{1/3}$"
def Ti_intermediate_i23(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):
    """ Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_intermediate3_sequences`."""
    # ``horizon`` is unused: it is only accepted to share the growth-function signature.
    inner = function_f__for_intermediate3_sequences(float(i), *args, **kwargs)
    value = first_horizon + np.floor(np.exp(alpha * np.exp(inner)))
    # Keep inf/nan as floats so overflow stays detectable; otherwise return an int.
    return value if (np.isinf(value) or np.isnan(value)) else int(value)

Ti_intermediate_i23.__latex_name__ = r"$f(i)=i^{2/3}$"
def Ti_intermediate_i12_logi12(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):
    """ Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_intermediate4_sequences`."""
    # ``horizon`` is unused: it is only accepted to share the growth-function signature.
    inner = function_f__for_intermediate4_sequences(float(i), *args, **kwargs)
    value = first_horizon + np.floor(np.exp(alpha * np.exp(inner)))
    # Keep inf/nan as floats so overflow stays detectable; otherwise return an int.
    return value if (np.isinf(value) or np.isnan(value)) else int(value)

Ti_intermediate_i12_logi12.__latex_name__ = r"$f(i)=\sqrt{i \log(i)}$"
def Ti_intermediate_i_by_logi(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):
    """ Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_intermediate5_sequences`."""
    # ``horizon`` is unused: it is only accepted to share the growth-function signature.
    # NOTE: the shift ``i + 1`` (and not ``i``) is kept from the original code,
    # so the f(i) = i/log(i) term is never evaluated at i = 0.
    inner = function_f__for_intermediate5_sequences(float(i + 1), *args, **kwargs)
    value = first_horizon + np.floor(np.exp(alpha * np.exp(inner)))
    # Keep inf/nan as floats so overflow stays detectable; otherwise return an int.
    return value if (np.isinf(value) or np.isnan(value)) else int(value)

Ti_intermediate_i_by_logi.__latex_name__ = r"$f(i)=i / \log(i)$"
def last_term_operator_LT(Ti, max_i=10000):
    r""" For a certain function representing a doubling sequence, :math:`T: i \mapsto T_i`, this :func:`last_term_operator_LT` function returns the function :math:`L: T \mapsto L_T`, defined as:

    .. math:: \forall T\in\mathbb{N},\; L_T := \min\{ i \in\mathbb{N},\; T \leq T_i \}.

    :math:`L_T` is the only integer which satisfies :math:`T_{L_T - 1} < T \leq T_{L_T}`.

    - ``Ti``: callable ``i -> T_i``, assumed increasing.
    - ``max_i``: safety cap on the linear search; a :class:`ValueError` is raised if no suitable ``i`` is found below it.
    """
    def LT(T, max_i=max_i):
        i = 0
        # Linear search for the smallest i with Ti(i) >= T.
        while Ti(i) < T:
            i += 1
            if i >= max_i:
                # BUG FIX: the format string uses *named* fields {T} and {max_i},
                # so keyword arguments are required here. The previous positional
                # call ``.format(T, max_i)`` raised KeyError instead of the
                # intended ValueError.
                raise ValueError("LT(T={T}) was unable to find a i <= {max_i} such that T_i >= T.".format(T=T, max_i=max_i))  # DEBUG
        assert Ti(i - 1) < T <= Ti(i), "Error: i = {} was computed as LT for T = {} and Ti = {} but does not satisfy T_(i-1) < T <= T(i)".format(i, T, Ti)  # DEBUG
        # print("  For LT: i = {} was computed as LT for T = {} and Ti = {} and satisfies T(i-1) = {} < T <= T(i) = {}".format(i, T, Ti, Ti(i-1), Ti(i)))  # DEBUG
        return i
    return LT
import matplotlib.pyplot as plt
import seaborn as sns
def plot_doubling_sequences(
        i_min=1, i_max=30,
        list_of_f=(
            function_f__for_geometric_sequences,
            function_f__for_intermediate_sequences,
            function_f__for_intermediate2_sequences,
            function_f__for_intermediate3_sequences,
            function_f__for_intermediate4_sequences,
            function_f__for_exponential_sequences,
        ),
        label_of_f=(
            "Geometric doubling (d=0, e=1)",
            "Intermediate doubling (d=1/2, e=0)",
            "Intermediate doubling (d=1/3, e=0)",
            "Intermediate doubling (d=2/3, e=0)",
            "Intermediate doubling (d=1/2, e=1/2)",
            "Exponential doubling (d=1, e=0)",
        ),
        *args, **kwargs
    ):
    r""" Display a plot to illustrate the values of the :math:`T_i` as a function of :math:`i` for some i.

    - Can accept many functions f (and labels).
    - ``i_min``, ``i_max``: range of restart indexes :math:`i` to plot.
    - ``list_of_f`` / ``label_of_f``: parallel tuples of the f functions and their legend labels.
    - Returns the matplotlib figure object (and shows it).
    """
    # Make unique markers: cycle the base marker list enough times, then truncate.
    nb = len(list_of_f)
    allmarkers = ['o', 'D', 'v', 'p', '<', 's', '^', '*', 'h', '>']
    longlist = allmarkers * (1 + int(nb / float(len(allmarkers))))  # Cycle the good number of time
    markers = longlist[:nb]  # Truncate
    # Make unique colors (one per function, from a seaborn HLS palette).
    colors = sns.hls_palette(nb + 1)[:nb]
    fig = plt.figure()
    # plt.hold(True)
    i_s = np.arange(i_min, i_max)
    # now for each function f: build its T_i sequence and plot it.
    for num_f, (f, la) in enumerate(zip(list_of_f, label_of_f)):
        print("\n\nThe {}th function is referred to as {} and is {}".format(num_f, la, f))  # DEBUG
        Ti = Ti_from_f(f)
        values_of_Ti = np.array([ Ti(i) for i in i_s ])
        plt.plot(i_s, values_of_Ti, label=la, lw=3, ms=3, color=colors[num_f], marker=markers[num_f])
    plt.legend()
    plt.xlabel(r"Value of the time horizon $i = {},...,{}$".format(i_min, i_max))
    plt.title(r"Comparison of the values of $T_i$")
    plt.show()
    return fig
def plot_quality_first_upper_bound(
        Tmin=10, Tmax=int(1e8), nbTs=100,
        gamma=0.0, delta=1.0,  # XXX bound in RT <= log(T)
        # gamma=0.5, delta=0.0,  # XXX bound in RT <= sqrt(T)
        # gamma=0.5, delta=0.5,  # XXX bound in RT <= sqrt(T * log(T))
        # gamma=0.66667, delta=1.0,  # XXX another weird bound in RT <= T^2/3 * log(T)
        list_of_f=(
            function_f__for_geometric_sequences,
            function_f__for_intermediate_sequences,
            function_f__for_intermediate2_sequences,
            function_f__for_intermediate3_sequences,
            function_f__for_intermediate4_sequences,
            function_f__for_exponential_sequences,
        ),
        label_of_f=(
            "Geometric doubling (d=0, e=1)",
            "Intermediate doubling (d=1/2, e=0)",
            "Intermediate doubling (d=1/3, e=0)",
            "Intermediate doubling (d=2/3, e=0)",
            "Intermediate doubling (d=1/2, e=1/2)",
            "Exponential doubling (d=1, e=0)",
        ),
        show_Ti_m_Tim1=True,
        # show_Ti_m_Tim1=False,  # DEBUG
        *args, **kwargs
    ):
    r""" Display a plot to compare numerically between the following sum :math:`S` and the upper-bound we hope to have, :math:`T^{\gamma} (\log T)^{\delta}`, as a function of :math:`T` for some values between :math:`T_{\min}` and :math:`T_{\max}`:

    .. math:: S := \sum_{i=0}^{L_T} (T_i - T_{i-1})^{\gamma} (\log (T_i - T_{i-1}))^{\delta}.

    - Can accept many functions f (and labels).
    - Can use :math:`T_i` instead of :math:`T_i - T_{i-1}` if ``show_Ti_m_Tim1=False`` (default is to use the smaller possible bound, with difference of sequence lengths, :math:`T_i - T_{i-1}`).

    .. warning:: This is still ON GOING WORK.
    """
    # Make unique markers: cycle the base marker list enough times, then truncate.
    nb = len(list_of_f)
    allmarkers = ['o', 'D', 'v', 'p', '<', 's', '^', '*', 'h', '>']
    longlist = allmarkers * (1 + int(nb / float(len(allmarkers))))  # Cycle the good number of time
    markers = longlist[:nb]  # Truncate
    # Make unique colors (one per function, from a seaborn HLS palette).
    colors = sns.hls_palette(nb + 1)[:nb]
    fig = plt.figure()
    # plt.hold(True)
    Ts = np.floor(np.linspace(Tmin, Tmax, num=nbTs))
    the_bound_we_want = (Ts ** gamma) * (np.log(Ts) ** delta)
    # plt.plot(Ts, the_bound_we_want, label=r"$T^{\gamma} (\log T)^{\delta}$", lw=3, ms=3, color=colors[0], marker=markers[0])
    # compute the sequence lengths to use, either T_i or T_i - T_{i-1}
    Ts_for_f = np.copy(Ts)
    if show_Ti_m_Tim1: Ts_for_f[1:] = np.diff(Ts)
    # now for each function f
    for num_f, (f, la) in enumerate(zip(list_of_f, label_of_f)):
        print("\n\nThe {}th function is referred to as {} and is {}".format(num_f, la, f))  # DEBUG
        Ti = Ti_from_f(f)
        LT = last_term_operator_LT(Ti)
        the_sum_we_have = np.zeros_like(Ts_for_f)
        for j, (Tj, dTj) in enumerate(zip(Ts, Ts_for_f)):
            LTj = LT(Tj)
            # NOTE(review): the summand below does not depend on ``i``, so this
            # actually computes (LTj + 1) * (dTj ** gamma) * (log(dTj) ** delta),
            # not a sum over the successive differences T_i - T_{i-1} as written
            # in the docstring formula -- to be confirmed (the function is
            # explicitly flagged as ON GOING WORK above).
            the_sum_we_have[j] = sum(
                (dTj ** gamma) * (np.log(dTj) ** delta)
                for i in range(0, LTj + 1)
            )
            print("For j = {}, Tj = {}, dTj = {}, gives LTj = {}, and the value of the sum from i=0 to LTj is = {}.".format(j, Tj, dTj, LTj, the_sum_we_have[j]))  # DEBUG
        print("the_sum_we_have =", the_sum_we_have)  # DEBUG
        plt.plot(Ts, the_sum_we_have / the_bound_we_want, label=la, lw=3, ms=3, color=colors[num_f], marker=markers[num_f])
    plt.legend()
    plt.xlabel(r"Value of the time horizon $T = {},...,{}$".format(Tmin, Tmax))
    str_of_Tj_or_dTj = "T_i - T_{i-1}" if show_Ti_m_Tim1 else "T_i"
    plt.title(r"Ratio of the sum $\sum_{i=0}^{L_T} (%s)^{\gamma} (\log(%s))^{\delta}$ and the upper-bound $T^{\gamma} \log(T)^{\delta}$, for $\gamma=%.3g$, $\delta=%.3g$." % (str_of_Tj_or_dTj, str_of_Tj_or_dTj, gamma, delta))  # DEBUG
    plt.show()
    return fig
# --- The interesting class

#: If the sequence Ti does not grow enough, artificially increase i until T_inext > T_i
#  (safety cap on the number of forced increments of i inside getReward).
MAX_NB_OF_TRIALS = 500
class DoublingTrickWrapper(BaseWrapperPolicy):
    r""" A policy that acts as a wrapper on another policy `P`, assumed to be *horizon dependent* (has to known :math:`T`), by implementing a "doubling trick".

    - Reference: [[What the Doubling Trick Can or Can't Do for Multi-Armed Bandits, Lilian Besson and Emilie Kaufmann, 2018]](https://hal.inria.fr/hal-01736357), to be presented soon.
    """

    def __init__(self, nbArms,
                 full_restart=FULL_RESTART,
                 policy=default_horizonDependent_policy,
                 next_horizon=default_next_horizon,
                 first_horizon=DEFAULT_FIRST_HORIZON,
                 *args, **kwargs):
        """ Store the growth function and the first horizon guess, then create the underlying policy (through :meth:`startGame`).

        - ``full_restart``: if true, the underlying policy is recreated from scratch at each horizon change; otherwise only its ``horizon`` attribute is updated.
        - ``next_horizon``: growth function ``(i, horizon) -> new_horizon``.
        - ``first_horizon``: first guess for the horizon (clipped to at least 2).
        """
        super(DoublingTrickWrapper, self).__init__(nbArms, policy=policy, *args, **kwargs)
        self.full_restart = full_restart  #: Constant to know how to refresh the underlying policy.
        # --- Horizon
        self._i = 0  # index of the current guess in the doubling sequence
        self._next_horizon = next_horizon  # Function for the growing horizon
        self.next_horizon_name = getattr(next_horizon, '__latex_name__', '?')  #: Pretty string of the name of this growing function
        self._first_horizon = max(2, first_horizon)  # First guess for the horizon
        self.horizon = max(2, first_horizon)  #: Last guess for the horizon
        # XXX Force it, just for pretty printing...
        self.startGame()

    # --- pretty printing

    def __str__(self):
        """ Compact representation: ``DT(<growth name>)[<policy>]`` (``DTnr`` when not fully restarting)."""
        # remove the T0 part from string representation of the policy
        str_policy = str(self.policy)
        str_policy = str_policy.replace(r"($T={}$)".format(self._first_horizon), "")
        str_policy = str_policy.replace(r"$T={}$, ".format(self._first_horizon), "")
        return r"{}({})[{}]".format("DT" if self.full_restart else "DTnr", self.next_horizon_name, str_policy)

    # --- Start game by creating new underlying policy

    def startGame(self):
        """ Initialize the policy for a new game: reset the horizon guess and (re)create the underlying policy."""
        # NOTE: deliberately calls the *grandparent* startGame, skipping BaseWrapperPolicy's own.
        super(BaseWrapperPolicy, self).startGame()
        # super(DoublingTrickWrapper, self).startGame()  # WARNING no
        self._i = 0  # reinitialize this
        self.horizon = self._first_horizon  #: Last guess for the horizon
        # Try to pass horizon= to the underlying policy; fall back without it
        # if the policy's constructor does not accept that keyword.
        try:
            self.policy = self._policy(self.nbArms, horizon=self.horizon, lower=self.lower, amplitude=self.amplitude, *self._args, **self._kwargs)
        except Exception as e:
            print("WARNING: Received exception {} when trying to create the underlying policy... maybe the 'horizon={}' keyword argument was not understood correctly? Retrying without it...".format(e, self.horizon))  # DEBUG
            self.policy = self._policy(self.nbArms, lower=self.lower, amplitude=self.amplitude, *self._args, **self._kwargs)
        # now also start game for the underlying policy
        self.policy.startGame()

    # --- Pass the call to the subpolicy

    def getReward(self, arm, reward):
        """ Pass the reward, as usual, update t and sometimes restart the underlying policy."""
        # print("  - At time t = {}, got a reward = {} from arm {} ...".format(self.t, arm, reward))  # DEBUG
        # super(DoublingTrickWrapper, self).getReward(arm, reward)
        self.t += 1
        self.policy.getReward(arm, reward)
        # Maybe we have to update the horizon?
        if self.t > self.horizon:
            self._i += 1
            new_horizon = self._next_horizon(self._i, self.horizon)
            # XXX <!-- small hack if the sequence is not growing fast enough
            # (force a strictly increasing guess, capped at MAX_NB_OF_TRIALS attempts)
            nb_of_trials = 1
            while nb_of_trials < MAX_NB_OF_TRIALS and new_horizon <= self.horizon:
                self._i += 1
                nb_of_trials += 1
                new_horizon = self._next_horizon(self._i, self.horizon)
            # XXX end of small hack -->
            assert new_horizon > self.horizon, "Error: the new_horizon = {} is not > the current horizon = {} ...".format(new_horizon, self.horizon)  # DEBUG
            # print("  - At time t = {}, a DoublingTrickWrapper class was running with current horizon T_i = {} and decided to use {} as a new horizon...".format(self.t, self.horizon, new_horizon))  # DEBUG
            self.horizon = new_horizon
            # now we have to update or restart the underlying policy
            if self.full_restart:
                # Full restart: recreate the underlying policy from scratch,
                # with the same horizon-keyword fallback as in startGame.
                try:
                    self.policy = self._policy(self.nbArms, horizon=self.horizon, lower=self.lower, amplitude=self.amplitude, *self._args, **self._kwargs)
                except Exception as e:
                    # print("Received exception {} when trying to create the underlying policy... maybe the 'horizon={}' keyword argument was not understood correctly? Retrying without it...".format(e, self.horizon))  # DEBUG
                    self.policy = self._policy(self.nbArms, lower=self.lower, amplitude=self.amplitude, *self._args, **self._kwargs)
                # now also start game for the underlying policy
                self.policy.startGame()
                # print(" ==> Fully restarting the underlying policy by creating a new object... Now it is = {} ...".format(self.policy))  # DEBUG
            else:
                # No restart: just push the new horizon into the existing policy, if it has one.
                if hasattr(self.policy, 'horizon'):
                    try:
                        self.policy.horizon = self.horizon
                    except AttributeError:
                        pass
                        # print("Warning: unable to update the parameter 'horizon' of the underlying policy {}... Trying '_horizon' ...".format(self.policy))  # DEBUG
                    # print(" ==> Just updating the horizon parameter of the underlying policy... Now it is = {} ...".format(self.policy))  # DEBUG
                # else:
                #     print(" ==> Nothing to do, as the underlying policy DOES NOT have a 'horizon' or '_horizon' parameter that could have been updated... Maybe you are not using a good policy? I suggest UCBH or ApproximatedFHGittins.")  # DEBUG
# # --- Debugging
if __name__ == "__main__":
    import sys
    if "plot" in sys.argv[1:]:
        # Interactive mode: draw the comparison figure for several (gamma, delta)
        # regret-bound shapes, with and without the T_i - T_{i-1} differences.
        plt.ion()
        # plot_doubling_sequences()
        for gamma, delta in [
            (0.0, 1.0),  # XXX bound in RT <= log(T)
            (0.5, 0.0),  # XXX bound in RT <= sqrt(T)
            (0.5, 0.5),  # XXX bound in RT <= sqrt(T * log(T))
            (0.66667, 1.0),  # XXX another weird bound in RT <= T^2/3 * log(T)
        ]:
            plot_quality_first_upper_bound(gamma=gamma, delta=delta, show_Ti_m_Tim1=True)
            plot_quality_first_upper_bound(gamma=gamma, delta=delta, show_Ti_m_Tim1=False)
        sys.exit(0)

    # Code for debugging purposes: run every doctest in this module.
    from doctest import testmod
    print("\nTesting automatically all the docstring written in each functions of this module :")
    testmod(verbose=True)
| 2,754 | 0 | 221 |
2fa4bbb8ea840af3c1d5538c76d509d8cce1d549 | 738 | py | Python | LM-1221 intro-python-xml/py-primer-1/concat_sol.py | russellpope/devnet-express-dc | 4bdb2194abdee2fc950c2ff20e607aa4af9b68b5 | [
"Apache-2.0"
] | null | null | null | LM-1221 intro-python-xml/py-primer-1/concat_sol.py | russellpope/devnet-express-dc | 4bdb2194abdee2fc950c2ff20e607aa4af9b68b5 | [
"Apache-2.0"
] | null | null | null | LM-1221 intro-python-xml/py-primer-1/concat_sol.py | russellpope/devnet-express-dc | 4bdb2194abdee2fc950c2ff20e607aa4af9b68b5 | [
"Apache-2.0"
] | null | null | null | myVarRed= "Red"
myVarBlue= "Blue"
print("Roses are Red. " + "Violets are Blue.")
print("Roses are " + myVarRed + ". Violets are " + myVarBlue)
myStr = "Roses are Red. " + "Violets are Blue."
varStr = "Roses are " + myVarRed + ". Violets are " + myVarBlue
print(myStr)
print(varStr)
name = "Joe"
feet= 6
inches= 2
print("My name is " + name + ". I'm " + str(feet) + " feet " + str(inches) + " inches tall.")
myStr = "My name is " + name + ". I'm " + str(feet) + " feet " + str(inches) + " inches tall."
print(myStr)
print(myVarRed + " roses can grow up to " + str(feet) + " feet!")
myStr = myVarBlue + " violets can grow up to " + str(inches) + " inches!"
print(myStr)
print("The " + myVarBlue + " sky turned " + myVarRed + "!") | 25.448276 | 94 | 0.601626 | myVarRed= "Red"
myVarBlue= "Blue"
print("Roses are Red. " + "Violets are Blue.")
print("Roses are " + myVarRed + ". Violets are " + myVarBlue)
myStr = "Roses are Red. " + "Violets are Blue."
varStr = "Roses are " + myVarRed + ". Violets are " + myVarBlue
print(myStr)
print(varStr)
name = "Joe"
feet= 6
inches= 2
print("My name is " + name + ". I'm " + str(feet) + " feet " + str(inches) + " inches tall.")
myStr = "My name is " + name + ". I'm " + str(feet) + " feet " + str(inches) + " inches tall."
print(myStr)
print(myVarRed + " roses can grow up to " + str(feet) + " feet!")
myStr = myVarBlue + " violets can grow up to " + str(inches) + " inches!"
print(myStr)
print("The " + myVarBlue + " sky turned " + myVarRed + "!") | 0 | 0 | 0 |
842007e2d61d97d0fa4ac492f9400db322530dc3 | 35 | py | Python | haul3/haul/platforms/android/__init__.py | hotkeymuc/haul | 22533491e3a22ce9fabd81f281282a09880b400c | [
"MIT"
] | 2 | 2021-07-04T13:00:50.000Z | 2022-03-19T21:39:06.000Z | haul3/haul/platforms/android/__init__.py | hotkeymuc/haul | 22533491e3a22ce9fabd81f281282a09880b400c | [
"MIT"
] | null | null | null | haul3/haul/platforms/android/__init__.py | hotkeymuc/haul | 22533491e3a22ce9fabd81f281282a09880b400c | [
"MIT"
] | null | null | null | __all__ = [
'builder_android',
] | 11.666667 | 20 | 0.628571 | __all__ = [
'builder_android',
] | 0 | 0 | 0 |
353c5aebfd3ddcbd66eae6f02131a97bfbbcb204 | 168 | py | Python | notmuchtask/cli/globals.py | neuhalje/notmuch-task | 096231e841b5996c85dd3f50bee02d26989a0505 | [
"0BSD",
"MIT"
] | 7 | 2019-06-11T10:39:09.000Z | 2022-01-18T17:53:33.000Z | notmuchtask/cli/globals.py | neuhalje/notmuch-task | 096231e841b5996c85dd3f50bee02d26989a0505 | [
"0BSD",
"MIT"
] | 1 | 2021-11-03T14:43:27.000Z | 2021-11-03T14:43:27.000Z | notmuchtask/cli/globals.py | neuhalje/notmuch-task | 096231e841b5996c85dd3f50bee02d26989a0505 | [
"0BSD",
"MIT"
] | null | null | null | from configparser import RawConfigParser
CONTEXT = Context()
| 16.8 | 50 | 0.732143 | from configparser import RawConfigParser
class Context(object):
def set_config(self, config: RawConfigParser):
self.config = config
CONTEXT = Context()
| 54 | 1 | 49 |
f7bbd0a87756108eeffd6e1b15b2b36e5c8c7aed | 434 | py | Python | setup.py | jab/hip | a42c11e6a77190809e37c2337c50b86baca2c9d9 | [
"MIT"
] | null | null | null | setup.py | jab/hip | a42c11e6a77190809e37c2337c50b86baca2c9d9 | [
"MIT"
] | null | null | null | setup.py | jab/hip | a42c11e6a77190809e37c2337c50b86baca2c9d9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import re
import unasync # requires pip>=10.0 for PEP 518 support
from setuptools import setup
# Get the version (borrowed from SQLAlchemy)
base_path = os.path.dirname(__file__)
with open(os.path.join(base_path, "src", "urllib3", "__init__.py")) as fp:
version = re.match(r".*__version__ = \"(.*?)\"", fp.read(), re.S).group(1)
setup(version=version, cmdclass={"build_py": unasync.build_py})
| 27.125 | 78 | 0.705069 | #!/usr/bin/env python
import os
import re
import unasync # requires pip>=10.0 for PEP 518 support
from setuptools import setup
# Get the version (borrowed from SQLAlchemy)
base_path = os.path.dirname(__file__)
with open(os.path.join(base_path, "src", "urllib3", "__init__.py")) as fp:
version = re.match(r".*__version__ = \"(.*?)\"", fp.read(), re.S).group(1)
setup(version=version, cmdclass={"build_py": unasync.build_py})
| 0 | 0 | 0 |
ee0e2a8c1e86cb0392c34b96fcd74204d4f78e8a | 308 | py | Python | amplification/tasks/__init__.py | rmoehn/amplification | fd1bed7c4fb7df4b017b900aa91d185dbe55519c | [
"MIT"
] | 8 | 2020-02-18T03:16:06.000Z | 2022-03-06T15:44:21.000Z | amplification/tasks/__init__.py | PenroseTiles/amplification | ef33a91db0e43ee085443205a2b784666214a121 | [
"MIT"
] | null | null | null | amplification/tasks/__init__.py | PenroseTiles/amplification | ef33a91db0e43ee085443205a2b784666214a121 | [
"MIT"
] | 2 | 2020-03-01T06:11:48.000Z | 2021-03-25T21:44:08.000Z | from amplification.tasks.equals import EqualsTask
from amplification.tasks.graph import GraphTask, MidpointTask
from amplification.tasks.sum import SumTask
from amplification.tasks.eval import EvalTask, EvalSumTask
from amplification.tasks.iterate import IterTask
from amplification.tasks.sat import SatTask
| 44 | 61 | 0.87013 | from amplification.tasks.equals import EqualsTask
from amplification.tasks.graph import GraphTask, MidpointTask
from amplification.tasks.sum import SumTask
from amplification.tasks.eval import EvalTask, EvalSumTask
from amplification.tasks.iterate import IterTask
from amplification.tasks.sat import SatTask
| 0 | 0 | 0 |
49e49460de4690f51b58645fac52eb85f273af88 | 468 | py | Python | tests/14_class_based_test.py | MaximeBarroin/test-ci-niveau-3 | cb0d7feda257f7ac3199d696dcbe735791d342f8 | [
"MIT"
] | null | null | null | tests/14_class_based_test.py | MaximeBarroin/test-ci-niveau-3 | cb0d7feda257f7ac3199d696dcbe735791d342f8 | [
"MIT"
] | null | null | null | tests/14_class_based_test.py | MaximeBarroin/test-ci-niveau-3 | cb0d7feda257f7ac3199d696dcbe735791d342f8 | [
"MIT"
] | null | null | null | class TestSimpleClass(object):
"""
Classes can still be used to organize collections of test cases, with
each test being a Method on the Class, rather than a standalone function.
"""
x = 1
y = 2
| 27.529412 | 77 | 0.647436 | class TestSimpleClass(object):
"""
Classes can still be used to organize collections of test cases, with
each test being a Method on the Class, rather than a standalone function.
"""
x = 1
y = 2
def regular_method(self):
print("\n(This is a regular, non-test-case method.)")
def test_two_checking_method(self):
print("\nRunning TestSimpleClass.test_twos_method")
assert self.x != 2
assert self.y == 2
| 194 | 0 | 54 |
6cbfe283c09e5b6fcb128a6b0257ddfdd443c402 | 967 | py | Python | tests/test_config.py | evanyerburgh/pybo | d26bb298992276949227b8c4c596f2c209cbb507 | [
"Apache-2.0"
] | null | null | null | tests/test_config.py | evanyerburgh/pybo | d26bb298992276949227b8c4c596f2c209cbb507 | [
"Apache-2.0"
] | null | null | null | tests/test_config.py | evanyerburgh/pybo | d26bb298992276949227b8c4c596f2c209cbb507 | [
"Apache-2.0"
] | null | null | null | # coding: utf8
from pybo import Config
from pathlib import Path
| 35.814815 | 112 | 0.672182 | # coding: utf8
from pybo import Config
from pathlib import Path
def test_config():
config = Config()
# default config filename
assert config.filename.name == 'pybo.yaml' # config.filename is a Path object
# paths for trie content
main, custom = config.get_tok_data_paths('POS')
# each profile contains one or more sections
assert [m for m in main] == ['lexica_bo', 'pos']
# each element in a Path object leading to a resource file
assert isinstance(main['pos'][0], Path)
# custom files to overwrite the existing trie can be added as follows
assert len(custom) == 0
main, custom = config.get_tok_data_paths('POS', modifs='trie_data/')
assert [c for c in custom] == ['lexica_bo', 'lemmas'] == [t.parts[-1] for t in Path('trie_data/').glob('*')]
# overwriting the main profile
main, custom = config.get_tok_data_paths('trie_data/', mode='custom')
assert [m for m in main] == ['lexica_bo', 'lemmas']
| 879 | 0 | 23 |
7ffea491a3bbb3f79f3e942d8d09033126c952f0 | 262 | py | Python | src/c_function.py | Command-Master/MCCC | a49440bfd8542002aee35d41bee093dc8b51d781 | [
"MIT"
] | 6 | 2021-01-15T03:49:01.000Z | 2021-11-02T10:43:22.000Z | src/c_function.py | Command-Master/MCCC | a49440bfd8542002aee35d41bee093dc8b51d781 | [
"MIT"
] | null | null | null | src/c_function.py | Command-Master/MCCC | a49440bfd8542002aee35d41bee093dc8b51d781 | [
"MIT"
] | null | null | null | from globals_consts import NAMESPACE, cname | 23.818182 | 68 | 0.671756 | from globals_consts import NAMESPACE, cname
class Function:
size = 1
def cast(self):
raise NotImplementedError('liken\'t cast function pointers')
def __init__(self, args, ret_type):
self.args = args
self.ret_type = ret_type | 135 | 61 | 23 |
601709bc965d0c99ca67dea3094943e5837a89d1 | 423 | py | Python | city_scrapers/spiders/wayne_cow.py | just-hugo/city-scrapers-det | 76b52f11506c99e19b7fcaf135cc7570257a2b62 | [
"MIT"
] | 1 | 2020-10-01T18:27:59.000Z | 2020-10-01T18:27:59.000Z | city_scrapers/spiders/wayne_cow.py | just-hugo/city-scrapers-det | 76b52f11506c99e19b7fcaf135cc7570257a2b62 | [
"MIT"
] | 9 | 2019-11-30T21:33:24.000Z | 2021-04-07T19:26:47.000Z | city_scrapers/spiders/wayne_cow.py | just-hugo/city-scrapers-det | 76b52f11506c99e19b7fcaf135cc7570257a2b62 | [
"MIT"
] | 5 | 2019-12-20T17:29:10.000Z | 2021-02-14T01:32:26.000Z | from city_scrapers_core.spiders import CityScrapersSpider
from city_scrapers.mixins.wayne_commission import WayneCommissionMixin
| 32.538462 | 84 | 0.777778 | from city_scrapers_core.spiders import CityScrapersSpider
from city_scrapers.mixins.wayne_commission import WayneCommissionMixin
class WayneCommitteeWholeSpider(WayneCommissionMixin, CityScrapersSpider):
    """Spider configuration for Wayne County Committee of the Whole meetings.

    The scraping behaviour comes from WayneCommissionMixin; this subclass
    only supplies the agency-specific settings.
    """

    name = "wayne_cow"
    agency = "Wayne County Government"
    meeting_name = "Committee of the Whole"
    start_urls = [
        "https://www.waynecounty.com/elected/commission/committee-of-the-whole.aspx"
    ]
| 0 | 269 | 23 |
c65c404008e35e66385951320b626f514aff10e0 | 6,971 | py | Python | detection_3d/tools/pylatex_tools.py | coolzhangfeng/lidar_dynamic_objects_detection | 8d64cc75202208549adef6c854bbb03c2b3c465a | [
"MIT"
] | 1 | 2020-11-07T01:42:12.000Z | 2020-11-07T01:42:12.000Z | detection_3d/tools/pylatex_tools.py | coolzhangfeng/lidar_dynamic_objects_detection | 8d64cc75202208549adef6c854bbb03c2b3c465a | [
"MIT"
] | null | null | null | detection_3d/tools/pylatex_tools.py | coolzhangfeng/lidar_dynamic_objects_detection | 8d64cc75202208549adef6c854bbb03c2b3c465a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
__copyright__ = """
Copyright (c) 2020 Tananaev Denis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions: The above copyright notice and this permission
notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from pylatex import (
Document,
Command,
Section,
Subsection,
LongTable,
MultiColumn,
Figure,
SubFigure,
)
from pylatex.utils import italic, bold, NoEscape
import os
def create_long_table(
    doc,
    parameters,
    skip_parameters=None,
    table_specs=r"|p{0.45\linewidth}|p{0.45\linewidth}|",
    header=None,
):
    """
    Helper function to create a long (multi-page) table for parameters.

    Arguments:
        doc: document to add the table to
        parameters: parameters dict (name -> value)
        skip_parameters: optional collection of parameter names to leave out
        table_specs: latex specific table settings
        header: optional list of column header cells; defaults to
            bold "Parameter" / "Value"
    """
    # Resolve defaults per call: the original signature used mutable default
    # arguments ([] and a list built by bold() at import time), which are
    # shared across all calls.
    if skip_parameters is None:
        skip_parameters = []
    if header is None:
        header = [bold("Parameter"), bold("Value")]
    columns = len(header)
    with doc.create(LongTable(table_spec=table_specs)) as data_table:
        # Table header, repeated at the top of every page.
        data_table.add_hline()
        data_table.add_row(header)
        data_table.add_hline()
        data_table.end_table_header()
        data_table.add_row(
            (MultiColumn(columns, align="r", data="Continued on Next Page"),)
        )
        data_table.end_table_footer()
        data_table.add_row((MultiColumn(columns, align="r", data="End of Table"),))
        data_table.end_table_last_footer()
        # One "name | value" row per parameter that is not skipped.
        for item in parameters:
            if item not in skip_parameters:
                data_table.add_row([item, str(parameters[item])])
                data_table.add_hline()
def add_figure(doc, graphics_dir, image_name, width=r"0.5\linewidth"):
    """
    Insert a single captioned .png image into the document.

    Arguments:
        doc: document to add the figure to
        graphics_dir: directory containing the .png image
        image_name: the name of the image without extension (also used as
            the caption)
        width: width of the image on the document page
    """
    image_path = os.path.join(
        os.path.dirname(__file__), graphics_dir, "{}.png".format(image_name)
    )
    with doc.create(Figure(position="h!")) as figure:
        figure.add_image(image_path, width=NoEscape(width))
        figure.add_caption(image_name)
def add_sub_figure(doc, graphics_dir, image_names=None, captioning="Metrics"):
    """
    Helper function to place several images side by side as sub-figures.

    Arguments:
        doc: document to add the figure to
        graphics_dir: directory containing the .png images
        image_names: optional list of image names without extension
        captioning: global caption for the whole figure
    """
    # Resolve the default per call: the original used a shared mutable []
    # default argument.
    image_names = [] if image_names is None else image_names
    num_figures = len(image_names)
    # Nothing to draw; the original raised ZeroDivisionError here.
    if num_figures == 0:
        return
    # Split the line width evenly between the sub-figures.
    scale = 1.0 / num_figures
    sub_width = str(scale) + r"\linewidth"
    with doc.create(Figure(position="h!")) as fig:
        for image in image_names:
            image_filename = os.path.join(
                os.path.dirname(__file__), graphics_dir, image + ".png"
            )
            with doc.create(
                SubFigure(position="b", width=NoEscape(sub_width))
            ) as sub_fig:
                sub_fig.add_image(image_filename, width=NoEscape(r"\linewidth"))
                sub_fig.add_caption(image)
        fig.add_caption(captioning)
def generate_latex_pdf(
    graphics_dir,
    output_dir,
    report_dict,
    report_name="experiment_report",
    clean_tex=True,
):
    """
    The function generates latex/pdf report from json dictionary
    Arguments:
        graphics_dir: directory containing .png images for report
        output_dir: the directory to output report
        report_dict: dictionary with report information; per the reads
            below it must contain "parameters" (with "experiment_info",
            "optimizer", "scheduler" and "augment" sub-dicts),
            "best_epoch", "main_metric", "epoch_metrics" and "date"
        report_name: the name of output latex/pdf report file
        clean_tex: remove latex specific files
    """
    output_filename = os.path.join(output_dir, report_name)
    parameters = report_dict["parameters"]
    # NOTE: `report_name` is rebound here to the experiment name; the
    # parameter above only affects the output file name via
    # `output_filename`, already computed.
    report_name = parameters["experiment_info"]["experiment_name"].strip()
    description = parameters["experiment_info"]["description"].strip()
    authors = parameters["experiment_info"]["authors"].strip()
    best_epoch = report_dict["best_epoch"]
    main_metric = report_dict["main_metric"]
    # Metric is stored as a fraction; convert to percent for display.
    metric_value = float(report_dict["epoch_metrics"][best_epoch][main_metric]) * 100
    result = "\nResult: Best epoch {} with {:.2f}% {}.".format(
        best_epoch, metric_value, main_metric
    )
    # More details about page options: https://www.overleaf.com/learn/latex/page_size_and_margins
    geometry_options = {
        "tmargin": "1cm",
        "bmargin": "3cm",
        "lmargin": "2cm",
        "rmargin": "2cm",
        "includeheadfoot": True,
    }
    doc = Document(geometry_options=geometry_options, page_numbers=True)
    doc.preamble.append(Command("title", "Experiment Report"))
    doc.preamble.append(Command("author", authors))
    doc.preamble.append(Command("date", report_dict["date"]))
    doc.append(NoEscape(r"\maketitle"))
    # Parameters whose values are nested dicts get their own subsection
    # below instead of a row in the main table.
    skip_parameters = set(["experiment_info", "optimizer", "scheduler", "augment"])
    with doc.create(Section(report_name)):
        doc.append(italic("Description:\n"))
        doc.append(description)
        doc.append(bold(result))
        with doc.create(Subsection("Parameters")):
            create_long_table(doc, parameters, skip_parameters)
        with doc.create(Subsection("Optimizer")):
            create_long_table(doc, parameters["optimizer"])
        with doc.create(Subsection("Scheduler")):
            create_long_table(doc, parameters["scheduler"])
            add_figure(doc, graphics_dir, "learning_rate_scheduler")
        with doc.create(Subsection("Augmentations")):
            create_long_table(doc, parameters["augment"])
    with doc.create(Section("Data plots")):
        image_names = ["loss_epoch_metrics"]
        add_sub_figure(
            doc, graphics_dir, image_names=image_names, captioning="Epoch metrics"
        )
        # add_figure(doc, graphics_dir, "accuracy_epoch_metrics")
    doc.generate_pdf(output_filename, clean_tex=clean_tex)
| 37.278075 | 107 | 0.677091 | #!/usr/bin/env python
__copyright__ = """
Copyright (c) 2020 Tananaev Denis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions: The above copyright notice and this permission
notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from pylatex import (
Document,
Command,
Section,
Subsection,
LongTable,
MultiColumn,
Figure,
SubFigure,
)
from pylatex.utils import italic, bold, NoEscape
import os
def create_long_table(
    doc,
    parameters,
    skip_parameters=None,
    table_specs=r"|p{0.45\linewidth}|p{0.45\linewidth}|",
    header=None,
):
    """
    Helper function to create a long (multi-page) table for parameters.

    Arguments:
        doc: document to add the table to
        parameters: parameters dict (name -> value)
        skip_parameters: optional collection of parameter names to leave out
        table_specs: latex specific table settings
        header: optional list of column header cells; defaults to
            bold "Parameter" / "Value"
    """
    # Resolve defaults per call: the original signature used mutable default
    # arguments ([] and a list built by bold() at import time), which are
    # shared across all calls.
    if skip_parameters is None:
        skip_parameters = []
    if header is None:
        header = [bold("Parameter"), bold("Value")]
    columns = len(header)
    with doc.create(LongTable(table_spec=table_specs)) as data_table:
        # Table header, repeated at the top of every page.
        data_table.add_hline()
        data_table.add_row(header)
        data_table.add_hline()
        data_table.end_table_header()
        data_table.add_row(
            (MultiColumn(columns, align="r", data="Continued on Next Page"),)
        )
        data_table.end_table_footer()
        data_table.add_row((MultiColumn(columns, align="r", data="End of Table"),))
        data_table.end_table_last_footer()
        # One "name | value" row per parameter that is not skipped.
        for item in parameters:
            if item not in skip_parameters:
                data_table.add_row([item, str(parameters[item])])
                data_table.add_hline()
def add_figure(doc, graphics_dir, image_name, width=r"0.5\linewidth"):
    """
    Insert a single captioned .png image into the document.

    Arguments:
        doc: document to add the figure to
        graphics_dir: directory containing the .png image
        image_name: the name of the image without extension (also used as
            the caption)
        width: width of the image on the document page
    """
    image_path = os.path.join(
        os.path.dirname(__file__), graphics_dir, "{}.png".format(image_name)
    )
    with doc.create(Figure(position="h!")) as figure:
        figure.add_image(image_path, width=NoEscape(width))
        figure.add_caption(image_name)
def add_sub_figure(doc, graphics_dir, image_names=None, captioning="Metrics"):
    """
    Helper function to place several images side by side as sub-figures.

    Arguments:
        doc: document to add the figure to
        graphics_dir: directory containing the .png images
        image_names: optional list of image names without extension
        captioning: global caption for the whole figure
    """
    # Resolve the default per call: the original used a shared mutable []
    # default argument.
    image_names = [] if image_names is None else image_names
    num_figures = len(image_names)
    # Nothing to draw; the original raised ZeroDivisionError here.
    if num_figures == 0:
        return
    # Split the line width evenly between the sub-figures.
    scale = 1.0 / num_figures
    sub_width = str(scale) + r"\linewidth"
    with doc.create(Figure(position="h!")) as fig:
        for image in image_names:
            image_filename = os.path.join(
                os.path.dirname(__file__), graphics_dir, image + ".png"
            )
            with doc.create(
                SubFigure(position="b", width=NoEscape(sub_width))
            ) as sub_fig:
                sub_fig.add_image(image_filename, width=NoEscape(r"\linewidth"))
                sub_fig.add_caption(image)
        fig.add_caption(captioning)
def generate_latex_pdf(
    graphics_dir,
    output_dir,
    report_dict,
    report_name="experiment_report",
    clean_tex=True,
):
    """
    The function generates latex/pdf report from json dictionary
    Arguments:
        graphics_dir: directory containing .png images for report
        output_dir: the directory to output report
        report_dict: dictionary with report information; per the reads
            below it must contain "parameters" (with "experiment_info",
            "optimizer", "scheduler" and "augment" sub-dicts),
            "best_epoch", "main_metric", "epoch_metrics" and "date"
        report_name: the name of output latex/pdf report file
        clean_tex: remove latex specific files
    """
    output_filename = os.path.join(output_dir, report_name)
    parameters = report_dict["parameters"]
    # NOTE: `report_name` is rebound here to the experiment name; the
    # parameter above only affects the output file name via
    # `output_filename`, already computed.
    report_name = parameters["experiment_info"]["experiment_name"].strip()
    description = parameters["experiment_info"]["description"].strip()
    authors = parameters["experiment_info"]["authors"].strip()
    best_epoch = report_dict["best_epoch"]
    main_metric = report_dict["main_metric"]
    # Metric is stored as a fraction; convert to percent for display.
    metric_value = float(report_dict["epoch_metrics"][best_epoch][main_metric]) * 100
    result = "\nResult: Best epoch {} with {:.2f}% {}.".format(
        best_epoch, metric_value, main_metric
    )
    # More details about page options: https://www.overleaf.com/learn/latex/page_size_and_margins
    geometry_options = {
        "tmargin": "1cm",
        "bmargin": "3cm",
        "lmargin": "2cm",
        "rmargin": "2cm",
        "includeheadfoot": True,
    }
    doc = Document(geometry_options=geometry_options, page_numbers=True)
    doc.preamble.append(Command("title", "Experiment Report"))
    doc.preamble.append(Command("author", authors))
    doc.preamble.append(Command("date", report_dict["date"]))
    doc.append(NoEscape(r"\maketitle"))
    # Parameters whose values are nested dicts get their own subsection
    # below instead of a row in the main table.
    skip_parameters = set(["experiment_info", "optimizer", "scheduler", "augment"])
    with doc.create(Section(report_name)):
        doc.append(italic("Description:\n"))
        doc.append(description)
        doc.append(bold(result))
        with doc.create(Subsection("Parameters")):
            create_long_table(doc, parameters, skip_parameters)
        with doc.create(Subsection("Optimizer")):
            create_long_table(doc, parameters["optimizer"])
        with doc.create(Subsection("Scheduler")):
            create_long_table(doc, parameters["scheduler"])
            add_figure(doc, graphics_dir, "learning_rate_scheduler")
        with doc.create(Subsection("Augmentations")):
            create_long_table(doc, parameters["augment"])
    with doc.create(Section("Data plots")):
        image_names = ["loss_epoch_metrics"]
        add_sub_figure(
            doc, graphics_dir, image_names=image_names, captioning="Epoch metrics"
        )
        # add_figure(doc, graphics_dir, "accuracy_epoch_metrics")
    doc.generate_pdf(output_filename, clean_tex=clean_tex)
| 0 | 0 | 0 |
a9fb228073ab41e1a635258e8d29d33c452c0848 | 483 | py | Python | pandas/tests/indexes/conftest.py | naomi172839/pandas | c5f11ab79e5553a28a91fc7036c8dcbfc8cbc697 | [
"BSD-3-Clause"
] | 6 | 2020-09-10T15:03:25.000Z | 2021-04-01T22:48:33.000Z | pandas/tests/indexes/conftest.py | naomi172839/pandas | c5f11ab79e5553a28a91fc7036c8dcbfc8cbc697 | [
"BSD-3-Clause"
] | 1 | 2020-04-05T16:02:27.000Z | 2020-04-05T16:02:27.000Z | pandas/tests/indexes/conftest.py | naomi172839/pandas | c5f11ab79e5553a28a91fc7036c8dcbfc8cbc697 | [
"BSD-3-Clause"
] | 4 | 2020-02-07T05:05:32.000Z | 2020-05-11T06:06:17.000Z | import pytest
@pytest.fixture(params=[None, False])
def sort(request):
    """
    Valid values for the 'sort' parameter used in the Index
    setops methods (intersection, union, etc.)

    Caution:
        Don't confuse this one with the "sort" fixture used
        for DataFrame.append or concat. That one has
        parameters [True, False].

        We can't combine them as sort=True is not permitted
        in the Index setops methods.
    """
    return request.param
| 25.421053 | 59 | 0.656315 | import pytest
@pytest.fixture(params=[None, False])
def sort(request):
    """
    Valid values for the 'sort' parameter used in the Index
    setops methods (intersection, union, etc.)

    Caution:
        Don't confuse this one with the "sort" fixture used
        for DataFrame.append or concat. That one has
        parameters [True, False].

        We can't combine them as sort=True is not permitted
        in the Index setops methods.
    """
    return request.param
| 0 | 0 | 0 |
37b1f19266734c9c1a9e6de7cf06f8e23a8796a7 | 3,414 | py | Python | yardstick/tests/unit/common/messaging/test_payloads.py | upfront710/yardstick | 2c3898f2ca061962cedbfc7435f78b59aa39b097 | [
"Apache-2.0"
] | 28 | 2017-02-07T07:46:42.000Z | 2021-06-30T08:11:06.000Z | yardstick/tests/unit/common/messaging/test_payloads.py | upfront710/yardstick | 2c3898f2ca061962cedbfc7435f78b59aa39b097 | [
"Apache-2.0"
] | 6 | 2018-01-18T08:00:54.000Z | 2019-04-11T04:51:41.000Z | yardstick/tests/unit/common/messaging/test_payloads.py | upfront710/yardstick | 2c3898f2ca061962cedbfc7435f78b59aa39b097 | [
"Apache-2.0"
] | 46 | 2016-12-13T10:05:47.000Z | 2021-02-18T07:33:06.000Z | # Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from yardstick.common import exceptions
from yardstick.common.messaging import payloads
from yardstick.tests.unit import base as ut_base
| 41.13253 | 76 | 0.702402 | # Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from yardstick.common import exceptions
from yardstick.common.messaging import payloads
from yardstick.tests.unit import base as ut_base
class _DummyPayload(payloads.Payload):
    """Concrete Payload with a fixed field set, used only by these tests."""
    REQUIRED_FIELDS = {'version', 'key1', 'key2'}
class PayloadTestCase(ut_base.BaseUnitTestCase):
    """Behavioural tests for the Payload base class."""

    def test__init(self):
        # All required fields supplied: each becomes an attribute and is
        # tracked in the internal ``_fields`` set.
        payload = _DummyPayload(version=1, key1='value1', key2='value2')
        self.assertEqual(1, payload.version)
        self.assertEqual('value1', payload.key1)
        self.assertEqual('value2', payload.key2)
        self.assertEqual(3, len(payload._fields))

    def test__init_missing_required_fields(self):
        # Omitting a required field ('version') must raise.
        with self.assertRaises(exceptions.PayloadMissingAttributes):
            _DummyPayload(key1='value1', key2='value2')

    def test_obj_to_dict(self):
        # Object -> dict conversion keeps every field and its value.
        payload = _DummyPayload(version=1, key1='value1', key2='value2')
        payload_dict = payload.obj_to_dict()
        self.assertEqual({'version': 1, 'key1': 'value1', 'key2': 'value2'},
                         payload_dict)

    def test_dict_to_obj(self):
        # Building from a dict registers exactly the dict's keys as fields.
        _dict = {'version': 2, 'key1': 'value100', 'key2': 'value200'}
        payload = _DummyPayload.dict_to_obj(_dict)
        self.assertEqual(set(_dict.keys()), payload._fields)
class TrafficGeneratorPayloadTestCase(ut_base.BaseUnitTestCase):
    """Tests for the TrafficGeneratorPayload required-field contract."""

    def test_init(self):
        # version, iteration and kpi are all required and stored.
        tg_payload = payloads.TrafficGeneratorPayload(
            version=1, iteration=10, kpi={'key1': 'value1'})
        self.assertEqual(1, tg_payload.version)
        self.assertEqual(10, tg_payload.iteration)
        self.assertEqual({'key1': 'value1'}, tg_payload.kpi)
        self.assertEqual(3, len(tg_payload._fields))

    def test__init_missing_required_fields(self):
        # Any missing member of {version, iteration, kpi} must raise.
        with self.assertRaises(exceptions.PayloadMissingAttributes):
            payloads.TrafficGeneratorPayload(version=1, iteration=10)
        with self.assertRaises(exceptions.PayloadMissingAttributes):
            payloads.TrafficGeneratorPayload(iteration=10, kpi={})
        with self.assertRaises(exceptions.PayloadMissingAttributes):
            payloads.TrafficGeneratorPayload(iteration=10)
class RunnerPayloadTestCase(ut_base.BaseUnitTestCase):
    """Tests for the RunnerPayload required-field contract."""

    def test_init(self):
        # version and data are both required and stored.
        runner_payload = payloads.RunnerPayload(version=5,
                                                data={'key1': 'value1'})
        self.assertEqual(5, runner_payload.version)
        self.assertEqual({'key1': 'value1'}, runner_payload.data)

    def test__init_missing_required_fields(self):
        # Note: data=None still counts as "provided"; only an absent
        # keyword triggers the error.
        with self.assertRaises(exceptions.PayloadMissingAttributes):
            payloads.RunnerPayload(version=1)
        with self.assertRaises(exceptions.PayloadMissingAttributes):
            payloads.RunnerPayload(data=None)
        with self.assertRaises(exceptions.PayloadMissingAttributes):
            payloads.RunnerPayload()
| 2,208 | 170 | 308 |
73f61a2414248de653696a251d78ce78694bfc78 | 5,258 | py | Python | fileprocess/mergefile/filebuf.py | edonyzpc/toolkitem | 3a09ebf45eee8ecd9ff0e441392d5fc746b996e5 | [
"MIT"
] | 3 | 2015-04-20T08:17:09.000Z | 2020-07-07T15:22:06.000Z | fileprocess/mergefile/filebuf.py | edonyzpc/toolkitem | 3a09ebf45eee8ecd9ff0e441392d5fc746b996e5 | [
"MIT"
] | 24 | 2015-11-14T14:54:59.000Z | 2017-10-23T15:14:45.000Z | fileprocess/mergefile/filebuf.py | edonyzpc/toolkitem | 3a09ebf45eee8ecd9ff0e441392d5fc746b996e5 | [
"MIT"
] | 1 | 2017-02-28T06:35:44.000Z | 2017-02-28T06:35:44.000Z | # -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - edonyzpc@gmail.com
#
# twitter : @edonyzpc
#
# Last modified: 2015-05-10 15:02
#
# Filename: filebuf.py
#
# Description: All Rights Are Reserved
#
"""
class PyColor(object):
    """ This class is for colored print in the python interpreter!
    "F3" call Addpy() function to add this class which is defined
    in the .vimrc for vim Editor."""
    # NOTE(review): this copy of the class has no __init__, so
    # ``_newcolor``, ``warningcolor`` and ``endcolor`` are assumed to be
    # assigned elsewhere before use — confirm before relying on it.
    @property
    def new(self):
        """
        Customized Python Print Color.
        """
        return self._newcolor
    @new.setter
    def new(self,color_str):
        """
        New Color.

        color_str: presumably an ANSI escape sequence — verify at call site.
        """
        self._newcolor = color_str
    def disable(self):
        """
        Disable Color Print.
        """
        # NOTE(review): only these two attributes are cleared; ``new`` and
        # any other colour attributes are left unchanged — confirm intended.
        self.warningcolor = ''
        self.endcolor = ''
class FileBuf(object):
    """
    FILEBUF: collect the differing lines of two text files into an
    in-memory buffer and write them to a buffer file named `tmp`.
    """
    def __init__(self, file1, file2):
        """
        Initialize the instance attributes:
        [file1, file2, file1_line_num, file2_line_num, buffer]
        """
        self.file1 = file1
        self.file2 = file2
        # Use context managers so the handles are closed promptly (the
        # original left them open until garbage collection).
        with open(self.file1) as f1:
            self.file1_line_num = len(f1.readlines())
        with open(self.file2) as f2:
            self.file2_line_num = len(f2.readlines())
        self.buffer = []

    def mark_diff(self):
        """
        Mark up the different lines into the buffer.

        Lines are paired by position.  Each differing pair is stored as
        '<line number><marker><line>' for both files, where the marker is
        '-' when file1 has more lines and '+' otherwise (same convention
        as the original duplicated-branch implementation).
        """
        from itertools import zip_longest
        marker = '-' if self.file1_line_num > self.file2_line_num else '+'
        with open(self.file1) as f1, open(self.file2) as f2:
            for line_num, (line1, line2) in enumerate(
                    zip_longest(f1.readlines(), f2.readlines(), fillvalue=''),
                    1):
                if line1 == line2:
                    continue
                # Reading past the end of the shorter file yields '';
                # record it as an empty line.
                if line1 == '':
                    line1 = line1 + '\n'
                if line2 == '':
                    line2 = line2 + '\n'
                self.buffer.append(str(line_num) + marker + line1)
                self.buffer.append(str(line_num) + marker + line2)

    def write_file(self):
        """
        Write the buffer into buffer file `tmp` in the current directory.
        """
        with open('tmp', 'w') as file_write:
            for line in self.buffer:
                file_write.write(line)
if __name__ == '__main__':
    # Ad-hoc smoke test: diff the two sample files and dump the differing
    # lines to ./tmp.  NOTE(review): assumes f1.txt and f2.txt exist in
    # the working directory.
    test_file_buf = FileBuf('f2.txt', 'f1.txt')
    test_file_buf.mark_diff()
    test_file_buf.write_file()
| 35.288591 | 90 | 0.386649 | # -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - edonyzpc@gmail.com
#
# twitter : @edonyzpc
#
# Last modified: 2015-05-10 15:02
#
# Filename: filebuf.py
#
# Description: All Rights Are Reserved
#
"""
class PyColor(object):
    """ This class is for colored print in the python interpreter!
    "F3" call Addpy() function to add this class which is defined
    in the .vimrc for vim Editor."""
    def __init__(self):
        # Reference text describing the ANSI escape-code scheme used by
        # the colour attributes below; kept as instance data so it can
        # be printed as inline help.
        self.self_doc = r"""
        STYLE: \033['display model';'foreground';'background'm
        DETAILS:
        FOREGROUND BACKGOUND COLOR
        ---------------------------------------
        30 40 black
        31 41 red
        32 42 green
        33 43 yellow
        34 44 blue
        35 45 purple
        36 46 cyan
        37 47 white
        DISPLAY MODEL DETAILS
        -------------------------
        0 default
        1 highlight
        4 underline
        5 flicker
        7 reverse
        8 non-visiable
        e.g:
        \033[1;31;40m <!--1-highlight;31-foreground red;40-background black-->
        \033[0m <!--set all into default-->
        """
        # White on red: warnings.
        self.warningcolor = '\033[0;37;41m'
        # Red on green: tips.
        self.tipcolor = '\033[0;31;42m'
        # Reset sequence restoring terminal defaults.
        self.endcolor = '\033[0m'
        self._newcolor = ''
    @property
    def new(self):
        """
        Customized Python Print Color.
        """
        return self._newcolor
    @new.setter
    def new(self,color_str):
        """
        New Color.

        color_str: an ANSI escape sequence in the format documented by
        ``self_doc`` above.
        """
        self._newcolor = color_str
    def disable(self):
        """
        Disable Color Print.
        """
        # NOTE(review): only these two attributes are cleared; tipcolor
        # and ``new`` are left unchanged — confirm intended.
        self.warningcolor = ''
        self.endcolor = ''
class FileBuf(object):
    """
    FILEBUF: collect the differing lines of two text files into an
    in-memory buffer and write them to a buffer file named `tmp`.
    """
    def __init__(self, file1, file2):
        """
        Initialize the instance attributes:
        [file1, file2, file1_line_num, file2_line_num, buffer]
        """
        self.file1 = file1
        self.file2 = file2
        # Use context managers so the handles are closed promptly (the
        # original left them open until garbage collection).
        with open(self.file1) as f1:
            self.file1_line_num = len(f1.readlines())
        with open(self.file2) as f2:
            self.file2_line_num = len(f2.readlines())
        self.buffer = []

    def mark_diff(self):
        """
        Mark up the different lines into the buffer.

        Lines are paired by position.  Each differing pair is stored as
        '<line number><marker><line>' for both files, where the marker is
        '-' when file1 has more lines and '+' otherwise (same convention
        as the original duplicated-branch implementation).
        """
        from itertools import zip_longest
        marker = '-' if self.file1_line_num > self.file2_line_num else '+'
        with open(self.file1) as f1, open(self.file2) as f2:
            for line_num, (line1, line2) in enumerate(
                    zip_longest(f1.readlines(), f2.readlines(), fillvalue=''),
                    1):
                if line1 == line2:
                    continue
                # Reading past the end of the shorter file yields '';
                # record it as an empty line.
                if line1 == '':
                    line1 = line1 + '\n'
                if line2 == '':
                    line2 = line2 + '\n'
                self.buffer.append(str(line_num) + marker + line1)
                self.buffer.append(str(line_num) + marker + line2)

    def write_file(self):
        """
        Write the buffer into buffer file `tmp` in the current directory.
        """
        with open('tmp', 'w') as file_write:
            for line in self.buffer:
                file_write.write(line)
if __name__ == '__main__':
    # Ad-hoc smoke test: diff the two sample files and dump the differing
    # lines to ./tmp.  NOTE(review): assumes f1.txt and f2.txt exist in
    # the working directory.
    test_file_buf = FileBuf('f2.txt', 'f1.txt')
    test_file_buf.mark_diff()
    test_file_buf.write_file()
| 1,163 | 0 | 26 |
e82d465500d9c28a0dec8c45feff5a27425e0085 | 6,085 | py | Python | solr-admin-app/solr_admin/views/synonym_view.py | sumesh-aot/namex | 53e11aed5ea550b71b7b983f1b57b65db5a06766 | [
"Apache-2.0"
] | 4 | 2018-10-05T23:41:05.000Z | 2019-06-19T16:17:50.000Z | solr-admin-app/solr_admin/views/synonym_view.py | sumesh-aot/namex | 53e11aed5ea550b71b7b983f1b57b65db5a06766 | [
"Apache-2.0"
] | 635 | 2018-05-31T04:12:46.000Z | 2022-03-31T18:45:42.000Z | solr-admin-app/solr_admin/views/synonym_view.py | rarmitag/namex | 1b308bf96130619d4a61d44e075cc7ab177dc6cd | [
"Apache-2.0"
] | 71 | 2018-05-14T20:47:55.000Z | 2022-03-31T23:08:30.000Z |
import re
from wtforms import validators
from solr_admin import keycloak
from solr_admin import models
from solr_admin import solr
from solr_admin.models import synonym_audit
# The customized ModelView that is used for working with the synonyms.
from solr_admin.services.get_stems import get_stems
from solr_admin.services.get_multi_word_synonyms import get_multi_word_synonyms
from solr_admin.views.secured_view import SecuredView
# Validate the Synonyms Text and ensure it meets our standards.
# Check for multi-word synonyms
# Only a-z, 0-9, and space are allowed in the synonyms.
# Multiple spaces are not allowed.
# Duplicate values are not allowed.
# Ensure that there is more than one value.
# Put a CSV string into alphabetical order, and format nicely.
# Do the audit logging - we will write the complete record, not the delta (although the latter is possible).
| 34.185393 | 117 | 0.708463 |
import re
from wtforms import validators
from solr_admin import keycloak
from solr_admin import models
from solr_admin import solr
from solr_admin.models import synonym_audit
# The customized ModelView that is used for working with the synonyms.
from solr_admin.services.get_stems import get_stems
from solr_admin.services.get_multi_word_synonyms import get_multi_word_synonyms
from solr_admin.views.secured_view import SecuredView
class SynonymView(SecuredView):
    """Admin ModelView customisation for editing Solr synonym rows.

    Saving normalises and validates the synonyms text; create, update and
    delete operations are recorded in the audit log.
    """
    # We're unlikely to do multiple deletes, so just get rid of the checkboxes and the drop down for delete.
    action_disallowed_list = ['delete']
    # list of model columns
    column_list = ('category', 'synonyms_text', 'stems_text', 'comment', 'enabled')
    # Allow export as a CSV file.
    can_export = True
    # Allow the user to change the page size.
    can_set_page_size = True
    # Keep everything sorted, although realistically also we need to sort the values within a row before it is saved.
    column_default_sort = 'synonyms_text'
    # For some reason this needs to be initialized, but we will override it in is_accessible.
    column_editable_list = ['category', 'synonyms_text', 'comment']
    # List of visible columns
    form_columns = ['category', 'synonyms_text', 'comment']
    # Allow the user to filter on the category column.
    column_filters = ['category', 'synonyms_text', 'comment' ]
    # Search within the synonyms_text.
    column_searchable_list = ['category', 'synonyms_text', 'comment']
    # Use a custom create.html that warns the user about sorting what they enter.
    create_template = 'synonyms_create.html'
    # Use a custom edit.html that warns the user about sorting what they enter.
    edit_template = 'synonyms_edit.html'
    # Use a custom list.html that provides a page size drop down with extra choices.
    list_template = 'synonyms_list.html'

    # When the user goes to save the data, trim whitespace and put the list back into alphabetical order.
    def on_model_change(self, form, model, is_created):
        """Normalise the synonyms text, then validate it (may raise)."""
        model.synonyms_text = _alphabetize_csv(model.synonyms_text)
        _validate_synonyms_text(model.synonyms_text)

    # After saving the data create the audit log (we need to wait for a synonym.id value when creating)
    def after_model_change(self, form, model, is_created):
        """Audit the save, recompute the stems, and commit."""
        if is_created:
            _create_audit_log(model, 'CREATE')
        else:
            _create_audit_log(model, 'UPDATE')
        model.stems_text = get_stems(model.synonyms_text)
        self.session.commit()
        #solr.reload_solr_cores()

    # After deleting the data create the audit log.
    def after_model_delete(self, model):
        """Audit the deletion."""
        _create_audit_log(model, 'DELETE')
        #solr.reload_solr_cores()
# Validate the Synonyms Text and ensure it meets our standards.
def _validate_synonyms_text(synonyms_text: str) -> None:
    """Run every synonym validation on a comma-separated string.

    Raises validators.ValidationError on the first failed check.
    NOTE(review): the duplicates check only compares adjacent values, so
    this assumes the caller already alphabetised the string (see
    on_model_change / _alphabetize_csv) — confirm before reusing elsewhere.
    """
    # Split into comma-separated words.
    values = synonyms_text.split(',')
    # Strip leading and trailing spaces.
    values = list(map(str.strip, values))
    _validation_multi_word_check(values)
    _validation_character_check(values)
    _validation_multiple_spaces(values)
    _validation_duplicates_check(values)
    _validation_minimum_count(values)
# Check for multi-word synonyms
def _validation_multi_word_check(values) -> None:
    """Reject multi-word synonyms.

    Raises:
        validators.ValidationError: listing the offending values, as
        identified by get_multi_word_synonyms.
    """
    disallowed_values = get_multi_word_synonyms(values)
    if disallowed_values:
        raise validators.ValidationError(
            'Multi-word synonyms text cannot be processed here, please contact application support. ({})'
            .format(', '.join(disallowed_values)))
# Only a-z, 0-9, and space are allowed in the synonyms.
def _validation_character_check(values) -> None:
disallowed_values = []
for value in values:
if re.search('[^a-z0-9 ]', value):
disallowed_values.append(value)
if disallowed_values:
raise validators.ValidationError(
'Synonyms Text only allows lower case letters, digits, and space characters ({})'
.format(', '.join(disallowed_values)))
# Multiple spaces are not allowed.
def _validation_multiple_spaces(values) -> None:
    """Reject values containing consecutive embedded spaces.

    Raises:
        validators.ValidationError: listing the offending values.
    """
    multiple_spaces = []
    for value in values:
        # Two adjacent spaces mean the value was not properly normalised.
        if '  ' in value:
            multiple_spaces.append(value)
    if multiple_spaces:
        raise validators.ValidationError(
            'Synonyms Text does not allow multiple embedded spaces ({})'.format(', '.join(multiple_spaces)))
# Duplicate values are not allowed.
def _validation_duplicates_check(values) -> None:
duplicate_values = []
previous_value = ''
for value in values:
if value == previous_value:
duplicate_values.append(value)
previous_value = value
if duplicate_values:
# Remove duplicates, in the case of have triples or more.
duplicate_values = list(set(duplicate_values))
duplicate_values.sort()
raise validators.ValidationError(
'Synonyms Text does not allow duplicate values ({})'.format(', '.join(duplicate_values)))
# Ensure that there is more than one value.
def _validation_minimum_count(values) -> None:
if len(values) == 1:
raise validators.ValidationError('Synonyms Text must contain more than one value')
# Put a CSV string into alphabetical order, and format nicely.
def _alphabetize_csv(string: str) -> str:
# Split into comma-separated words.
values = string.split(',')
# Strip leading and trailing spaces.
values = list(map(str.strip, values))
# Remove empty strings.
values = list(filter(None, values))
# Sort alphabetically.
values.sort()
return ', '.join(values)
# Do the audit logging - we will write the complete record, not the delta (although the latter is possible).
def _create_audit_log(model, action) -> None:
    """Persist a full-record audit row for *model*.

    The acting username comes from Keycloak; *action* names the operation
    performed (supplied by the caller). The complete synonym record
    (id, category, text, comment, enabled flag) is snapshotted.
    """
    audit = synonym_audit.SynonymAudit(
        keycloak.Keycloak(None).get_username(), action, model.id, model.category, model.synonyms_text, model.comment,
        model.enabled)
    session = models.db.session
    session.add(audit)
    # Committed immediately — NOTE(review): confirm this does not conflict
    # with any transaction the caller has open on the same session.
    session.commit()
| 3,224 | 1,770 | 199 |
1af7590f916bd3c3dd5e8addcc07a4b7e95fd3b0 | 1,963 | py | Python | src/sgd.py | jmarrietar/suncet | 43868f7863e329e2db94f07e983f547add1bc495 | [
"MIT"
] | 413 | 2020-12-01T19:10:19.000Z | 2022-03-30T21:03:34.000Z | src/sgd.py | amalbinessa/suncet | 731547d727b8c94d06c08a7848b4955de3a70cea | [
"MIT"
] | 25 | 2021-05-03T01:26:39.000Z | 2022-03-24T01:13:08.000Z | src/sgd.py | amalbinessa/suncet | 731547d727b8c94d06c08a7848b4955de3a70cea | [
"MIT"
] | 57 | 2021-04-30T20:05:42.000Z | 2022-02-25T19:01:17.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from torch.optim import Optimizer
| 32.716667 | 83 | 0.532348 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from torch.optim import Optimizer
class SGD(Optimizer):
    """SGD with optional momentum, weight decay and Nesterov acceleration.

    NOTE(review): the (negative) learning rate is folded into the gradient
    *before* the momentum buffer is updated, so the buffer accumulates
    lr-scaled steps — presumably intentional; confirm against the recipe.
    """

    def __init__(self, params, lr, momentum=0, weight_decay=0, nesterov=False):
        # Validate hyper-parameters up front so bad configs fail loudly.
        if lr < 0.0:
            raise ValueError(f'Invalid learning rate: {lr}')
        if momentum < 0.0:
            raise ValueError(f'Invalid momentum value: {momentum}')
        if weight_decay < 0.0:
            raise ValueError(f'Invalid weight_decay value: {weight_decay}')
        if nesterov and (momentum == 0.0):
            raise ValueError(f'Nesterov needs momentum > 0')
        defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay,
                        nesterov=nesterov)
        super(SGD, self).__init__(params, defaults)

    @torch.no_grad()
    def step(self):
        """Perform a single optimization step over all parameter groups."""
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad
                if weight_decay != 0:
                    # .add() returns a new tensor; p.grad itself is untouched.
                    d_p = d_p.add(p, alpha=weight_decay)
                # NOTE(review): when weight_decay == 0, d_p aliases p.grad, so
                # this in-place scale mutates the gradient tensor — confirm
                # nothing downstream reads p.grad after step().
                d_p.mul_(-group['lr'])
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        # First step: buffer starts as a copy of the scaled step.
                        buf = param_state['momentum_buffer'] = d_p.clone().detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                p.add_(d_p)
        return None
| 1,618 | 75 | 23 |
f673acd60723c0325469403a0d884d2a910fe005 | 1,512 | py | Python | ks8-2/server/controllers/todo.py | nabbott2008/ks | 888b7dfc4541199f31eef74edaca477f3ce48e6e | [
"MIT"
] | 225 | 2017-11-20T21:21:37.000Z | 2022-03-10T14:15:17.000Z | ks8-2/server/controllers/todo.py | cglacet/ks | 7e5331218cff0bed4342c0f0318ff91cf2ecbd67 | [
"MIT"
] | 12 | 2017-11-23T10:56:11.000Z | 2019-09-04T08:19:13.000Z | ks8-2/server/controllers/todo.py | cglacet/ks | 7e5331218cff0bed4342c0f0318ff91cf2ecbd67 | [
"MIT"
] | 93 | 2017-11-02T09:33:24.000Z | 2022-02-28T11:29:01.000Z | 'todo list controller'
import json
from flask import request
from flask import jsonify
from flask import current_app
import data.database as database
def list_items():
    """GET handler: return every todo item as JSON."""
    current_app.logger.info('todo controller called, func: list')
    store = database.Database(current_app.config['CONN_STRING'])
    return jsonify({'todoList': store.get_items()})
def add():
    """POST handler: insert a new item, then return the refreshed list."""
    current_app.logger.info('todo controller called, func: add')
    payload = json.loads(request.data.decode("utf-8"))
    store = database.Database(current_app.config['CONN_STRING'])
    store.insert_item(payload['newItem'])
    return jsonify({'todoList': store.get_items()})
def delete():
    """POST handler: remove an item, then return the refreshed list."""
    current_app.logger.info('todo controller called, func: delete')
    payload = json.loads(request.data.decode("utf-8"))
    store = database.Database(current_app.config['CONN_STRING'])
    store.delete_item(payload['itemToDelete'])
    return jsonify({'todoList': store.get_items()})
def item_update():
    """POST handler: update an item, then return the refreshed list."""
    current_app.logger.info('todo controller called, func: item_update')
    payload = json.loads(request.data.decode('utf-8'))
    store = database.Database(current_app.config['CONN_STRING'])
    store.update_item(payload['itemToUpdate'])
    return jsonify({'todoList': store.get_items()})
| 24 | 72 | 0.666667 | 'todo list controller'
import json
from flask import request
from flask import jsonify
from flask import current_app
import data.database as database
def list_items():
    'GET todo list'
    # Open a per-request DB handle using the connection string from app config.
    current_app.logger.info('todo controller called, func: list')
    db = database.Database(current_app.config['CONN_STRING'])
    items = db.get_items()
    return jsonify({
        'todoList': items
    })
def add():
    'POST add item into todo list'
    current_app.logger.info('todo controller called, func: add')
    # Request body is JSON of the shape {"newItem": <item>}.
    data = json.loads(request.data.decode("utf-8"))
    item = data['newItem']
    db = database.Database(current_app.config['CONN_STRING'])
    db.insert_item(item)
    # Respond with the refreshed full list so the client can re-render.
    items = db.get_items()
    return jsonify({
        'todoList': items
    })
def delete():
    'POST delete item from list'
    current_app.logger.info('todo controller called, func: delete')
    # Request body is JSON of the shape {"itemToDelete": <item>}.
    data = json.loads(request.data.decode("utf-8"))
    item = data['itemToDelete']
    db = database.Database(current_app.config['CONN_STRING'])
    db.delete_item(item)
    # Respond with the refreshed full list so the client can re-render.
    items = db.get_items()
    return jsonify({
        'todoList': items
    })
def item_update():
    'POST update item in list'
    current_app.logger.info('todo controller called, func: item_update')
    # Request body is JSON of the shape {"itemToUpdate": <item>}.
    data = json.loads(request.data.decode('utf-8'))
    item = data['itemToUpdate']
    db = database.Database(current_app.config['CONN_STRING'])
    db.update_item(item)
    # Respond with the refreshed full list so the client can re-render.
    items = db.get_items()
    return jsonify({
        'todoList': items
    })
| 0 | 0 | 0 |
c515e211deadee410b0cb9289bd5dd7e0a11e5ad | 311 | py | Python | models/__init__.py | Louis-udm/Word-Grounded-Graph-Convolutional-Network | 4c90bff0ec8bcdd8994154eead0efb5a3caefca7 | [
"MIT"
] | null | null | null | models/__init__.py | Louis-udm/Word-Grounded-Graph-Convolutional-Network | 4c90bff0ec8bcdd8994154eead0efb5a3caefca7 | [
"MIT"
] | null | null | null | models/__init__.py | Louis-udm/Word-Grounded-Graph-Convolutional-Network | 4c90bff0ec8bcdd8994154eead0efb5a3caefca7 | [
"MIT"
] | null | null | null | """
The model package
"""
from models.gcn import GCN_2Layers
from models.mlp import MLP_1h, MLP_2h
from models.wgcn import WGCN, WGCN_embedding_classifier, WGCN_VocabEmbedding
__all__ = [
"MLP_1h",
"MLP_2h",
"GCN_2Layers",
"WGCN",
"WGCN_embedding_classifier",
"WGCN_VocabEmbedding",
]
| 18.294118 | 76 | 0.713826 | """
The model package
"""
from models.gcn import GCN_2Layers
from models.mlp import MLP_1h, MLP_2h
from models.wgcn import WGCN, WGCN_embedding_classifier, WGCN_VocabEmbedding
__all__ = [
"MLP_1h",
"MLP_2h",
"GCN_2Layers",
"WGCN",
"WGCN_embedding_classifier",
"WGCN_VocabEmbedding",
]
| 0 | 0 | 0 |
0d9377f8193646dade6cba7c117254caa1149f09 | 2,746 | py | Python | day14/day14.py | andreaskaempf/adventofcode2021 | 6a72c64e8258cf4e69b5d4602ae194cd27492017 | [
"MIT"
] | null | null | null | day14/day14.py | andreaskaempf/adventofcode2021 | 6a72c64e8258cf4e69b5d4602ae194cd27492017 | [
"MIT"
] | null | null | null | day14/day14.py | andreaskaempf/adventofcode2021 | 6a72c64e8258cf4e69b5d4602ae194cd27492017 | [
"MIT"
] | null | null | null | # Advent of Code 2021, Day 14
#
# Apply character insertion rules to a sequence of characters,
# runs out of memory if you try to build up character strings,
# so had to build dictionary of pairs of characters.
#
# AK, 14/12/2021
import time
t0 = time.time()
# Input file name
f = 'sample.txt'
f = 'input.txt'
# Read data, pattern on line 1, rules thereafter
lines = [l.strip() for l in open(f)]
patt = None
rules = {}
for l in lines:
if not patt:
patt = l
elif len(l) > 0: # "AB -> C" to dictionary
rules[l[:2]] = l[6]
# Parse starting pattern, and get frequency counts of letters,
# and number of transitions
trans = {} # "AB" -> count
counts = {} # 'A' -> count
prevC = None
for c in patt:
counts[c] = counts.get(c,0) + 1
if prevC:
t = prevC + c
trans[t] = trans.get(t,0) + 1
prevC = c
# Show starting data
print('Transitions:', trans)
print('Chars:', counts)
print('Rules:', rules)
# Do one iteration
# Do iterations (10 for Part 1, 40 for Part 2)
for i in range(40):
print('\nIteration', i+1)
iter()
print('Counts:', counts)
# Show final results
print('\nFinal character counts:', counts)
print('\nMax - min counts:', max(counts.values()) - min(counts.values()))
print(time.time() - t0, 'secs')
| 26.403846 | 76 | 0.593955 | # Advent of Code 2021, Day 14
#
# Apply character insertion rules to a sequence of characters,
# runs out of memory if you try to build up character strings,
# so had to build dictionary of pairs of characters.
#
# AK, 14/12/2021
import time
t0 = time.time()
# Input file name
f = 'sample.txt'
f = 'input.txt'
# Read data, pattern on line 1, rules thereafter
lines = [l.strip() for l in open(f)]
patt = None
rules = {}
for l in lines:
if not patt:
patt = l
elif len(l) > 0: # "AB -> C" to dictionary
rules[l[:2]] = l[6]
# Parse starting pattern, and get frequency counts of letters,
# and number of transitions
trans = {} # "AB" -> count
counts = {} # 'A' -> count
prevC = None
for c in patt:
counts[c] = counts.get(c,0) + 1
if prevC:
t = prevC + c
trans[t] = trans.get(t,0) + 1
prevC = c
# Show starting data
print('Transitions:', trans)
print('Chars:', counts)
print('Rules:', rules)
# Do one iteration
def iter():
    """Apply one round of the pair-insertion rules.

    Updates the module-level tables in place: ``counts`` (char -> total
    occurrences) and ``trans`` (adjacent character pair -> count).

    NOTE(review): shadows the builtin ``iter`` — consider renaming.
    NOTE(review): each pair key is visited once per call, so every
    ``insertions[pair]`` list holds exactly one char; the multi-char
    machinery below is effectively dead generality.
    """
    global trans, counts
    # Look at each transition pair in pattern
    insertions = {} # list of chars to insert inside each pair
    for pair in trans.keys():
        # Skip if no rule for this transition
        if not pair in rules:
            print('No rule for:', pair)
            continue
        # We add one letter for each time this pair appears in pattern
        c = rules[pair]
        counts[c] = counts.get(c,0) + trans[pair]
        # Get the char to insert between the pair, add to list of insertions
        # for this pair
        if not pair in insertions:
            insertions[pair] = []
        insertions[pair].append(c)
    # Now recalculate the transitions for future iterations from the
    # original pairs and the chars inserted
    trans2 = {}
    for pair in trans.keys():
        # Retain as-is if no insertions
        if not pair in insertions:
            trans2[pair] = trans[pair]
            continue
        # Transform the original pair into new transitions based on
        # inserted characters
        pins = insertions[pair]
        for i in range(len(pins)):
            if i == 0:
                p = pair[0] + pins[i]
            else:
                p = pins[i-1] + pins[i]
            trans2[p] = trans2.get(p,0) + trans[pair]
        # Last transition
        p = pins[-1] + pair[1]
        trans2[p] = trans2.get(p,0) + trans[pair]
    # This is the new transition list
    trans = trans2
# Do iterations (10 for Part 1, 40 for Part 2)
for i in range(40):
print('\nIteration', i+1)
iter()
print('Counts:', counts)
# Show final results
print('\nFinal character counts:', counts)
print('\nMax - min counts:', max(counts.values()) - min(counts.values()))
print(time.time() - t0, 'secs')
| 1,450 | 0 | 22 |
45bef6004eaadfa841b8189d4d5bb21998043cc2 | 9,636 | py | Python | train/inflammation-classifier.py | JorisRoels/mri-inflammation-prediction | 37e9d0e6f3b0a20a6b35667b1e2741b60280c2a4 | [
"MIT"
] | null | null | null | train/inflammation-classifier.py | JorisRoels/mri-inflammation-prediction | 37e9d0e6f3b0a20a6b35667b1e2741b60280c2a4 | [
"MIT"
] | null | null | null | train/inflammation-classifier.py | JorisRoels/mri-inflammation-prediction | 37e9d0e6f3b0a20a6b35667b1e2741b60280c2a4 | [
"MIT"
] | null | null | null | '''
This script illustrates training of an inflammation classifier for patches along SI joints
'''
import argparse
import os
import shutil
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from neuralnets.util.io import print_frm
from neuralnets.util.tools import set_seed
from neuralnets.util.augmentation import *
from pytorch_lightning.callbacks import ModelCheckpoint
from data.datasets import SPARCCDataset
from models.sparcc_cnn import Inflammation_CNN
from util.constants import *
factor = {INFLAMMATION_MODULE: 64, DEEP_INFLAMMATION_MODULE: 12, SPARCC_MODULE: 1, JOINT: 1}
if __name__ == '__main__':
# parse all the arguments
parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", help="Path to the directory that contains a preprocessed dataset", type=str,
required=True)
parser.add_argument("--si-joint-model", help="Path to the SI joint detection checkpoint", type=str, required=True)
parser.add_argument("--model-checkpoint-illium", help="Path to the illium U-Net checkpoint", type=str,
required=True)
parser.add_argument("--model-checkpoint-sacrum", help="Path to the sacrum U-Net checkpoint", type=str,
required=True)
parser.add_argument("--repetitions", help="Number of repetitions", type=int, default=1)
parser.add_argument("--folds", help="Number of folds (overrides repetitions parameter if provided)", type=int,
default=None)
# network parameters
parser.add_argument("--train_val_test_split", help="Train/validation/test split", type=str, default="0.50,0.75")
parser.add_argument("--backbone", help="Backbone feature extractor of the model", type=str, default='ResNet18')
parser.add_argument("--omit_t1_input", help="Boolean flag that omits usage of T1 slices", action='store_true',
default=False)
parser.add_argument("--omit_t2_input", help="Boolean flag that omits usage of T1 slices", action='store_true',
default=False)
parser.add_argument("--omit_weighting", help="Boolean flag that specifies ROI masking", action='store_true',
default=False)
# optimization parameters
parser.add_argument("--epochs", help="Number of training epochs", type=int, default=400)
parser.add_argument("--lr", help="Learning rate for the optimization", type=float, default=1e-3)
# compute parameters
parser.add_argument("--train_batch_size", help="Batch size during training", type=int, default=1)
parser.add_argument("--test_batch_size", help="Batch size during testing", type=int, default=1)
parser.add_argument("--num_workers", help="Amount of workers", type=int, default=12)
parser.add_argument("--gpus", help="Devices available for computing", type=str, default='0')
parser.add_argument("--accelerator", help="Acceleration engine for computations", type=str, default='dp')
# logging parameters
parser.add_argument("--log_dir", help="Logging directory", type=str, default='logs')
parser.add_argument("--log_freq", help="Frequency to log results", type=int, default=50)
parser.add_argument("--log_refresh_rate", help="Refresh rate for logging", type=int, default=1)
parser.add_argument("--seed", help="Seed for reproducibility", type=int, default=0)
parser.add_argument("--clean-up", help="Boolean flag that specifies ROI masking", action='store_true', default=False)
args = parser.parse_args()
args.train_val_test_split = [float(item) for item in args.train_val_test_split.split(',')]
metrics = []
if args.folds is not None:
reps = args.folds
range_split = ((0, 1), (0, 1))
else:
reps = args.repetitions
f = None
split = args.train_val_test_split
range_split = ((0, split[1]), (0, split[1]), (split[1], 1))
for i in range(reps):
rep_str = 'fold' if args.folds is not None else 'repetition'
print_frm('')
print_frm('Start processing %s %d/%d ...' % (rep_str, i+1, reps))
print_frm('')
"""
Fix seed (in case of cross validation), or increment if repetitive training
"""
if args.folds is not None:
set_seed(args.seed)
else:
args.seed = args.seed + 1
set_seed(args.seed)
"""
Load the data
"""
print_frm('Loading data')
transform = Compose([Rotate90(), Flip(prob=0.5, dim=0), Flip(prob=0.5, dim=1), RandomDeformation(),
AddNoise(sigma_max=0.05)])
train = SPARCCDataset(args.data_dir, args.si_joint_model, args.model_checkpoint_illium,
args.model_checkpoint_sacrum, range_split=range_split[0], folds=args.folds, f=i,
train=True, transform=transform, seed=args.seed, mode=INFLAMMATION_MODULE,
use_t1_input=not args.omit_t1_input, use_t2_input=not args.omit_t2_input,
apply_weighting=not args.omit_weighting)
val = SPARCCDataset(args.data_dir, args.si_joint_model, args.model_checkpoint_illium,
args.model_checkpoint_sacrum, range_split=range_split[1], folds=args.folds, f=i,
train=False, seed=args.seed, mode=INFLAMMATION_MODULE, use_t1_input=not args.omit_t1_input,
use_t2_input=not args.omit_t2_input, apply_weighting=not args.omit_weighting)
print_frm('Train data distribution: Infl: %.2f - Non-infl: %.2f' % (100*np.mean(train.q_scores),
100*np.mean(1-train.q_scores)))
print_frm('Val data distribution: Infl: %.2f - Non-infl: %.2f' % (100*np.mean(val.q_scores),
100*np.mean(1-val.q_scores)))
if args.folds is None:
test = SPARCCDataset(args.data_dir, args.si_joint_model, args.model_checkpoint_illium,
args.model_checkpoint_sacrum, range_split=range_split[2], seed=args.seed,
mode=INFLAMMATION_MODULE, use_t1_input=not args.omit_t1_input,
use_t2_input=not args.omit_t2_input, apply_weighting=not args.omit_weighting)
print_frm('Test data distribution: Infl: %.2f - Non-infl: %.2f' % (100*np.mean(test.q_scores),
100*np.mean(1-test.q_scores)))
"""
Build the network
"""
print_frm('Building the network')
weights = train.score_weights[0]
net = Inflammation_CNN(backbone=args.backbone, lr=args.lr, use_t1_input=not args.omit_t1_input,
use_t2_input=not args.omit_t2_input, weights=weights)
print_frm('Balancing weights for loss function: %s' % (weights))
"""
Train the inflammation network
"""
print_frm('Starting training of the inflammation network')
trainer = _train_module(net, train, val, args)
print_frm('Testing network')
_test_module(trainer, net, val if args.folds is not None else test, args)
metrics.append([float(trainer.logged_metrics['test/' + m].cpu()) for m in METRICS])
"""
Save the final model
"""
print_frm('Saving final model')
shutil.copyfile(trainer.checkpoint_callback.best_model_path, os.path.join(trainer.log_dir, OPTIMAL_CKPT))
"""
Clean up
"""
print_frm('Cleaning up')
if args.clean_up:
os.system('rm -r ' + os.path.join(trainer.log_dir, 'checkpoints'))
"""
Report final performance results
"""
metrics = np.asarray(metrics)
metrics_avg = np.mean(metrics, axis=0)
print_frm('Final performance report:')
print_frm('=========================')
for i, m in enumerate(METRICS):
print_frm(' %s: %f' % (m, metrics_avg[i]))
| 50.450262 | 121 | 0.642798 | '''
This script illustrates training of an inflammation classifier for patches along SI joints
'''
import argparse
import os
import shutil
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from neuralnets.util.io import print_frm
from neuralnets.util.tools import set_seed
from neuralnets.util.augmentation import *
from pytorch_lightning.callbacks import ModelCheckpoint
from data.datasets import SPARCCDataset
from models.sparcc_cnn import Inflammation_CNN
from util.constants import *
factor = {INFLAMMATION_MODULE: 64, DEEP_INFLAMMATION_MODULE: 12, SPARCC_MODULE: 1, JOINT: 1}
def _train_module(net, train_data, val_data, args):
    """Train *net* on the inflammation task and return the fitted pl.Trainer.

    Both datasets are switched to INFLAMMATION_MODULE mode; batch sizes
    are scaled by the module's `factor` entry.
    """
    train_data.mode = INFLAMMATION_MODULE
    val_data.mode = INFLAMMATION_MODULE
    train_loader = DataLoader(train_data, batch_size=factor[INFLAMMATION_MODULE]*args.train_batch_size,
                              num_workers=args.num_workers, pin_memory=True, shuffle=True)
    val_loader = DataLoader(val_data, batch_size=factor[INFLAMMATION_MODULE]*args.test_batch_size,
                            num_workers=args.num_workers, pin_memory=True)
    # Keep the 5 best checkpoints ranked by validation ROC-AUC.
    checkpoint_callback = ModelCheckpoint(save_top_k=5, verbose=True, monitor='val/roc-auc', mode='max')
    trainer = pl.Trainer(max_epochs=args.epochs, gpus=args.gpus, accelerator=args.accelerator,
                         default_root_dir=args.log_dir, flush_logs_every_n_steps=args.log_freq,
                         log_every_n_steps=args.log_freq, callbacks=[checkpoint_callback],
                         progress_bar_refresh_rate=args.log_refresh_rate, num_sanity_val_steps=0, deterministic=True)
    trainer.fit(net, train_loader, val_loader)
    return trainer
def _test_module(trainer, net, test_data, args):
    """Evaluate *net* on *test_data* using the best checkpoint's weights."""
    test_data.mode = INFLAMMATION_MODULE
    # Restore the best-scoring checkpoint before testing.
    net.load_state_dict(torch.load(trainer.checkpoint_callback.best_model_path)['state_dict'])
    test_loader = DataLoader(test_data, batch_size=factor[INFLAMMATION_MODULE]*args.test_batch_size,
                             num_workers=args.num_workers, pin_memory=True)
    trainer.test(net, test_loader)
    return trainer
if __name__ == '__main__':
# parse all the arguments
parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", help="Path to the directory that contains a preprocessed dataset", type=str,
required=True)
parser.add_argument("--si-joint-model", help="Path to the SI joint detection checkpoint", type=str, required=True)
parser.add_argument("--model-checkpoint-illium", help="Path to the illium U-Net checkpoint", type=str,
required=True)
parser.add_argument("--model-checkpoint-sacrum", help="Path to the sacrum U-Net checkpoint", type=str,
required=True)
parser.add_argument("--repetitions", help="Number of repetitions", type=int, default=1)
parser.add_argument("--folds", help="Number of folds (overrides repetitions parameter if provided)", type=int,
default=None)
# network parameters
parser.add_argument("--train_val_test_split", help="Train/validation/test split", type=str, default="0.50,0.75")
parser.add_argument("--backbone", help="Backbone feature extractor of the model", type=str, default='ResNet18')
parser.add_argument("--omit_t1_input", help="Boolean flag that omits usage of T1 slices", action='store_true',
default=False)
parser.add_argument("--omit_t2_input", help="Boolean flag that omits usage of T1 slices", action='store_true',
default=False)
parser.add_argument("--omit_weighting", help="Boolean flag that specifies ROI masking", action='store_true',
default=False)
# optimization parameters
parser.add_argument("--epochs", help="Number of training epochs", type=int, default=400)
parser.add_argument("--lr", help="Learning rate for the optimization", type=float, default=1e-3)
# compute parameters
parser.add_argument("--train_batch_size", help="Batch size during training", type=int, default=1)
parser.add_argument("--test_batch_size", help="Batch size during testing", type=int, default=1)
parser.add_argument("--num_workers", help="Amount of workers", type=int, default=12)
parser.add_argument("--gpus", help="Devices available for computing", type=str, default='0')
parser.add_argument("--accelerator", help="Acceleration engine for computations", type=str, default='dp')
# logging parameters
parser.add_argument("--log_dir", help="Logging directory", type=str, default='logs')
parser.add_argument("--log_freq", help="Frequency to log results", type=int, default=50)
parser.add_argument("--log_refresh_rate", help="Refresh rate for logging", type=int, default=1)
parser.add_argument("--seed", help="Seed for reproducibility", type=int, default=0)
parser.add_argument("--clean-up", help="Boolean flag that specifies ROI masking", action='store_true', default=False)
args = parser.parse_args()
args.train_val_test_split = [float(item) for item in args.train_val_test_split.split(',')]
metrics = []
if args.folds is not None:
reps = args.folds
range_split = ((0, 1), (0, 1))
else:
reps = args.repetitions
f = None
split = args.train_val_test_split
range_split = ((0, split[1]), (0, split[1]), (split[1], 1))
for i in range(reps):
rep_str = 'fold' if args.folds is not None else 'repetition'
print_frm('')
print_frm('Start processing %s %d/%d ...' % (rep_str, i+1, reps))
print_frm('')
"""
Fix seed (in case of cross validation), or increment if repetitive training
"""
if args.folds is not None:
set_seed(args.seed)
else:
args.seed = args.seed + 1
set_seed(args.seed)
"""
Load the data
"""
print_frm('Loading data')
transform = Compose([Rotate90(), Flip(prob=0.5, dim=0), Flip(prob=0.5, dim=1), RandomDeformation(),
AddNoise(sigma_max=0.05)])
train = SPARCCDataset(args.data_dir, args.si_joint_model, args.model_checkpoint_illium,
args.model_checkpoint_sacrum, range_split=range_split[0], folds=args.folds, f=i,
train=True, transform=transform, seed=args.seed, mode=INFLAMMATION_MODULE,
use_t1_input=not args.omit_t1_input, use_t2_input=not args.omit_t2_input,
apply_weighting=not args.omit_weighting)
val = SPARCCDataset(args.data_dir, args.si_joint_model, args.model_checkpoint_illium,
args.model_checkpoint_sacrum, range_split=range_split[1], folds=args.folds, f=i,
train=False, seed=args.seed, mode=INFLAMMATION_MODULE, use_t1_input=not args.omit_t1_input,
use_t2_input=not args.omit_t2_input, apply_weighting=not args.omit_weighting)
print_frm('Train data distribution: Infl: %.2f - Non-infl: %.2f' % (100*np.mean(train.q_scores),
100*np.mean(1-train.q_scores)))
print_frm('Val data distribution: Infl: %.2f - Non-infl: %.2f' % (100*np.mean(val.q_scores),
100*np.mean(1-val.q_scores)))
if args.folds is None:
test = SPARCCDataset(args.data_dir, args.si_joint_model, args.model_checkpoint_illium,
args.model_checkpoint_sacrum, range_split=range_split[2], seed=args.seed,
mode=INFLAMMATION_MODULE, use_t1_input=not args.omit_t1_input,
use_t2_input=not args.omit_t2_input, apply_weighting=not args.omit_weighting)
print_frm('Test data distribution: Infl: %.2f - Non-infl: %.2f' % (100*np.mean(test.q_scores),
100*np.mean(1-test.q_scores)))
"""
Build the network
"""
print_frm('Building the network')
weights = train.score_weights[0]
net = Inflammation_CNN(backbone=args.backbone, lr=args.lr, use_t1_input=not args.omit_t1_input,
use_t2_input=not args.omit_t2_input, weights=weights)
print_frm('Balancing weights for loss function: %s' % (weights))
"""
Train the inflammation network
"""
print_frm('Starting training of the inflammation network')
trainer = _train_module(net, train, val, args)
print_frm('Testing network')
_test_module(trainer, net, val if args.folds is not None else test, args)
metrics.append([float(trainer.logged_metrics['test/' + m].cpu()) for m in METRICS])
"""
Save the final model
"""
print_frm('Saving final model')
shutil.copyfile(trainer.checkpoint_callback.best_model_path, os.path.join(trainer.log_dir, OPTIMAL_CKPT))
"""
Clean up
"""
print_frm('Cleaning up')
if args.clean_up:
os.system('rm -r ' + os.path.join(trainer.log_dir, 'checkpoints'))
"""
Report final performance results
"""
metrics = np.asarray(metrics)
metrics_avg = np.mean(metrics, axis=0)
print_frm('Final performance report:')
print_frm('=========================')
for i, m in enumerate(METRICS):
print_frm(' %s: %f' % (m, metrics_avg[i]))
| 1,450 | 0 | 46 |
fa3a4d39bf525419dd2000248d3378e487b7e58d | 51 | py | Python | examples/one/rule_1.py | ayushpallav/anthill | 740b8fce4281dfc4ca587c21a2d37741c649d870 | [
"MIT"
] | 14 | 2020-05-22T20:57:29.000Z | 2021-08-19T14:56:32.000Z | examples/one/rule_1.py | ayushpallav/apple-pie | 740b8fce4281dfc4ca587c21a2d37741c649d870 | [
"MIT"
] | 2 | 2021-01-04T05:05:08.000Z | 2021-01-04T05:11:08.000Z | examples/one/rule_1.py | ayushpallav/apple-pie | 740b8fce4281dfc4ca587c21a2d37741c649d870 | [
"MIT"
] | null | null | null | print("-----------------rule_1------------------")
| 25.5 | 50 | 0.215686 | print("-----------------rule_1------------------")
| 0 | 0 | 0 |
0cd13ee9792d2b275510c09e2c2a14904a1130ee | 456 | py | Python | web/flask_test/flask_test.py | nciefeiniu/python-test | d81fcfff8cdec724c3010d6b7a77aabad7f90595 | [
"Apache-2.0"
] | null | null | null | web/flask_test/flask_test.py | nciefeiniu/python-test | d81fcfff8cdec724c3010d6b7a77aabad7f90595 | [
"Apache-2.0"
] | null | null | null | web/flask_test/flask_test.py | nciefeiniu/python-test | d81fcfff8cdec724c3010d6b7a77aabad7f90595 | [
"Apache-2.0"
] | null | null | null | from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route('/hello/<name>')
@app.route('/user/<username>', methods=['POST','GET'])
@app.route('/test/<num>')
if __name__ == '__main__':
app.run()
| 20.727273 | 54 | 0.690789 | from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route('/hello/<name>')
def hello_world(name):
    """Render the hello page for *name*."""
    context = {'name': name}
    return render_template('hello.html', **context)
@app.route('/user/<username>', methods=['POST','GET'])
def show_user_profile(username):
    """Show the profile for the given user (echoes the username)."""
    return f'User {username}'
@app.route('/test/<num>')
def print_number(num):
    """Echo the `/test/<num>` path segment back unchanged."""
    return num
if __name__ == '__main__':
app.run()
| 154 | 0 | 66 |
d96b97a3673f37466a8c9eb623e60075bc2cd115 | 3,727 | py | Python | tests/propagators/test_binary_propagator.py | rlopes-ki/python-sensor | 07e827f9982b2a0c482e8eab82d1a420923efd5e | [
"MIT"
] | 61 | 2017-09-27T02:50:17.000Z | 2022-03-22T12:13:37.000Z | tests/propagators/test_binary_propagator.py | rlopes-ki/python-sensor | 07e827f9982b2a0c482e8eab82d1a420923efd5e | [
"MIT"
] | 82 | 2017-07-11T13:47:33.000Z | 2022-03-22T10:10:38.000Z | tests/propagators/test_binary_propagator.py | rlopes-ki/python-sensor | 07e827f9982b2a0c482e8eab82d1a420923efd5e | [
"MIT"
] | 27 | 2017-09-11T16:22:32.000Z | 2022-03-11T17:21:49.000Z | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2021
from instana.propagators.binary_propagator import BinaryPropagator
from instana.span_context import SpanContext
import unittest
| 51.054795 | 114 | 0.622485 | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2021
from instana.propagators.binary_propagator import BinaryPropagator
from instana.span_context import SpanContext
import unittest
class TestBinaryPropagator(unittest.TestCase):
    """Tests for BinaryPropagator.inject over dict, list, tuple and set carriers.

    Each test injects the same SpanContext and asserts the exact byte-level
    headers written to the carrier, with and without W3C trace context.
    """
    def setUp(self):
        self.bp = BinaryPropagator()
    def test_inject_carrier_dict(self):
        """dict carrier: Instana trace id is written under b'x-instana-t'."""
        carrier = {}
        ctx = SpanContext(span_id="1234567890abcdef", trace_id="1234d0e0e4736234",
                          level=1, baggage={}, sampled=True,
                          synthetic=False)
        carrier = self.bp.inject(ctx, carrier)
        self.assertEqual(carrier[b'x-instana-t'], b"1234d0e0e4736234")
    def test_inject_carrier_dict_w3c_True(self):
        """dict carrier with W3C enabled: traceparent/tracestate also written."""
        carrier = {}
        ctx = SpanContext(span_id="1234567890abcdef", trace_id="1234d0e0e4736234",
                          level=1, baggage={}, sampled=True,
                          synthetic=False)
        carrier = self.bp.inject(ctx, carrier, disable_w3c_trace_context=False)
        self.assertEqual(carrier[b'x-instana-t'], b"1234d0e0e4736234")
        self.assertEqual(carrier[b'traceparent'], b'00-00000000000000001234d0e0e4736234-1234567890abcdef-01')
        self.assertEqual(carrier[b'tracestate'], b'in=1234d0e0e4736234;1234567890abcdef')
    def test_inject_carrier_list(self):
        """list carrier: headers are appended as (key, value) byte tuples."""
        carrier = []
        ctx = SpanContext(span_id="1234567890abcdef", trace_id="1234d0e0e4736234",
                          level=1, baggage={}, sampled=True,
                          synthetic=False)
        carrier = self.bp.inject(ctx, carrier)
        self.assertEqual(carrier[0], (b'x-instana-t', b'1234d0e0e4736234'))
    def test_inject_carrier_list_w3c_True(self):
        """list carrier with W3C enabled: W3C headers precede Instana ones."""
        carrier = []
        ctx = SpanContext(span_id="1234567890abcdef", trace_id="1234d0e0e4736234",
                          level=1, baggage={}, sampled=True,
                          synthetic=False)
        carrier = self.bp.inject(ctx, carrier, disable_w3c_trace_context=False)
        self.assertEqual(carrier[2], (b'x-instana-t', b'1234d0e0e4736234'))
        self.assertEqual(carrier[0], (b'traceparent', b'00-00000000000000001234d0e0e4736234-1234567890abcdef-01'))
        self.assertEqual(carrier[1], (b'tracestate', b'in=1234d0e0e4736234;1234567890abcdef'))
    def test_inject_carrier_tupple(self):
        """tuple carrier behaves like the list carrier."""
        carrier = ()
        ctx = SpanContext(span_id="1234567890abcdef", trace_id="1234d0e0e4736234",
                          level=1, baggage={}, sampled=True,
                          synthetic=False)
        carrier = self.bp.inject(ctx, carrier)
        self.assertEqual(carrier[0], (b'x-instana-t', b'1234d0e0e4736234'))
    def test_inject_carrier_tupple_w3c_True(self):
        """tuple carrier with W3C enabled: same ordering as the list case."""
        carrier = ()
        ctx = SpanContext(span_id="1234567890abcdef", trace_id="1234d0e0e4736234",
                          level=1, baggage={}, sampled=True,
                          synthetic=False)
        carrier = self.bp.inject(ctx, carrier, disable_w3c_trace_context=False)
        self.assertEqual(carrier[2], (b'x-instana-t', b'1234d0e0e4736234'))
        self.assertEqual(carrier[0], (b'traceparent', b'00-00000000000000001234d0e0e4736234-1234567890abcdef-01'))
        self.assertEqual(carrier[1], (b'tracestate', b'in=1234d0e0e4736234;1234567890abcdef'))
    def test_inject_carrier_set_exception(self):
        """Unsupported carrier type (set): inject yields None.

        NOTE(review): despite the name, no exception is asserted here —
        presumably inject swallows the error internally; confirm intent.
        """
        carrier = set()
        ctx = SpanContext(span_id="1234567890abcdef", trace_id="1234d0e0e4736234",
                          level=1, baggage={}, sampled=True,
                          synthetic=False)
        carrier = self.bp.inject(ctx, carrier)
        self.assertIsNone(carrier)
0e2ca74fbdb064638c6b006fa12bbc8faacd1af7 | 618 | py | Python | pyBoard v1.1(STM32F405)/3.拓展实验/2.RGB灯带/main.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | [
"MIT"
] | 73 | 2020-05-02T13:48:27.000Z | 2022-03-26T13:15:10.000Z | pyBoard v1.1(STM32F405)/3.拓展实验/2.RGB灯带/main.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | [
"MIT"
] | null | null | null | pyBoard v1.1(STM32F405)/3.拓展实验/2.RGB灯带/main.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | [
"MIT"
] | 50 | 2020-05-15T13:57:28.000Z | 2022-03-30T14:03:33.000Z |
'''
Experiment: RGB LED strip
Version: v1.0
Date: 2019.7
Author: 01Studio
Description: WS2812 RGB LED strip control demo.
'''
from ws2812 import WS2812
from colors import *
from machine import Pin
import pyb

# Strip data pin: connector Y11, driven as an output.
LED = Pin('Y11', Pin.OUT, value=0)

# Strip driver object: control pin plus number of LEDs.
strip = WS2812(spi_bus=LED, led_count=30)


def fill_color(color):
    """Return a frame list holding *color* once per LED on the strip.

    Restored: this helper was missing from this copy of the file, so the
    strip.show(fill_color(...)) calls below raised NameError.
    """
    return [color for _ in range(strip.led_count)]


# Blank the strip before starting the animation.
strip.show(fill_color(EMPTY))

# Cycle red -> green -> blue, holding each color for one second.
while True:
    strip.show(fill_color(RED))
    pyb.delay(1000)
    strip.show(fill_color(GREEN))
    pyb.delay(1000)
    strip.show(fill_color(BLUE))
    pyb.delay(1000)
| 15.073171 | 41 | 0.699029 |
'''
Experiment: RGB LED strip
Version: v1.0
Date: 2019.7
Author: 01Studio
Description: WS2812 RGB LED strip control demo.
'''
from ws2812 import WS2812
from colors import *
from machine import Pin
import pyb

# Strip data pin: connector Y11, driven as an output.
LED = Pin('Y11',Pin.OUT,value=0)

# Strip driver object: control pin plus number of LEDs.
strip = WS2812(spi_bus=LED, led_count=30)

# Build one full-strip frame: the same color repeated for every LED.
def fill_color(color):
    """Return a list holding *color* once per LED on the strip."""
    data=[]
    for i in range (strip.led_count):
        data.append(color)
    return data

# Blank the strip before starting the animation.
strip.show(fill_color(EMPTY))

# Cycle red -> green -> blue, holding each color for one second.
while True:
    strip.show(fill_color(RED))
    pyb.delay(1000)
    strip.show(fill_color(GREEN))
    pyb.delay(1000)
    strip.show(fill_color(BLUE))
    pyb.delay(1000)
| 94 | 0 | 22 |
fcccc0b2a1e1b7db9084f5867072305324db2292 | 5,307 | py | Python | test/test_prettyprinter.py | plotnick/prettyprinter | edde630011ad5eada6476366a2b2da422f4a9d74 | [
"MIT"
] | null | null | null | test/test_prettyprinter.py | plotnick/prettyprinter | edde630011ad5eada6476366a2b2da422f4a9d74 | [
"MIT"
] | null | null | null | test/test_prettyprinter.py | plotnick/prettyprinter | edde630011ad5eada6476366a2b2da422f4a9d74 | [
"MIT"
] | null | null | null | from __future__ import with_statement
import unittest
from cStringIO import StringIO
from format import format
from prettyprinter import *
from bindings import bindings
import printervars
# Run the unittest suite when executed directly.
# NOTE(review): in this filtered copy the test class is missing, so the run is empty.
if __name__ == "__main__":
    unittest.main()
| 34.914474 | 80 | 0.44658 | from __future__ import with_statement
import unittest
from cStringIO import StringIO
from format import format
from prettyprinter import *
from bindings import bindings
import printervars
class PrettyPrinterTest(unittest.TestCase):
    """Exercise PrettyPrinter: logical blocks, per-line prefixes, filling,
    indentation, and print_level/print_length truncation via format()
    control strings (Common Lisp pretty-printer directives)."""

    # Shared fixture data used by several tests.
    roads = ["Elm", "Cottonwood"]
    town = ["Boston"]

    def ppEquals(self, result, obj, *args, **kwargs):
        # Pretty-print obj into a string buffer and compare with the expected text.
        stringstream = StringIO()
        pp = PrettyPrinter(stringstream, *args, **kwargs)
        pp.pprint(obj)
        pp.close()
        self.assertEqual(result, stringstream.getvalue())
        stringstream.close()

    def ppFormatEquals(self, result, width, control, *args):
        # Run a format() control string against a width-limited printer and
        # compare the rendered output with the expected text.
        stringstream = StringIO()
        pp = PrettyPrinter(stream=stringstream, width=width)
        format(pp, control, *args)
        self.assertEqual(result, stringstream.getvalue())
        pp.close()
        stringstream.close()

    def testLogicalBlock(self):
        # The same logical-block control string wraps differently as the
        # available width shrinks.
        control = "+ ~<Roads ~<~A, ~:_~A~:> ~:_ Town ~<~A~:>~:> +"
        self.ppFormatEquals("""\
+ Roads Elm, Cottonwood Town Boston +""", 50, control, [self.roads, self.town])
        self.ppFormatEquals("""\
+ Roads Elm, Cottonwood
Town Boston +""", 25, control, [self.roads, self.town])
        self.ppFormatEquals("""\
+ Roads Elm,
Cottonwood
Town Boston +""", 21, control, [self.roads, self.town])

    def testPerLinePrefix(self):
        control = "~<;;; ~@;Roads ~<= ~@;~A, ~:_~A~:> ~:_ Town ~<~A~:>~:>"
        self.ppFormatEquals("""\
;;; Roads = Elm, Cottonwood Town Boston""",
                            50, control, [self.roads, self.town])
        self.ppFormatEquals("""\
;;; Roads = Elm,
;;; = Cottonwood
;;; Town Boston""", 25, control, [self.roads, self.town])
        # Per-line prefixes should obey a stack discipline.
        self.ppFormatEquals("""\
* abc
* + 123
* + 456
* + 789
* def""", None, "~<* ~@;~A~:@_~<+ ~@;~@{~A~^~:@_~}~:>~:@_~A~:>",
                            ("abc", (123, 456, 789), "def"))
        # Per-line prefixes are always printed, no matter how a newline
        # originates.
        self.ppFormatEquals("""\
;;; (list first
;;; string on
;;; two lines)""", 25, "~@<;;; ~@;(list ~@<~A ~_~A~:>)~:>",
                            "first", "string on\ntwo lines")

    def testParagraphFilling(self):
        # Strictly speaking, this should be a format test, since filling
        # is done via a syntactic transformation on format control strings,
        # but we needn't be pedantic.
        self.ppFormatEquals("""\
Main street
goes to
Boston.""", 12, "~<~:(~A~) street goes to ~:(~A~).~:@>", ["main", "boston"])

    def testIndentation(self):
        # ~:I sets the indent point; ~1I indents the continuation one column.
        control = "~<(~;~A ~:I~A ~:_~A ~1I~_~A~;)~:>"
        defun = ["defun", "prod", "(x y)", "(* x y)"]
        self.ppFormatEquals("""\
(defun prod (x y) (* x y))""", 50, control, defun)
        self.ppFormatEquals("""\
(defun prod (x y)
(* x y))""", 25, control, defun)
        self.ppFormatEquals("""\
(defun prod
(x y)
(* x y))""", 15, control, defun)
        self.ppFormatEquals("""\
;;; (defun prod
;;; (x y)
;;; (* x y))""", 15, "~<;;; ~@;~@?~:>", [control, defun])

    def testPrintLevel(self):
        # At print_level=i, nesting deeper than i is abbreviated as '#'.
        levels = ["#",
                  "(1, #)",
                  "(1, (2, #))",
                  "(1, (2, (3, #)))",
                  "(1, (2, (3, (4, #))))",
                  "(1, (2, (3, (4, (5, #)))))",
                  "(1, (2, (3, (4, (5, (6,))))))",
                  "(1, (2, (3, (4, (5, (6,))))))"]
        a = (1, (2, (3, (4, (5, (6,))))))
        for i in range(8):
            with bindings(printervars, print_level=i):
                self.ppEquals(levels[i], a)

    def testPrintLength(self):
        # At print_length=i, sequences longer than i end with '...'.
        lengths = ["(...)",
                   "(1, ...)",
                   "(1, 2, ...)",
                   "(1, 2, 3, ...)",
                   "(1, 2, 3, 4, ...)",
                   "(1, 2, 3, 4, 5, ...)",
                   "(1, 2, 3, 4, 5, 6)",
                   "(1, 2, 3, 4, 5, 6)"]
        a = (1, 2, 3, 4, 5, 6)
        for i in range(7):
            with bindings(printervars, print_length=i):
                self.ppEquals(lengths[i], a)

    def testPrintLevelLength(self):
        # Combined level/length truncation, mirroring the CLHS 22.1.3.1.2 table.
        levelLengths = {
            (0, 1): "#",
            (1, 1): "(if ...)",
            (1, 2): "(if # ...)",
            (1, 3): "(if # # ...)",
            (1, 4): "(if # # #)",
            (2, 1): "(if ...)",
            (2, 2): "(if (member x ...) ...)",
            (2, 3): "(if (member x y) (+ # 3) ...)",
            (3, 2): "(if (member x ...) ...)",
            (3, 3): "(if (member x y) (+ (car x) 3) ...)",
            (3, 4): "(if (member x y) (+ (car x) 3) (foo (a b c d ...)))"
        }
        sexp = ("if", ("member", "x", "y"), ("+", ("car", "x"), 3),
                ("foo", ("a", "b", "c", "d", "Baz")))
        for (level, length) in [(0, 1), (1, 2), (1, 2), (1, 3), (1, 4),
                                (2, 1), (2, 2), (2, 3), (3, 2), (3, 3), (3, 4)]:
            with bindings(printervars,
                          print_pretty=True, print_escape=False,
                          print_level=level, print_length=length):
                s = format(None, "~W", sexp)
                self.assertEqual(levelLengths[(level, length)],
                                 s.replace(",", ""))
# Run the PrettyPrinterTest suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| 4,727 | 321 | 23 |
d520b024353774f21173e09b7db4a18cc61c1f22 | 11,522 | py | Python | updates.py | stephenangelico/shed | 9df18b1681366c1add9de0ec1abb4b85e1c99300 | [
"MIT"
] | 12 | 2015-01-12T15:44:46.000Z | 2020-07-10T06:35:36.000Z | updates.py | stephenangelico/shed | 9df18b1681366c1add9de0ec1abb4b85e1c99300 | [
"MIT"
] | 2 | 2021-11-06T02:09:30.000Z | 2022-01-23T07:22:09.000Z | updates.py | stephenangelico/shed | 9df18b1681366c1add9de0ec1abb4b85e1c99300 | [
"MIT"
] | 8 | 2016-10-12T20:17:10.000Z | 2022-03-26T08:18:34.000Z | #!/usr/bin/python3
# requires system Python and the python3-apt package
import textwrap
from collections import OrderedDict # Starting with Python 3.7, we could just use vanilla dicts
import apt # ImportError? apt install python3-apt
# User-facing help text shown in the curses popup when '?' is pressed.
HELP_INFO = """Top-level package manager
This tool lists all packages that aren't marked auto, and have updates
available. Press Q at any time to exit without touching your system;
if you have no need to make changes, this script can be run without
root privileges.
Press Space to select or deselect a package for upgrade.
Press 'I' on any package to see more info about it.
Press 'A' to mark a package as automatically installed.
Press 'R' to remove a package.
Press 'Q' to go back, or to quit the program.
"""
def find_ultimate_dependency(cache, deps):
    """Find any one manually-installed package that ultimately caused at
    least one of the given deps to be installed. Returns "" if none found.

    Performs a breadth-first walk up the reverse hard-dependency graph,
    carrying a human-readable chain ("a - b - ... --> manual") along.
    """
    # Frontier maps package name -> chain-of-names that led to it so far.
    frontier = {name: name for name in deps}
    while frontier:
        next_frontier = {}
        for name, chain in frontier.items():
            for rdep in cache[name]._pkg.rev_depends_list:
                # Only hard "Depends" edges justify an installation.
                if rdep.dep_type_untranslated != "Depends":
                    continue
                parent_name = rdep.parent_pkg.name
                parent = cache[parent_name]
                if not parent.installed:
                    continue
                if not parent.is_auto_installed:
                    # Found a manually-installed root cause.
                    return chain + " --> " + parent_name
                next_frontier[parent_name] = chain + " - " + parent_name
        frontier = next_frontier
    return ""
def show_packages(scr, cache, upgrades, auto):
    """Interactive curses screen for choosing per-package actions.

    Lists *upgrades* (manually-installed packages with pending updates) plus
    a one-line summary of *auto* auto-installed ones; lets the user mark
    packages to upgrade ('I'/Space), mark-auto ('A') or remove ('R'), then
    applies those marks to the apt *cache*.
    Returns True after making cache changes, or False to ignore and do nothing.

    Restored for this copy of the file: the nested helpers print/toggle/
    make_popup and a local describe() (its module-level twin is missing
    here); without them this function raised NameError.
    """
    def print(s="", *args):
        # Shadows the builtin: under curses, "printing" means addstr + newline.
        scr.addstr(str(s) + "\n", *args)

    def describe(pkg):
        # One table row per package: name, installed version, candidate version.
        return OrderedDict((("Name", pkg.name),
                            ("Current", pkg.installed.version),
                            ("Target", pkg.candidate.version)))

    desc = [describe(pkg) for pkg in upgrades]
    widths = OrderedDict((x, len(x)) for x in desc[0])  # Start with header widths
    for d in desc:
        for col in d:
            widths[col] = max(widths[col], len(d[col]))
    fmt = "[%s] " + " ".join("%%-%ds" % col for col in widths.values())
    print(fmt % (("*",) + tuple(widths)), curses.A_BOLD)
    print("--- " + " ".join("-" * col for col in widths.values()))
    # TODO: Also adjust for insufficient width? Currently will quietly
    # truncate lines at the available width.
    pkg = 0
    actions = [" "] * len(upgrades)
    lastheight = None
    popup = None

    def toggle(pkg, act):
        # Flip the action marker for one package and redraw it if on screen.
        actions[pkg] = " " if actions[pkg] == act else act
        if pkg >= pagestart and pkg < pagestart + perpage:
            scr.addstr(pkg % perpage + 2, 1, actions[pkg])

    def make_popup(lines):
        # Open a bordered overlay with the given lines; each line is a string
        # or a (string, curses-attr) tuple.
        nonlocal popup
        lines = lines[:height - 5]  # Truncate if we don't have enough screen space
        popup = curses.newwin(len(lines) + 2, width - 4, 2, 2)
        popup.erase()
        popup.border()
        for i, line in enumerate(lines):
            if not isinstance(line, tuple): line = (line,)
            popup.addstr(i + 1, 1, line[0][:width - 6], *line[1:])
        popup.refresh()
        curses.curs_set(0)

    nonautodeps = []
    while True:
        height, width = scr.getmaxyx()  # Also used by make_popup()
        if height != lastheight:
            # Note that a resize event is sent through as a pseudo-key, so
            # this will trigger immediately, without waiting for the next
            # actual key.
            lastheight, lastpage = height, None
            scr.setscrreg(0, height - 1)
            perpage = min(height - 8, len(upgrades))
            scr.move(perpage + 2, 0)
            scr.clrtobot()
            print()
            if auto: print("Plus %d auto-installed packages." % auto)
            print("Select packages to upgrade, then Enter to apply.")
            print("Press ? for help, or Q to quit without making any changes")
        pagestart = pkg - pkg % perpage
        if pagestart != lastpage:
            lastpage = pagestart
            # Update (only if the page has changed)
            for i, d in enumerate(desc[pagestart : pagestart + perpage]):
                scr.addstr(i + 2, 0, fmt % ((actions[pagestart + i],) + tuple(d.values())))
            # Erase any spare space, including the mandatory blank at the end
            for i in range(i + 1, perpage + 1):
                # Is this the best way to clear a line??
                scr.move(i + 2, 0)
                scr.clrtoeol()
        scr.setscrreg(2, perpage + 4)
        scr.move((pkg % perpage) + 2, 1)
        key = scr.getkey()
        if popup:
            # Restricted key handling when a popup is open
            if key in "Aa" and nonautodeps:
                for i, p in enumerate(upgrades):
                    if p in nonautodeps:
                        toggle(i, "A")
            if key in "?QqIiAa":
                popup = None
                nonautodeps = []
                scr.touchwin()
                scr.refresh()
                curses.curs_set(2)
            continue
        if key == "Q" or key == "q": return False
        if key == "\n": break
        if key == "KEY_UP": pkg = (pkg - 1) % len(upgrades)
        if key == "KEY_DOWN": pkg = (pkg + 1) % len(upgrades)
        if key == "KEY_PPAGE": pkg = 0 if pkg < perpage else pkg - perpage
        if key == "KEY_NPAGE": pkg = len(upgrades) - 1 if pkg >= len(upgrades) - perpage else pkg + perpage
        if key == "KEY_MOUSE": TODO = curses.getmouse()
        if key == " ": toggle(pkg, "I")
        if key in "Aa": toggle(pkg, "A")
        if key in "Rr": toggle(pkg, "R")
        if key == "?":
            make_popup(HELP_INFO.split("\n"))
        if key == "I" or key == "i":
            # Show the from/to versions, the description, and the list of
            # other packages that would be upgraded along with this one.
            # Note: get_changelog() appears to be broken, so no changelog.
            sel = upgrades[pkg]
            info = ["Upgrading %s from %s to %s" % (sel.fullname, sel.installed.version, sel.candidate.version), ""]
            for line in sel.candidate.description.split("\n"):
                info.extend(textwrap.fill(line, width - 6).split("\n"))
            try: sel.mark_upgrade()
            except apt.package.apt_pkg.Error as e:
                info.append("Unable to upgrade this package:")
                info.append(e.args[0])
            # Should I recognize packages by equality, identity, or name?
            changes = [p for p in cache.get_changes() if p != sel]
            if changes:
                info.append("")
                info.append("Additional packages to upgrade:")
                nonautodeps = []
                for p in changes:
                    if p.installed == p.candidate: continue  # Sometimes marks "changes" that aren't changes at all.
                    info.append("* %s from %s to %s" % (
                        p.fullname,
                        p.installed.version if p.installed else "(none)",
                        p.candidate.version,
                    ))
                    if not p.is_auto_installed:
                        info[-1] = (info[-1], curses.A_BOLD)
                        nonautodeps.append(p)
                if nonautodeps:
                    info.append("")
                    info.append(("%d dependencies were not auto-installed." % len(nonautodeps), curses.A_BOLD))
                    info.append(("Press 'A' to mark those deps as auto.", curses.A_BOLD))
            cache.clear()
            make_popup(info)
        if key in "Ww":
            # Similar info to "aptitude why": trace reverse dependencies to
            # explain why this package was installed.
            p = upgrades[pkg]._pkg  # Is there a non-private way to find the underlying package?
            deps, recs, sugs = {}, {}, {}
            for dep in p.rev_depends_list:
                # Note: get_fullname() would be better than name, but it doesn't work on older apts
                n = dep.parent_pkg.name
                inst = cache[n]
                if not inst.installed: continue
                dep_type = dep.dep_type_untranslated
                if dep_type == "Depends":
                    # Hard dependency. Definite reason to install something
                    deps[n] = dep.parent_ver
                elif dep_type == "Recommends":
                    # Soft dependency; shown only if there are no hard deps.
                    recs[n] = dep.parent_ver
                elif dep_type == "Suggests":
                    # Even softer; shown only if there are no Deps or Recs.
                    sugs[n] = dep.parent_ver
            info = ["Why was %s installed?" % upgrades[pkg].name, ""]
            if deps: info.append("Depended on by:")
            elif recs: info.append("Recommended by:")
            elif sugs: info.append("Suggested by:")
            else: info.append("Presumably manual installation")  # No deps.
            got_nonauto = False
            for dep in deps or recs or sugs:  # Pick the highest-priority category only
                if not cache[dep].is_auto_installed:
                    info.append(("* " + dep, curses.A_BOLD))
                    got_nonauto = True
                else: info.append("* " + dep)
            if deps and not got_nonauto:
                # Trace the chain of deps and find something, anything, that
                # was manually installed.
                cause = find_ultimate_dependency(cache, deps)
                if cause: info.extend(["", "Installed because:", cause])
                else: info.extend(["", "No ultimate installation cause found - everything's autoinstalled."])
            make_popup(info)
    changes = False
    if "R" in actions:
        # Don't bother running through the packages (slow) if we aren't removing any
        already_auto_removable = {pkg.fullname for pkg in cache if pkg.is_auto_removable}
    for pkg, ac in zip(upgrades, actions):
        if ac != " ": changes = True
        if ac == "I": pkg.mark_upgrade()
        elif ac == "A": pkg.mark_auto()
        elif ac == "R": pkg.mark_delete(purge=True)
    if "R" in actions:
        # Remove should be equiv of "apt --purge autoremove pkgname" but
        # doesn't remove anything that was already autoremovable
        for pkg in cache:
            if pkg.is_auto_removable and pkg.fullname not in already_auto_removable:
                pkg.mark_delete(purge=True)
    return changes
def main():
    """Entry point: collect manually-installed packages with pending upgrades,
    run the curses chooser, and commit the selected changes.

    Restored: this copy of the file lost main(), so the guard below raised
    NameError.
    """
    cache = apt.Cache()
    cache.open()
    upgrades = []
    auto = 0
    for pkg in cache:
        if not pkg.is_installed: continue  # This is checking upgrades only
        if pkg.candidate == pkg.installed: continue  # Already up-to-date
        if pkg.is_auto_installed:
            # Ignore (but summarize) autoinstalled packages
            auto += 1
            continue
        upgrades.append(pkg)
    if not upgrades:
        print("Everything up-to-date.")
        return
    global curses; import curses
    upgrades = curses.wrapper(show_packages, cache, upgrades, auto)
    if not upgrades: return
    try: cache.commit()
    except apt.cache.LockFailedException:
        print("Cannot apply changes when not root.")
        for pkg in cache.get_changes():
            print("*", pkg.fullname)  # TODO: Say what change was requested
        # TODO: Provide a 'sudo apt' command that would do the changes

if __name__ == "__main__":
    main()
| 41.297491 | 126 | 0.67523 | #!/usr/bin/python3
# requires system Python and the python3-apt package
import textwrap
from collections import OrderedDict # Starting with Python 3.7, we could just use vanilla dicts
import apt # ImportError? apt install python3-apt
def describe(pkg):
    """Summarize one upgradable package as an ordered Name/Current/Target row.

    (A plain dict literal would do on Python 3.7+, where insertion order is
    guaranteed; OrderedDict keeps this working on older interpreters.)
    """
    summary = OrderedDict()
    summary["Name"] = pkg.name
    summary["Current"] = pkg.installed.version
    summary["Target"] = pkg.candidate.version
    return summary
# User-facing help text shown in the curses popup when '?' is pressed.
HELP_INFO = """Top-level package manager
This tool lists all packages that aren't marked auto, and have updates
available. Press Q at any time to exit without touching your system;
if you have no need to make changes, this script can be run without
root privileges.
Press Space to select or deselect a package for upgrade.
Press 'I' on any package to see more info about it.
Press 'A' to mark a package as automatically installed.
Press 'R' to remove a package.
Press 'Q' to go back, or to quit the program.
"""
def find_ultimate_dependency(cache, deps):
    """Find any one manually-installed package that ultimately caused at
    least one of the given deps to be installed. Returns "" if none found.

    Breadth-first walk up the reverse hard-dependency ("Depends") graph,
    carrying a human-readable name chain; the final hop uses " --> ".
    """
    # depchain maps package name -> chain of names that led to it so far.
    depchain = {dep: dep for dep in deps}
    while depchain:
        newchain = {}
        for dep, path in depchain.items():
            for parent in cache[dep]._pkg.rev_depends_list:
                # Only hard "Depends" edges justify an installation.
                if parent.dep_type_untranslated != "Depends": continue
                n = parent.parent_pkg.name
                if not cache[n].installed: continue
                if not cache[n].is_auto_installed:
                    # Found one!
                    return path + " --> " + n
                newchain[n] = path + " - " + n
        depchain = newchain
    return ""
def show_packages(scr, cache, upgrades, auto):
    """Interactive curses screen for choosing per-package actions
    (upgrade / mark-auto / remove) over *upgrades*; applies the chosen
    marks to the apt *cache*.
    Returns True after making cache changes, or False to ignore and do nothing."""
    def print(s="", *args):
        # Shadows the builtin: under curses, "printing" means addstr + newline.
        scr.addstr(str(s) + "\n", *args)
    desc = [describe(pkg) for pkg in upgrades]
    widths = OrderedDict((x, len(x)) for x in desc[0])  # Start with header widths
    for d in desc:
        for col in d:
            widths[col] = max(widths[col], len(d[col]))
    fmt = "[%s] " + " ".join("%%-%ds" % col for col in widths.values())
    # print(fmt % ("*", *widths), curses.A_BOLD) # Python 3.5+
    print(fmt % (("*",) + tuple(widths)), curses.A_BOLD)
    print("--- " + " ".join("-" * col for col in widths.values()))
    # TODO: Also adjust for insufficient width? Currently will quietly
    # truncate lines at the available width, which isn't bad if it's
    # just a character or two, but could be wasteful with long pkgnames.
    pkg = 0
    actions = [" "] * len(upgrades)
    lastheight = None
    popup = None
    def toggle(pkg, act):
        # Flip the action marker for one package and redraw it if on screen.
        actions[pkg] = " " if actions[pkg] == act else act
        if pkg >= pagestart and pkg < pagestart + perpage:
            scr.addstr(pkg % perpage + 2, 1, actions[pkg])
    def make_popup(lines):
        # Bordered overlay; each line is a string or (string, curses-attr) tuple.
        nonlocal popup
        lines = lines[:height - 5]  # Truncate if we don't have enough screen space
        popup = curses.newwin(len(lines) + 2, width - 4, 2, 2)
        popup.erase()
        popup.border()
        for i, line in enumerate(lines):
            if not isinstance(line, tuple): line = (line,)
            popup.addstr(i + 1, 1, line[0][:width - 6], *line[1:])
        popup.refresh()
        curses.curs_set(0)
    nonautodeps = []
    while True:
        height, width = scr.getmaxyx()  # Also used by make_popup()
        if height != lastheight:
            # Note that a resize event is sent through as a pseudo-key, so
            # this will trigger immediately, without waiting for the next
            # actual key.
            lastheight, lastpage = height, None
            scr.setscrreg(0, height - 1)
            perpage = min(height - 8, len(upgrades))
            scr.move(perpage + 2, 0)
            scr.clrtobot()
            print()
            if auto: print("Plus %d auto-installed packages." % auto)
            print("Select packages to upgrade, then Enter to apply.")
            print("Press ? for help, or Q to quit without making any changes")
        pagestart = pkg - pkg % perpage
        if pagestart != lastpage:
            lastpage = pagestart
            # Update (only if the page has changed)
            for i, d in enumerate(desc[pagestart : pagestart + perpage]):
                scr.addstr(i + 2, 0, fmt % ((actions[pagestart + i],) + tuple(d.values())))
            # Erase any spare space, including the mandatory blank at the end
            for i in range(i + 1, perpage + 1):
                # Is this the best way to clear a line??
                scr.move(i + 2, 0)
                scr.clrtoeol()
        scr.setscrreg(2, perpage + 4)
        scr.move((pkg % perpage) + 2, 1)
        key = scr.getkey()
        if popup:
            # Restricted key handling when a popup is open
            if key in "Aa" and nonautodeps:
                for i, p in enumerate(upgrades):
                    if p in nonautodeps:
                        toggle(i, "A")
            if key in "?QqIiAa":
                popup = None
                nonautodeps = []
                scr.touchwin()
                scr.refresh()
                curses.curs_set(2)
            continue
        if key == "Q" or key == "q": return False
        if key == "\n": break
        if key == "KEY_UP": pkg = (pkg - 1) % len(upgrades)
        if key == "KEY_DOWN": pkg = (pkg + 1) % len(upgrades)
        if key == "KEY_PPAGE": pkg = 0 if pkg < perpage else pkg - perpage
        if key == "KEY_NPAGE": pkg = len(upgrades) - 1 if pkg >= len(upgrades) - perpage else pkg + perpage
        if key == "KEY_MOUSE": TODO = curses.getmouse()
        if key == " ": toggle(pkg, "I")
        if key in "Aa": toggle(pkg, "A")
        if key in "Rr": toggle(pkg, "R")
        if key == "?":
            make_popup(HELP_INFO.split("\n"))
        if key == "I" or key == "i":
            # TODO: Show a new window with package info
            # Show the from and to versions, optionally the changelog,
            # and ideally, the list of other packages that would be
            # upgraded along with this one (its out-of-date deps).
            # Note: get_changelog() appears to be broken. No idea why.
            # Neither the default URI nor the hand-checked one below
            # work; not sure if it's failing to download or failing to
            # parse afterwards, but it gets no useful info.
            # http://packages.debian.org/changelogs/pool/%(src_section)s/%(prefix)s/%(src_pkg)s/%(src_pkg)s_%(src_ver)s/changelog
            # http://metadata.ftp-master.debian.org/changelogs/%(src_section)s/%(prefix)s/%(src_pkg)s/%(src_pkg)s_%(src_ver)s_changelog
            sel = upgrades[pkg]
            info = ["Upgrading %s from %s to %s" % (sel.fullname, sel.installed.version, sel.candidate.version), ""]
            for line in sel.candidate.description.split("\n"):
                info.extend(textwrap.fill(line, width - 6).split("\n"))
            try: sel.mark_upgrade()
            except apt.package.apt_pkg.Error as e:
                info.append("Unable to upgrade this package:")
                info.append(e.args[0])
            # Should I recognize packages by equality, identity, or name?
            changes = [p for p in cache.get_changes() if p != sel]
            if changes:
                info.append("")
                info.append("Additional packages to upgrade:")
                nonautodeps = []
                for p in changes:
                    if p.installed == p.candidate: continue  # For some reason, it sometimes marks "changes" that aren't changes at all.
                    info.append("* %s from %s to %s" % (
                        p.fullname,
                        p.installed.version if p.installed else "(none)",
                        p.candidate.version,
                    ))
                    if not p.is_auto_installed:
                        info[-1] = (info[-1], curses.A_BOLD)
                        nonautodeps.append(p)
                if nonautodeps:
                    info.append("")
                    info.append(("%d dependencies were not auto-installed." % len(nonautodeps), curses.A_BOLD))
                    info.append(("Press 'A' to mark those deps as auto.", curses.A_BOLD))
                    # TODO: Disambiguate "A to mark my deps auto" from "A to mark me auto"?
            cache.clear()
            make_popup(info)
        if key in "Ww":
            # Similar info to "aptitude why".
            # Mark this package auto, mark it for deletion. See what needs to be
            # deleted. Filter to only those which are not auto. List those as the
            # deps of this package.
            # 1) Find out why this package was installed
            # 2) If this is a hard dep of a non-auto package (or of an auto package
            # that is a hard dep of a non-auto package), this can be marked auto.
            # 3) If this is a Recommends/Suggests only, say which package.
            p = upgrades[pkg]._pkg  # Is there a non-private way to find the underlying package?
            deps, recs, sugs = {}, {}, {}
            for dep in p.rev_depends_list:
                # Note: Using get_fullname() would be better than name, but it doesn't work on older apts
                n = dep.parent_pkg.name
                inst = cache[n]
                if not inst.installed: continue
                type = dep.dep_type_untranslated
                if type == "Depends":
                    # Hard dependency. Definite reason to install something
                    # TODO: Keep the most interesting, not the last seen, version?
                    deps[n] = dep.parent_ver
                elif type == "Recommends":
                    # Soft dependency. If there are no hard deps, then this would be
                    # why the package was installed, but it shouldn't be marked auto.
                    recs[n] = dep.parent_ver
                elif type == "Suggests":
                    # Even softer dependency. As with Recommends but even more so.
                    # A "Suggests" dep won't be shown unless there are no Deps *or*
                    # Recs.
                    sugs[n] = dep.parent_ver
            info = ["Why was %s installed?" % upgrades[pkg].name, ""]
            if deps: info.append("Depended on by:")
            elif recs: info.append("Recommended by:")
            elif sugs: info.append("Suggested by:")
            else: info.append("Presumably manual installation")  # No deps.
            got_nonauto = False
            for dep in deps or recs or sugs:  # Pick the highest-priority category only
                if not cache[dep].is_auto_installed:
                    info.append(("* " + dep, curses.A_BOLD))
                    got_nonauto = True
                else: info.append("* " + dep)
            if deps and not got_nonauto:
                # Trace the chain of deps and find something, anything, that
                # was manually installed. Keep going till we get somewhere or
                # run out of dependencies to look at.
                cause = find_ultimate_dependency(cache, deps)
                if cause: info.extend(["", "Installed because:", cause])
                else: info.extend(["", "No ultimate installation cause found - everything's autoinstalled."])
            make_popup(info)
        # scr.addstr(height - 2, 0, repr(key)); scr.clrtoeol()
    changes = False
    if "R" in actions:
        # Don't bother running through the packages (slow) if we aren't removing any
        already_auto_removable = {pkg.fullname for pkg in cache if pkg.is_auto_removable}
    for pkg, ac in zip(upgrades, actions):
        if ac != " ": changes = True
        if ac == "I": pkg.mark_upgrade()
        elif ac == "A": pkg.mark_auto()
        elif ac == "R": pkg.mark_delete(purge=True)
    if "R" in actions:
        # Remove should be equiv of "apt --purge autoremove pkgname" but
        # doesn't remove anything that was already autoremovable
        for pkg in cache:
            if pkg.is_auto_removable and pkg.fullname not in already_auto_removable:
                pkg.mark_delete(purge=True)
    return changes
def main():
    """Entry point: collect manually-installed packages with pending upgrades,
    run the curses chooser, then commit whatever the user selected."""
    cache = apt.Cache()
    cache.open()
    upgrades = []
    auto = 0
    for pkg in cache:
        if not pkg.is_installed: continue  # This is checking upgrades only
        if pkg.candidate == pkg.installed: continue  # Already up-to-date
        if pkg.is_auto_installed:
            # Ignore (but summarize) autoinstalled packages
            auto += 1
            continue
        upgrades.append(pkg)
    if not upgrades:
        print("Everything up-to-date.")
        return
    # curses is imported lazily so the "nothing to do" path never touches the terminal.
    global curses; import curses
    upgrades = curses.wrapper(show_packages, cache, upgrades, auto)
    if not upgrades: return
    # if "simulate": print(cache.get_changes()); return # Note that this doesn't report on mark-auto actions
    # TODO: Show progress while it downloads? Not sure why the default progress
    # isn't being shown. Might need to subclass apt.progress.text.AcquireProgress?
    try: cache.commit()
    except apt.cache.LockFailedException:
        print("Cannot apply changes when not root.")
        for pkg in cache.get_changes():
            print("*", pkg.fullname)  # TODO: Say what change was requested
        # TODO: Provide a 'sudo apt' command that would do the changes
# Run the interactive upgrader when executed directly.
if __name__ == "__main__":
    main()
| 1,846 | 0 | 115 |
e2c8923a8f3465ac1f6e6f808251f566f4c248ff | 4,987 | py | Python | model.py | govinsprabhu/Behavioral_Cloning | 6b4bf27e6669707824aaa73b83b8da9e5a1d18b8 | [
"MIT"
] | null | null | null | model.py | govinsprabhu/Behavioral_Cloning | 6b4bf27e6669707824aaa73b83b8da9e5a1d18b8 | [
"MIT"
] | null | null | null | model.py | govinsprabhu/Behavioral_Cloning | 6b4bf27e6669707824aaa73b83b8da9e5a1d18b8 | [
"MIT"
] | null | null | null | import csv
import cv2
import numpy as np
from keras.models import Sequential, load_model
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout
from keras.layers.convolutional import Conv2D
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
lines = []
path = 'C:/Users/609600403/Documents/ML/project/CarND-Behavioral-Cloning-P3-master/data/'
# loading the image paths from csv
lines = get_data(path)
print(len(lines))
# Splitting train and validation ,used 20% of data for validation
train_samples, validation_samples = train_test_split(lines, test_size=0.2)
# Getting training and validation using generator function, used batch of 32
train_generator = generator(train_samples, path, batch_size=32)
validation_generator = generator(validation_samples, path, batch_size=32)
# getting model
model = get_model()
# when you are loading the model
#model = load_model('model-4.h5')
# training the model using generator
model.fit_generator(train_generator, steps_per_epoch=4*len(train_samples),validation_data=validation_generator, validation_steps=len(validation_samples),epochs=1, verbose=1)
# Saving the model
model.save('model-5.h5') | 35.119718 | 173 | 0.648286 | import csv
import cv2
import numpy as np
from keras.models import Sequential, load_model
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout
from keras.layers.convolutional import Conv2D
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
lines = []
def get_data(path):
    """Read driving_log.csv under *path* and return its rows (header skipped).

    NOTE(review): appends into the module-global `lines` (and returns it);
    generator() reads that same global, so calling this more than once
    would accumulate rows — verify before reusing.
    """
    # getting driving_log, and loading the image paths
    with open(path+'driving_log.csv') as csvfile:
        reader = csv.reader(csvfile)
        next(reader,None)
        for line in reader:
            lines.append(line)
    return lines
def generator(data, path, batch_size = 32):
    """Endlessly yield shuffled (images, angles) training batches from *data*.

    Each CSV row contributes six samples: center/left/right camera images
    converted BGR->RGB, plus a horizontally flipped copy of each with the
    steering angle negated; the side cameras get a +/-0.1 steering correction.

    Fix: batches are now sliced from the shuffled *data* argument.  The
    original sliced the module-global `lines`, which ignored the
    train/validation split and yielded unshuffled, overlapping rows.
    """
    num_samples = len(data)
    angle_adjustment = 0.1
    image_path = path + 'IMG/'
    while 1:
        data = shuffle(data)
        for offset in range(0, num_samples, batch_size):
            batch_samples = data[offset:offset + batch_size]
            images = []
            angles = []
            for line in batch_samples:
                # center image
                # converting to RGB and adding both flipped and original
                center_image = cv2.imread(image_path + line[0].split('/')[-1])
                center_image_rgb = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)
                images.append(center_image_rgb)
                angles.append(float(line[3]))
                images.append(cv2.flip(center_image_rgb, 1))
                angles.append(-float(line[3]))
                # left image: steer slightly right (+adjustment)
                left_image = cv2.imread(image_path + line[1].split('/')[-1])
                left_image_rgb = cv2.cvtColor(left_image, cv2.COLOR_BGR2RGB)
                images.append(left_image_rgb)
                angles.append(float(line[3]) + angle_adjustment)
                images.append(cv2.flip(left_image_rgb, 1))
                angles.append(-(float(line[3]) + angle_adjustment))
                # right image: steer slightly left (-adjustment)
                right_image = cv2.imread(image_path + line[2].split('/')[-1])
                right_image_rgb = cv2.cvtColor(right_image, cv2.COLOR_BGR2RGB)
                images.append(right_image_rgb)
                angles.append(float(line[3]) - angle_adjustment)
                images.append(cv2.flip(right_image_rgb, 1))
                angles.append(-(float(line[3]) - angle_adjustment))
            # converting to numpy array
            X_train = np.array(images)
            y_train = np.array(angles)
            yield shuffle(X_train, y_train)
def get_model():
    """Build and compile the NVIDIA end-to-end driving CNN.

    Input: 160x320x3 RGB frames; output: a single steering angle.
    Compiled with the Adam optimizer and mean-squared-error loss.
    """
    # creating model based on NVIDIA paper
    model = Sequential()
    # applying normalization to the image
    model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)))
    # Cropping the image, 70 from top, 25 from bottom
    # Input 160x320x3
    model.add(Cropping2D(cropping=((70, 25), (0, 0))))
    # Applying 24 filter of sizes (5,5) of strides of 2 with relu activation
    # input 65x320x3
    model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))
    # Applying 36 filter of sizes (5,5) of strides of 2 with relu activation
    # input 31x158x24
    model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))
    # Applying 48 filter of sizes (5,5) of strides of 2 with relu activation
    # input 14x77x36
    model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))
    # Applying 64 filter of sizes (5,5) of strides of 1 with relu activation
    # input 5x37x48
    model.add(Conv2D(64, (3, 3), activation='relu'))
    # Applying 64 filter of sizes (5,5) of strides of 2 with relu activation
    # input 3x35x64
    model.add(Conv2D(64, (3, 3), activation='relu'))
    # input 1x33x64
    model.add(Flatten())
    # input 2112
    model.add(Dense(100))
    # input 100
    model.add(Dense(50))
    # input 50
    model.add(Dense(10))
    # input 10
    model.add(Dense(1))
    # using adam optimization, and mean square error
    model.compile('adam', 'mse')
    model.summary()
    return model
# Training driver: load the CSV, split, build generators, train and save.
path = 'C:/Users/609600403/Documents/ML/project/CarND-Behavioral-Cloning-P3-master/data/'
# loading the image paths from csv
lines = get_data(path)
print(len(lines))
# Splitting train and validation ,used 20% of data for validation
train_samples, validation_samples = train_test_split(lines, test_size=0.2)
# Getting training and validation using generator function, used batch of 32
train_generator = generator(train_samples, path, batch_size=32)
validation_generator = generator(validation_samples, path, batch_size=32)
# getting model
model = get_model()
# when you are loading the model
#model = load_model('model-4.h5')
# training the model using generator
# NOTE(review): steps_per_epoch is measured in batches; 4*len(train_samples)
# looks like a sample count (each row yields ~6 augmented images) — verify.
model.fit_generator(train_generator, steps_per_epoch=4*len(train_samples),validation_data=validation_generator, validation_steps=len(validation_samples),epochs=1, verbose=1)
# Saving the model
model.save('model-5.h5')
f7907661b240e64761abf5f915f0c3ced1efa2dc | 8,462 | py | Python | sharper/flaskapp/helper.py | sluggard6/bgirl | 3c9fa895189ef16442694830d0c05cf60ee5187b | [
"Apache-2.0"
] | null | null | null | sharper/flaskapp/helper.py | sluggard6/bgirl | 3c9fa895189ef16442694830d0c05cf60ee5187b | [
"Apache-2.0"
] | null | null | null | sharper/flaskapp/helper.py | sluggard6/bgirl | 3c9fa895189ef16442694830d0c05cf60ee5187b | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
"""
flskapp/helper.py
~~~~~~~~~~~~~~
Flask框架帮助方法
"""
import os
from random import randint
import traceback
import urllib2
from sharper.util.string import random_number
from flask import get_flashed_messages, request, jsonify, current_app, logging, session
import sys
from ..lib.error import ErrorCode, AppError
from ..util.helper import get_utf8, get_unicode
from .logger import logger
import time
__authors__ = ['"linnchord gao" <linnchord@gmail.com>']
def get_flash_msg(_type=None, joiner=' '):
"""
获取指定类别所有flash消息拼接文本
@_type: ('ok', 'info', 'warn', 'alert')
"""
if _type:
return joiner.join(get_flashed_messages(category_filter=[_type]))
else:
return joiner.join(get_flashed_messages())
def need_json_response():
"""
判断是否需要返回json
"""
return 'application/json' in request.headers.get('Accept')
def print_redirect(url="/", text=None, duration=5, title=u'正在跳转', templ=None):
"""
打印内容并在指定时间(秒)跳转到指定url
@param text:
@param url:
@param duration:
@return:
"""
if not templ:
templ = u'<html>' \
u'<title>{title}</title>' \
u'<meta http-equiv="refresh" content="{duration}; url={url}" />' \
u'<body>' \
u'<h1>{text}</h1>' \
u'<span>{duration}秒后将跳转,请稍候</span>' \
u'</body>' \
u'</html>'
return templ.format(duration=duration, url=url, text=text, title=title)
def clear_cookie(resp, name_or_list):
"""
清除指定cookie
@resp: response
@name_or_list: cookie name or name list
"""
resp = current_app.make_response(resp)
if isinstance(name_or_list, basestring):
name_or_list = [name_or_list]
for n in name_or_list:
resp.set_cookie(n, '', expires=0)
return resp
def set_cookie(resp, name, value, expires,max_age=1800):
"""
设置cookie
"""
resp = current_app.make_response(resp)
resp.set_cookie(name, value,expires=expires,max_age=max_age)
return resp
def simple_times_limit_validate(category, key, limit=5, expire=300, _kvdb=None, more_paras=None, amount=1):
"""
针对指定类型+关键字参数+更多其他参数(dict类型拼接)在指定过期时间内仅允许n次(limit)访问
例如:
* 用户登录(类型)指定ip(关键字参数)在5分钟(expire)内只允许访问5次(limit)
* 某api指定ip或客户端在1分钟内只允许访问1000次
@category: 类型(例如 reg | login )
@key: 关键参数 (例如 203.12.213.30 )
@limit: 限制访问次数
@expire: 过期时间 单位:秒 通过redis key过期时间控制
@kvdb: redis库 默认kvdb.common
@more_paras: 用于较多参数变量控制,拼接为缓存键
"""
# redis缓存键构造
key = 'STLV:%s:%s' % (category, key)
if more_paras:
for k, v in more_paras.items():
key += ':%s:%s' % (k, v)
if not _kvdb:
from .kvdb import kvdb
_kvdb = kvdb.common
now = _kvdb.incr(key, amount=amount)
ttl = _kvdb.ttl(key)
if not ttl:
_kvdb.expire(key, expire)
return int(now) <= limit
def simple_vcode_validate(category, key, vcode=None, expire=300, _kvdb=None, more_paras=None):
"""
针对指定类型+关键字参数+更多其他参数(dict类型拼接)在指定过期时间设置验证码验证
例如:
* 用户手机绑定(类型)在5分钟(expire)内验证手机验证码
@category: 类型(例如 reg | login )
@key: 关键参数 (例如 手机号 18621111111 )
@vcode: 验证码 (若无则生成并返回验证码,若有则验证 )
@expire: 过期时间 单位:秒 通过redis key过期时间控制
@kvdb: redis库 默认kvdb.common
@more_paras: 用于较多参数变量控制,拼接为缓存键
"""
# redis缓存键构造
key = 'SPV:%s:%s' % (category, key)
if more_paras:
for k, v in more_paras.items():
key += ':%s:%s' % (k, v)
if not _kvdb:
from .kvdb import kvdb
_kvdb = kvdb.common
if vcode:
if vcode == _kvdb.get(key):
_kvdb.delete(key)
return True
else:
return False
else:
vcode = random_number(6)
_kvdb.setex(key, vcode, expire)
return vcode
def is_internal_ip():
"""
check internal ip
"""
ip = get_client_ip()
return (ip in current_app.config.get('INTERNAL_IP_LIST', [])
or ip in ('127.0.0.1', '0.0.0.0')
or ip.startswith('192.168.'))
| 24.527536 | 107 | 0.60494 | # -*- coding:utf-8 -*-
"""
flskapp/helper.py
~~~~~~~~~~~~~~
Flask框架帮助方法
"""
import os
from random import randint
import traceback
import urllib2
from sharper.util.string import random_number
from flask import get_flashed_messages, request, jsonify, current_app, logging, session
import sys
from ..lib.error import ErrorCode, AppError
from ..util.helper import get_utf8, get_unicode
from .logger import logger
import time
__authors__ = ['"linnchord gao" <linnchord@gmail.com>']
def get_flash_msg(_type=None, joiner=' '):
"""
获取指定类别所有flash消息拼接文本
@_type: ('ok', 'info', 'warn', 'alert')
"""
if _type:
return joiner.join(get_flashed_messages(category_filter=[_type]))
else:
return joiner.join(get_flashed_messages())
def need_json_response():
"""
判断是否需要返回json
"""
return 'application/json' in request.headers.get('Accept')
def print_redirect(url="/", text=None, duration=5, title=u'正在跳转', templ=None):
"""
打印内容并在指定时间(秒)跳转到指定url
@param text:
@param url:
@param duration:
@return:
"""
if not templ:
templ = u'<html>' \
u'<title>{title}</title>' \
u'<meta http-equiv="refresh" content="{duration}; url={url}" />' \
u'<body>' \
u'<h1>{text}</h1>' \
u'<span>{duration}秒后将跳转,请稍候</span>' \
u'</body>' \
u'</html>'
return templ.format(duration=duration, url=url, text=text, title=title)
def json_error_msg(msg, code, http_status=200):
resp = jsonify(success=False,
code=code,
error_code=code,
error_msg=msg,
message=msg,
serverTime=int(1000*time.time())
)
resp.status_code = http_status
return resp
def err2msg_code(err):
if isinstance(err, AppError):
msg = get_utf8(err.msg)
code = err.code
else:
code = getattr(err, 'code', 500)
msg = str(err)
return msg, code
def json_error(err='', http_status=200):
msg, code = err2msg_code(err)
return json_error_msg(msg, code, http_status)
def json_ok(**kwargs):
return jsonify(success=True,serverTime=int(time.time()*1000), **kwargs)
def xml_ok(**kwargs):
if 'status' in kwargs:
return to_xml(**kwargs)
return to_xml(status=1, **kwargs)
def xml_error_msg(err='', **kwargs):
return to_xml(status=0, msg=err, **kwargs)
def xml_error(**kwargs):
return to_xml(status=0, **kwargs)
def to_xml(http_status=200, **kwargs):
from pytoxml import PyToXml
dic = {}
for k in kwargs:
dic[k] = kwargs.get(k) if kwargs.get(k) != None else ""
resp = current_app.make_response(str(PyToXml("root", dic, xml_declaration=True).encode()))
resp.status_code = http_status
return resp
def render_json_warn(err, req, http_status=200):
msg, code = err2msg_code(err)
http_log_warn(msg, req, code)
return json_error(err, http_status)
def render_json_error(err, req, http_status=500):
msg, code = err2msg_code(err)
http_log_error(msg, req, code)
return json_error(err, http_status)
def http_log_error(msg, req, code=ErrorCode.Error):
http_log(msg, 'error', req, code)
def http_log_warn(msg, req, code=ErrorCode.Warn):
http_log(msg, 'warn', req, code)
def http_log_info(msg, req):
http_log(msg, 'info', req, ErrorCode.Info)
def http_log(msg, level, req, code=500):
log_func_map = {'error': log_error, 'warn': logger.warn, 'info': logger.info}
if level in log_func_map:
log_func_map[level](u'%s-%s: %s %s %s --Referrer [%s] --Agent %s' % (
req.remote_addr,
code,
msg,
req.__repr__(),
req.form.__repr__(),
request.headers.get('Referer', ''),
get_agent(req)
))
else:
logger.warn('Wrong logging level!')
def log_error(msg):
logger.error(msg)
trac = traceback.format_exc()
if trac and trac.strip() != 'None':
logger.error(trac)
def get_agent(request):
agent = request.headers.get('User-Agent') or ""
try:
if isinstance(agent, str):
agent = agent.decode('utf-8')
except:
agent = ""
return agent
def get_client_type():
agent = get_agent(request)
if not agent:
return None
agent = agent.lower()
if agent.find("iphone os") != -1:
return "ios"
if agent.find("android") != -1:
if agent.find("android_tv") != -1:
return "android_tv"
return "android"
return "web"
def get_client_version():
ua_infos = ua_parse()
if ua_infos.get('os') == 'Android':
version_code = int(ua_infos.get('client_version'))
elif ua_infos.get('os') == 'iOS':
version_code = int(ua_infos.get('client_version'))
else:
version_code = 0
return version_code
def get_client_ip():
return request.headers.get('X-Forwarded-For', None) or request.remote_addr
def get_cookie(key, is_urlencode=True):
if is_urlencode:
return urllib2.unquote(request.cookies.get(key, '').encode('utf-8')).decode('utf-8')
else:
return get_unicode(request.cookies.get(key, ''))
def clear_cookie(resp, name_or_list):
"""
清除指定cookie
@resp: response
@name_or_list: cookie name or name list
"""
resp = current_app.make_response(resp)
if isinstance(name_or_list, basestring):
name_or_list = [name_or_list]
for n in name_or_list:
resp.set_cookie(n, '', expires=0)
return resp
def set_cookie(resp, name, value, expires,max_age=1800):
"""
设置cookie
"""
resp = current_app.make_response(resp)
resp.set_cookie(name, value,expires=expires,max_age=max_age)
return resp
def simple_times_limit_validate(category, key, limit=5, expire=300, _kvdb=None, more_paras=None, amount=1):
"""
针对指定类型+关键字参数+更多其他参数(dict类型拼接)在指定过期时间内仅允许n次(limit)访问
例如:
* 用户登录(类型)指定ip(关键字参数)在5分钟(expire)内只允许访问5次(limit)
* 某api指定ip或客户端在1分钟内只允许访问1000次
@category: 类型(例如 reg | login )
@key: 关键参数 (例如 203.12.213.30 )
@limit: 限制访问次数
@expire: 过期时间 单位:秒 通过redis key过期时间控制
@kvdb: redis库 默认kvdb.common
@more_paras: 用于较多参数变量控制,拼接为缓存键
"""
# redis缓存键构造
key = 'STLV:%s:%s' % (category, key)
if more_paras:
for k, v in more_paras.items():
key += ':%s:%s' % (k, v)
if not _kvdb:
from .kvdb import kvdb
_kvdb = kvdb.common
now = _kvdb.incr(key, amount=amount)
ttl = _kvdb.ttl(key)
if not ttl:
_kvdb.expire(key, expire)
return int(now) <= limit
def simple_vcode_validate(category, key, vcode=None, expire=300, _kvdb=None, more_paras=None):
"""
针对指定类型+关键字参数+更多其他参数(dict类型拼接)在指定过期时间设置验证码验证
例如:
* 用户手机绑定(类型)在5分钟(expire)内验证手机验证码
@category: 类型(例如 reg | login )
@key: 关键参数 (例如 手机号 18621111111 )
@vcode: 验证码 (若无则生成并返回验证码,若有则验证 )
@expire: 过期时间 单位:秒 通过redis key过期时间控制
@kvdb: redis库 默认kvdb.common
@more_paras: 用于较多参数变量控制,拼接为缓存键
"""
# redis缓存键构造
key = 'SPV:%s:%s' % (category, key)
if more_paras:
for k, v in more_paras.items():
key += ':%s:%s' % (k, v)
if not _kvdb:
from .kvdb import kvdb
_kvdb = kvdb.common
if vcode:
if vcode == _kvdb.get(key):
_kvdb.delete(key)
return True
else:
return False
else:
vcode = random_number(6)
_kvdb.setex(key, vcode, expire)
return vcode
def is_internal_ip():
"""
check internal ip
"""
ip = get_client_ip()
return (ip in current_app.config.get('INTERNAL_IP_LIST', [])
or ip in ('127.0.0.1', '0.0.0.0')
or ip.startswith('192.168.'))
def get_https_url(url):
if url.startswith('https://'):
return url
elif url.startswith('http://'):
return 'https' + url[4:]
else:
return 'https://%s/%s' % (current_app.config.get('DOMAIN'), url.strip('/'))
def get_http_url(url):
if url.startswith('http://'):
return url
elif url.startswith('https://'):
return 'http' + url[5:]
else:
return 'http://%s/%s' % (current_app.config.get('DOMAIN'), url.strip('/'))
def set_return_url(url):
session['return_url'] = url
def get_return_url(default=None):
return session.pop('return_url', default)
| 3,805 | 0 | 552 |
95ceeda69677ea47c26fa6c65cda51757e65fe34 | 10,410 | py | Python | src/modify_log.py | mattiafrak/Processes-Predictions-with-MP-A-Priori-Knowledge | 7e1bb94bb2fc535972a351f543be4f0ad8475275 | [
"MIT"
] | null | null | null | src/modify_log.py | mattiafrak/Processes-Predictions-with-MP-A-Priori-Knowledge | 7e1bb94bb2fc535972a351f543be4f0ad8475275 | [
"MIT"
] | null | null | null | src/modify_log.py | mattiafrak/Processes-Predictions-with-MP-A-Priori-Knowledge | 7e1bb94bb2fc535972a351f543be4f0ad8475275 | [
"MIT"
] | null | null | null | """
Here timestamps are updated in order to have elapsed times following a particular pattern/rule
Author: Mattia Fraccaro
"""
import csv
import time
from datetime import datetime, timedelta
from random import *
| 47.534247 | 173 | 0.397214 | """
Here timestamps are updated in order to have elapsed times following a particular pattern/rule
Author: Mattia Fraccaro
"""
import csv
import time
from datetime import datetime, timedelta
from random import *
class ModifyLog:
log_name = '50x5_3W'
difflist = []
timestamps_list = []
csvfile = open('../data/final_experiments/%s.csv' % log_name, 'r')
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
next(spamreader, None) # skip the headers
for row in spamreader:
# x = randint(1, 3)
# #print(x)
if row[1] == '0':
tdiff = 0
#if row[1] == '1':
#tdiff = 5 * 3
# if row[1] == '2':
# tdiff = 10 * x
# if row[1] == '3':
# tdiff = 25 * x
# if row[1] == '4':
# tdiff = 12 * x
# if row[1] == '5':
# tdiff = 8 * x
# if row[1] == '6':
# tdiff = 30 * x
# if row[1] == '7':
# tdiff = 40 * x
# if row[1] == '8':
# tdiff = 20 * x
# if row[1] == '9':
# tdiff = 3 * x
# if row[1] == '1' and row[3] == '0':
# tdiff = 25
# if row[1] == '1' and row[3] == '1':
# tdiff = randint(40,60)
# if row[1] == '1' and row[3] == '2':
# tdiff = randint(50,70)
# if row[1] == '1' and row[3] == '3':
# tdiff = randint(10,20)
# if row[1] == '1' and row[3] == '4':
# tdiff = 15
# if row[1] == '2' and row[3] == '0':
# tdiff = 75
# if row[1] == '2' and row[3] == '1':
# tdiff = randint(30,40)
# if row[1] == '2' and row[3] == '2':
# tdiff = randint(50,70)
# if row[1] == '2' and row[3] == '3':
# tdiff = randint(100,115)
# if row[1] == '2' and row[3] == '4':
# tdiff = 45
# if row[1] == '3' and row[3] == '2' and int(row[5])<=9492:
# tdiff = 70
# if row[1] == '3' and row[3] == '2' and int(row[5])>9492:
# tdiff = 140
# if row[1] == '3' and row[3] == '1':
# tdiff = randint(120,150)
# if row[1] == '3' and row[3] == '0':
# tdiff = randint(100,110)
# if row[1] == '3' and row[3] == '3':
# tdiff = randint(80,95)
# if row[1] == '3' and row[3] == '4':
# tdiff = 90
# if row[1] == '4' and row[3] == '0':
# tdiff = randint(5,15)
# if row[1] == '4' and row[3] == '1' and int(row[5])<=9492:
# tdiff = 50
# if row[1] == '4' and row[3] == '1' and int(row[5])>9492:
# tdiff = 10
# if row[1] == '4' and row[3] == '2':
# tdiff = 25
# if row[1] == '4' and row[3] == '3':
# tdiff = randint(10,20)
# if row[1] == '4' and row[3] == '4':
# tdiff = randint(20,30)
# if row[1] == '5' and row[3] == '0':
# tdiff = 70
# if row[1] == '5' and row[3] == '1':
# tdiff = randint(35,55)
# if row[1] == '5' and row[3] == '2':
# tdiff = randint(80,95)
# if row[1] == '5' and row[3] == '3':
# tdiff = randint(100,130)
# if row[1] == '5' and row[3] == '4':
# tdiff = 80
if row[3] == '0':
if row[1] == '1' or row[1] == '6' or row[1] == '11' or row[1] == '17' or row[1] == '23' or row[1] == '29' or row[1] == '35' or row[1] == '41' or row[1] == '47':
tdiff = 90
if row[3] == '1':
if row[1] == '1' or row[1] == '6' or row[1] == '11' or row[1] == '17' or row[1] == '23' or row[1] == '29' or row[1] == '35' or row[1] == '41' or row[1] == '47':
tdiff = 80
if row[3] == '2':
if row[1] == '1' or row[1] == '6' or row[1] == '11' or row[1] == '17' or row[1] == '23' or row[1] == '29' or row[1] == '35' or row[1] == '41' or row[1] == '47':
tdiff = 60
if row[3] == '3':
if row[1] == '1' or row[1] == '6' or row[1] == '11' or row[1] == '17' or row[1] == '23' or row[1] == '29' or row[1] == '35' or row[1] == '41' or row[1] == '47':
tdiff = 80
if row[3] == '4':
if row[1] == '1' or row[1] == '6' or row[1] == '11' or row[1] == '17' or row[1] == '23' or row[1] == '29' or row[1] == '35' or row[1] == '41' or row[1] == '47':
tdiff = 40
if row[3] == '0':
if row[1] == '2' or row[1] == '7' or row[1] == '12' or row[1] == '18' or row[1] == '24' or row[1] == '30' or row[1] == '36' or row[1] == '42' or row[1] == '48':
tdiff = 30
if row[3] == '1':
if row[1] == '2' or row[1] == '7' or row[1] == '12' or row[1] == '18' or row[1] == '24' or row[1] == '30' or row[1] == '36' or row[1] == '42' or row[1] == '48':
tdiff = randint(20,30)
if row[3] == '2':
if row[1] == '2' or row[1] == '7' or row[1] == '12' or row[1] == '18' or row[1] == '24' or row[1] == '30' or row[1] == '36' or row[1] == '42' or row[1] == '48':
tdiff = 25
if row[3] == '3':
if row[1] == '2' or row[1] == '7' or row[1] == '12' or row[1] == '18' or row[1] == '24' or row[1] == '30' or row[1] == '36' or row[1] == '42' or row[1] == '48':
tdiff = 30
if row[3] == '4':
if row[1] == '2' or row[1] == '12' or row[1] == '18' or row[1] == '24' or row[1] == '30' or row[1] == '36' or row[1] == '42' or row[1] == '48':
tdiff = 45
if row[3] == '0':
if row[1] == '3' or row[1] == '8' or row[1] == '13' or row[1] == '19' or row[1] == '25' or row[1] == '31' or row[1] == '37' or row[1] == '43' or row[1] == '49':
tdiff = 120
if row[3] == '1':
if row[1] == '3' or row[1] == '8' or row[1] == '13' or row[1] == '19' or row[1] == '25' or row[1] == '31' or row[1] == '37' or row[1] == '43' or row[1] == '49':
tdiff = 110
if row[3] == '2':
if row[1] == '3' or row[1] == '8' or row[1] == '13' or row[1] == '19' or row[1] == '25' or row[1] == '31' or row[1] == '37' or row[1] == '43' or row[1] == '49':
tdiff = 100
if row[3] == '3':
if row[1] == '3' or row[1] == '8' or row[1] == '13' or row[1] == '19' or row[1] == '25' or row[1] == '31' or row[1] == '37' or row[1] == '43' or row[1] == '49':
tdiff = 70
if row[3] == '4':
if row[1] == '3' or row[1] == '8' or row[1] == '13' or row[1] == '19' or row[1] == '25' or row[1] == '31' or row[1] == '37' or row[1] == '43' or row[1] == '49':
tdiff = 85
if row[3] == '0':
if row[1] == '4' or row[1] == '9' or row[1] == '14' or row[1] == '20' or row[1] == '26' or row[1] == '32' or row[1] == '38' or row[1] == '44' or row[1] == '50':
tdiff = 15
if row[3] == '1':
if row[1] == '4' or row[1] == '14' or row[1] == '20' or row[1] == '26' or row[1] == '32' or row[1] == '38' or row[1] == '44' or row[1] == '50':
tdiff = 10
if row[3] == '2':
if row[1] == '4' or row[1] == '9' or row[1] == '14' or row[1] == '20' or row[1] == '26' or row[1] == '32' or row[1] == '38' or row[1] == '44' or row[1] == '50':
tdiff = 25
if row[3] == '3':
if row[1] == '4' or row[1] == '9' or row[1] == '14' or row[1] == '20' or row[1] == '26' or row[1] == '32' or row[1] == '38' or row[1] == '44' or row[1] == '50':
tdiff = 20
if row[3] == '4':
if row[1] == '4' or row[1] == '14' or row[1] == '20' or row[1] == '26' or row[1] == '32' or row[1] == '38' or row[1] == '44' or row[1] == '50':
tdiff = 20
if row[3] == '0':
if row[1] == '5' or row[1] == '10' or row[1] == '15' or row[1] == '20' or row[1] == '25' or row[1] == '30' or row[1] == '35' or row[1] == '40' or row[1] == '45':
tdiff = 70
if row[3] == '1':
if row[1] == '5' or row[1] == '10' or row[1] == '15' or row[1] == '20' or row[1] == '25' or row[1] == '30' or row[1] == '35' or row[1] == '40' or row[1] == '45':
tdiff = randint(35,55)
if row[3] == '2':
if row[1] == '5' or row[1] == '10' or row[1] == '15' or row[1] == '20' or row[1] == '25' or row[1] == '30' or row[1] == '35' or row[1] == '40' or row[1] == '45':
tdiff = randint(80,95)
if row[3] == '3':
if row[1] == '5' or row[1] == '10' or row[1] == '15' or row[1] == '20' or row[1] == '25' or row[1] == '30' or row[1] == '35' or row[1] == '40' or row[1] == '45':
tdiff = randint(100,130)
if row[3] == '4':
if row[1] == '5' or row[1] == '10' or row[1] == '15' or row[1] == '20' or row[1] == '25' or row[1] == '30' or row[1] == '35' or row[1] == '40' or row[1] == '45':
tdiff = 80
if row[1] == '9' and row[3] == '4' and int(row[5])<=13271:
tdiff = 20
if row[1] == '9' and row[3] == '4' and int(row[5])>13271:
tdiff = 60
if row[1] == '9' and row[3] == '1' and int(row[5])<=13271:
tdiff = 50
if row[1] == '9' and row[3] == '1' and int(row[5])>13271:
tdiff = 10
if row[1] == '7' and row[3] == '4' and int(row[5])<=13271:
tdiff = 70
if row[1] == '7' and row[3] == '4' and int(row[5])>13271:
tdiff = 140
#tdiff = row[4]
difflist.append(tdiff)
difflist = [int(i) for i in difflist]
print(difflist)
csvfile.seek(0)
header = next(spamreader, None)
line_index = 0
csvfile2 = open('../data2/final_experiments/%s.csv' % log_name, 'w')
spamwriter = csv.writer(csvfile2)
if header:
spamwriter.writerow(header)
for row in spamreader:
if row[1] == '0':
t0 = datetime.strptime(row[2], "%Y-%m-%d %H:%M:%S")
#timestamps_list.append(t0)
row[4] = str(0)
spamwriter.writerow(row)
print(t0)
if row[1] != '0':
t = t0 + timedelta(seconds=difflist[line_index])
row[2] = str(t)
row[4] = str(difflist[line_index])
spamwriter.writerow(row)
#timestamps_list.append(t)
print(t)
t0 = t
line_index += 1
| 0 | 10,173 | 23 |
555ed55160246b66953bd0e607e4ba2a8294241c | 4,630 | py | Python | bq_data_access/v1/pairwise.py | isb-cgc/ISB-CGC-Webapp | 52ed5366ee295e938108a4687bad551a8dee6b34 | [
"Apache-2.0"
] | 13 | 2016-01-14T02:43:10.000Z | 2020-11-25T20:43:05.000Z | bq_data_access/v1/pairwise.py | isb-cgc/ISB-CGC-Webapp | 52ed5366ee295e938108a4687bad551a8dee6b34 | [
"Apache-2.0"
] | 84 | 2015-11-20T02:03:33.000Z | 2021-10-14T19:24:24.000Z | bq_data_access/v1/pairwise.py | isb-cgc/ISB-CGC-Webapp | 52ed5366ee295e938108a4687bad551a8dee6b34 | [
"Apache-2.0"
] | 5 | 2015-11-25T19:29:53.000Z | 2019-09-04T17:37:52.000Z | #
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from future import standard_library
standard_library.install_aliases()
from builtins import object
import json
import base64
import logging
import urllib.request, urllib.parse, urllib.error
import traceback
import requests
from django.conf import settings
from bq_data_access.v1.data_access import get_feature_vector
from bq_data_access.v1.feature_value_types import ValueType
from bq_data_access.v1.utils import VectorMergeSupport
logger = logging.getLogger('main_logger')
| 31.712329 | 88 | 0.649892 | #
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from future import standard_library
standard_library.install_aliases()
from builtins import object
import json
import base64
import logging
import urllib.request, urllib.parse, urllib.error
import traceback
import requests
from django.conf import settings
from bq_data_access.v1.data_access import get_feature_vector
from bq_data_access.v1.feature_value_types import ValueType
from bq_data_access.v1.utils import VectorMergeSupport
logger = logging.getLogger('main_logger')
class PairwiseInputVector(object):
def __init__(self, feature_id, value_type, data):
self.feature_id = feature_id
self.value_type = value_type
self.data = data
class Pairwise(object):
def __init__(self):
pass
@classmethod
def prepare_features(self, cohort_id, features):
# Get the feature data
feature_vector_mapping = {}
vectors = []
for feature in features:
value_type, vector = get_feature_vector(feature, cohort_id)
if value_type == ValueType.INTEGER or value_type == ValueType.FLOAT:
value_type = "N"
elif value_type == ValueType.STRING:
value_type = "C"
else:
value_type = "B"
feature_vector_mapping[feature] = (value_type, vector)
vectors.append(vector)
# Create merged feature vectors
vms = VectorMergeSupport('NA', 'sample_id', row_ids=features)
for feature in list(feature_vector_mapping.keys()):
vms.add_dict_array(feature_vector_mapping[feature][1], feature, 'value')
merged = vms.get_merged_dict()
rows = []
for feature in list(feature_vector_mapping.keys()):
current_row = [feature_vector_mapping[feature][0] + ":" + feature]
for item in merged:
current_row.append(item[feature])
rows.append("\t".join(current_row))
return rows
@classmethod
def prepare_feature_vector(self, input_vectors):
feature_vector_mapping = {}
vectors = []
for item in input_vectors:
feature_id, value_type, vector = item.feature_id, item.value_type, item.data
if value_type == ValueType.INTEGER or value_type == ValueType.FLOAT:
value_type = "N"
elif value_type == ValueType.STRING:
value_type = "C"
else:
value_type = "B"
feature_vector_mapping[feature_id] = (value_type, vector)
vectors.append(vector)
# Create merged feature vectors
feature_ids = [v.feature_id for v in input_vectors]
vms = VectorMergeSupport('NA', 'sample_id', 'case_id', row_ids=feature_ids)
for feature in list(feature_vector_mapping.keys()):
vms.add_dict_array(feature_vector_mapping[feature][1], feature, 'value')
merged = vms.get_merged_dict()
rows = []
for feature in list(feature_vector_mapping.keys()):
current_row = [feature_vector_mapping[feature][0] + ":" + feature]
for item in merged:
current_row.append(item[feature])
rows.append("\t".join(current_row))
return rows
@classmethod
def run_pairwise(self, feature_rows):
url = settings.PAIRWISE_SERVICE_URL
data_dict = {}
row_count = 1
for row in feature_rows:
label = "row_{count}".format(count=row_count)
data_dict[label] = row
row_count += 1
# Encode the data to be sent to the service
data = urllib.parse.urlencode(data_dict)
decoded_response = None
try:
pairwise_response = requests.post(url=url, data=data)
response = pairwise_response.content
decoded_response = json.loads(base64.b64decode(response))
except Exception as e:
decoded_response = None
logger.error(traceback.format_exc())
return decoded_response
| 3,304 | 173 | 72 |
560c7f23e196225c88dd9191928c23898f056340 | 2,347 | py | Python | tests/TestModules/HExample_model.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 3 | 2017-06-02T19:26:27.000Z | 2021-06-14T04:25:45.000Z | tests/TestModules/HExample_model.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 8 | 2016-08-24T07:04:07.000Z | 2017-05-26T16:22:47.000Z | tests/TestModules/HExample_model.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 1 | 2019-10-31T06:00:23.000Z | 2019-10-31T06:00:23.000Z |
from core.himesis import Himesis
import cPickle as pickle
from uuid import UUID
| 39.779661 | 90 | 0.536003 |
from core.himesis import Himesis
import cPickle as pickle
from uuid import UUID
class HExample_model(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HExample_model.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HExample_model, self).__init__(name='HExample_model', num_nodes=8, edges=[])
# Add the edges
self.add_edges([(1, 6), (6, 2), (4, 7), (7, 0), (5, 0), (2, 5)])
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'PoliceStationMM'
p2
a.""")
self["name"] = """example_model"""
self["GUID__"] = UUID('e9c19091-08a9-4340-b314-29411f06a394')
# Set the node attributes
self.vs[0]["name"] = """s_"""
self.vs[0]["classtype"] = """1"""
self.vs[0]["mm__"] = """Male_S"""
self.vs[0]["cardinality"] = """s_"""
self.vs[0]["GUID__"] = UUID('479e4dc3-85cf-4876-817f-624128caf201')
self.vs[1]["name"] = """s_"""
self.vs[1]["classtype"] = """t_"""
self.vs[1]["mm__"] = """Station_T"""
self.vs[1]["GUID__"] = UUID('665717af-fef8-4c40-a106-8e353ddad551')
self.vs[2]["name"] = """s_"""
self.vs[2]["classtype"] = """1"""
self.vs[2]["mm__"] = """Station_S"""
self.vs[2]["cardinality"] = """s_"""
self.vs[2]["GUID__"] = UUID('4a68a359-fcb8-4618-a805-c4f767447ade')
self.vs[3]["name"] = """s_"""
self.vs[3]["classtype"] = """1"""
self.vs[3]["mm__"] = """Female_S"""
self.vs[3]["cardinality"] = """s_"""
self.vs[3]["GUID__"] = UUID('0d7432cd-bd78-4e8d-aa12-61948ce0ee15')
self.vs[4]["name"] = """s_"""
self.vs[4]["classtype"] = """t_"""
self.vs[4]["mm__"] = """Male_T"""
self.vs[4]["GUID__"] = UUID('09b60957-c440-4df4-b8e2-09349a252d81')
self.vs[5]["associationType"] = """t_"""
self.vs[5]["mm__"] = """directLink_S"""
self.vs[5]["GUID__"] = UUID('91895446-c16e-4530-a940-87af5188a4cf')
self.vs[6]["mm__"] = """backward_link"""
self.vs[6]["GUID__"] = UUID('5f47bb76-a271-49c9-8a68-d407eaf0ffa3')
self.vs[7]["mm__"] = """backward_link"""
self.vs[7]["GUID__"] = UUID('e9d32fca-f740-44ec-8eec-f917de1f6633')
| 0 | 2,241 | 23 |
7b1d970012536ca4aca54342f04b8d16bdf1ea02 | 783 | py | Python | tests/test_hover.py | ponty/discogui | c3dda104bcf5188252848938b2ebdaeeae4d1470 | [
"BSD-2-Clause"
] | 5 | 2018-11-19T14:35:28.000Z | 2020-01-20T17:18:30.000Z | tests/test_hover.py | ponty/discogui | c3dda104bcf5188252848938b2ebdaeeae4d1470 | [
"BSD-2-Clause"
] | null | null | null | tests/test_hover.py | ponty/discogui | c3dda104bcf5188252848938b2ebdaeeae4d1470 | [
"BSD-2-Clause"
] | null | null | null | from easyprocess import EasyProcess
from pyvirtualdisplay.smartdisplay import SmartDisplay
from discogui.hover import active_rectangles
| 27 | 66 | 0.592593 | from easyprocess import EasyProcess
from pyvirtualdisplay.smartdisplay import SmartDisplay
from discogui.hover import active_rectangles
def test_zenity():
with SmartDisplay() as disp:
with EasyProcess(["zenity", "--warning"]):
disp.waitgrab()
ls = active_rectangles()
assert len(ls) == 1
def test_notab():
with SmartDisplay() as disp:
with EasyProcess(["xmessage", "-buttons", "x,y,z", "hi"]):
disp.waitgrab()
ls = active_rectangles(grid=10)
assert len(ls) == 3
def test_gmessage():
with SmartDisplay() as disp:
with EasyProcess(["gmessage", "-buttons", "x,y,z", "hi"]):
disp.waitgrab()
ls = active_rectangles()
assert len(ls) == 3
| 574 | 0 | 69 |
100c4a9136eb4bb23e2157a31502b9018a9a3418 | 17,040 | py | Python | tickettemplate/ttadmin.py | t-kenji/trac-ticket-template-plugin | 71f58052eb555393922da0d8295454cad6a3bce0 | [
"BSD-3-Clause"
] | null | null | null | tickettemplate/ttadmin.py | t-kenji/trac-ticket-template-plugin | 71f58052eb555393922da0d8295454cad6a3bce0 | [
"BSD-3-Clause"
] | null | null | null | tickettemplate/ttadmin.py | t-kenji/trac-ticket-template-plugin | 71f58052eb555393922da0d8295454cad6a3bce0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2013 Richard Liao <richard.liao.i@gmail.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from __future__ import with_statement
import inspect
import textwrap
import time
import urllib
from pkg_resources import resource_exists, resource_filename
from trac.admin.api import IAdminCommandProvider, IAdminPanelProvider
from trac.core import *
from trac.config import BoolOption, ListOption, Option
from trac.db import DatabaseManager
from trac.env import IEnvironmentSetupParticipant
from trac.perm import IPermissionRequestor
from trac.ticket import Ticket, Type as TicketType
from trac.util.translation import domain_functions
from trac.web.api import IRequestHandler, ITemplateStreamFilter, RequestDone
from trac.web.chrome import Chrome, ITemplateProvider, add_script, \
add_script_data
try:
import json
except ImportError:
import simplejson as json
from default_templates import DEFAULT_TEMPLATES
from tickettemplate.model import TT_Template, schema, schema_version
from utils import *
gettext, _, tag_, N_, add_domain = \
domain_functions('tickettemplate', 'gettext', '_', 'tag_', 'N_',
'add_domain')
| 33.742574 | 83 | 0.564906 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2013 Richard Liao <richard.liao.i@gmail.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from __future__ import with_statement
import inspect
import textwrap
import time
import urllib
from pkg_resources import resource_exists, resource_filename
from trac.admin.api import IAdminCommandProvider, IAdminPanelProvider
from trac.core import *
from trac.config import BoolOption, ListOption, Option
from trac.db import DatabaseManager
from trac.env import IEnvironmentSetupParticipant
from trac.perm import IPermissionRequestor
from trac.ticket import Ticket, Type as TicketType
from trac.util.translation import domain_functions
from trac.web.api import IRequestHandler, ITemplateStreamFilter, RequestDone
from trac.web.chrome import Chrome, ITemplateProvider, add_script, \
add_script_data
try:
import json
except ImportError:
import simplejson as json
from default_templates import DEFAULT_TEMPLATES
from tickettemplate.model import TT_Template, schema, schema_version
from utils import *
gettext, _, tag_, N_, add_domain = \
domain_functions('tickettemplate', 'gettext', '_', 'tag_', 'N_',
'add_domain')
class TicketTemplateModule(Component):
    """Trac plugin that lets users create tickets from predefined templates.

    Implements several Trac extension points: admin panel and trac-admin
    commands for managing templates, a request handler for the AJAX
    endpoints used by the new-ticket page, and a stream filter that
    injects the client-side template picker.  Template records are
    persisted through :class:`tickettemplate.model.TT_Template`.

    NOTE: this module is written for Python 2 (``print`` statements,
    ``unicode``, ``urllib.unquote``).
    """

    implements(IAdminCommandProvider, IAdminPanelProvider,
               IEnvironmentSetupParticipant, IPermissionRequestor,
               IRequestHandler, ITemplateProvider, ITemplateStreamFilter)

    # trac.ini section all of this plugin's options live in.
    SECTION_NAME = 'tickettemplate'

    enable_custom = BoolOption(SECTION_NAME, 'enable_custom', True,
        """Display the My Template sidebar.""")

    field_list = ListOption(SECTION_NAME, 'field_list',
        'summary, description, reporter, owner, priority, cc, milestone, '
        'component, version, type',
        doc="""List of fields that can be included in the template.""")

    json_template_file = Option(SECTION_NAME, 'json_template_file', '',
        """File containing templates.""")

    def __init__(self):
        # Register the plugin's translation catalog with Babel.
        locale_dir = resource_filename(__name__, 'locale')
        add_domain(self.env.path, locale_dir)

    # IPermissionRequestor methods

    def get_permission_actions(self):
        """Declare TT_USER and TT_ADMIN (which implies TT_USER)."""
        actions = ['TT_USER', ('TT_ADMIN', ['TT_USER'])]
        return actions

    # IEnvironmentSetupParticipant methods

    def environment_created(self):
        """Create the plugin's tables and seed the default templates."""
        # Create the required tables
        connector, _ = DatabaseManager(self.env)._get_connector()
        with self.env.db_transaction as db:
            for table in schema:
                for stmt in connector.to_sql(table):
                    db(stmt)
            # Record the schema version so environment_needs_upgrade()
            # can detect future migrations.
            db("""INSERT INTO system (name,value)
                  VALUES ('tt_version', %s)
                  """, (schema_version,))
            # Create some default templates
            if self.json_template_file == '':
                # use default templates from module
                self._insert_templates(DEFAULT_TEMPLATES)
            else:
                self.ticket_template_import(self.json_template_file)

    def environment_needs_upgrade(self, db=None):
        """Return True when the stored schema version is stale or absent."""
        for value, in self.env.db_query("""
                SELECT value FROM system WHERE name='tt_version'
                """):
            return int(value) < schema_version
        # No 'tt_version' row at all: tables have never been created.
        return True

    def upgrade_environment(self, db=None):
        """Run every pending schema migration up to ``schema_version``."""
        for value, in self.env.db_query("""
                SELECT value FROM system WHERE name='tt_version'
                """):
            current_version = int(value)
            break
        else:
            # First install: create tables, then migrate from version 0.
            self.environment_created()
            current_version = 0

        from tickettemplate import upgrades
        for version in range(current_version + 1, schema_version + 1):
            for function in upgrades.map.get(version):
                # Each upgrade function documents itself via its docstring.
                print textwrap.fill(inspect.getdoc(function))
                function(self.env, db)
                print 'Done.'
        self.env.db_transaction("""
            UPDATE system SET value=%s WHERE name='tt_version'
            """, (schema_version,))
        self.log.info("Upgraded tt tables from version %d to %d",
                      current_version, schema_version)

    def _insert_templates(self, templates):
        """
        accept list of tuples called templates and insert into database.
        example: templates = [('tt_name','tt_value'),]
        """
        now = int(time.time())
        for tt_name, tt_value in templates:
            # SYSTEM_USER comes from utils (star import); marks records
            # as built-in rather than user-owned custom templates.
            record = [
                now,
                SYSTEM_USER,
                tt_name,
                'description',
                tt_value,
            ]
            TT_Template.insert(self.env, record)
            # increment timestamp; other code expects it to be unique
            now += 1

    # IAdminCommandProvider methods

    def get_admin_commands(self):
        """Implement get_admin_commands to provide two trac-admin commands:

        *ticket_template export*
            export ticket_templates as json to stdout

        *ticket_template import <json_template_file>*
            import ticket_templates from json file specified in trac.ini
        """
        yield ('ticket_template export', '',
               """export ticket templates as json to stdout""",
               None, self.ticket_template_export)
        yield ('ticket_template import', '<json_template_file>',
               """import ticket templates from json file

               Specify json file path via:
               * json_template_file argument
               * json_template_file option in trac.ini
               """,
               None, self.ticket_template_import)

    def ticket_template_export(self):
        """export current ticket templates as json to stdout"""
        template_names = TT_Template.fetchNames(self.env)
        export_data = []
        for template_name in template_names:
            # Pairs of (name, template text) — the same shape that
            # ticket_template_import() / _insert_templates() consume.
            export_datum = (
                template_name,
                TT_Template.fetch(self.env, template_name),
            )
            export_data.append(export_datum)
        print(json.dumps(export_data, indent=2))

    def ticket_template_import(self, json_template_file=''):
        """
        Import ticket templates from json file.

        Specify json file path via:
        * json_template_file argument
        * json_template_file option in trac.ini
        """
        # The explicit argument wins over the trac.ini option.
        json_template_file = json_template_file or self.json_template_file
        if json_template_file or self.json_template_file:
            # convert template_file json to python data structure then insert
            with open(json_template_file) as f:
                self._insert_templates(json.load(f))

    # IAdminPanelProvider methods

    def get_admin_panels(self, req):
        """Expose the 'Ticket Template' panel under Ticket System admin."""
        if 'TT_ADMIN' in req.perm:
            yield ('ticket', _("Ticket System"), self.SECTION_NAME,
                   _("Ticket Template"))

    def render_admin_panel(self, req, cat, page, path_info):
        """Handle the admin panel: load, save, preview, and view history
        of templates, one template per ticket type (plus 'default').
        """
        req.perm.require('TT_ADMIN')

        # Translation helpers are passed through so the Genshi template
        # can call them directly.
        data = {
            'gettext': gettext,
            '_': _,
            'tag_': tag_,
            'N_': N_,
        }
        data['options'] = [t.name for t in TicketType.select(self.env)] + \
                          [_("default")]
        data['type'] = req.args.get('type')

        if 'id' in req.args:
            # after load history
            # NOTE(review): _loadTemplateTextById/_getNameById are not
            # defined in this class as shown — presumably provided
            # elsewhere; confirm before refactoring.
            id = req.args.get('id')
            data['tt_text'] = self._loadTemplateTextById(id)
            data['type'] = self._getNameById(id)
        elif req.method == 'POST':
            # Load
            if req.args.get('loadtickettemplate'):
                tt_name = req.args.get('type')
                data['tt_text'] = self._loadTemplateText(tt_name)

            # Load history
            if req.args.get('loadhistory'):
                tt_name = req.args.get('type')
                data['tt_name'] = tt_name
                tt_history = []
                for id, modi_time, tt_name, tt_text \
                        in TT_Template.selectByName(self.env, tt_name):
                    history = {'id': id, 'tt_name': tt_name,
                               'modi_time': self._formatTime(int(modi_time)),
                               'tt_text': tt_text,
                               'href': req.abs_href.admin(cat, page,
                                                          {'id': id})}
                    tt_history.append(history)
                data['tt_history'] = tt_history
                # History view renders a different template.
                return 'loadhistory.html', data

            # Save
            elif req.args.get('savetickettemplate'):
                # Normalize CRLF submitted by browsers to LF.
                tt_text = req.args.get('description').replace('\r', '')
                tt_name = req.args.get('type')
                self._saveTemplateText(tt_name, tt_text)
                data['tt_text'] = tt_text

            # preview
            elif req.args.get('preview'):
                tt_text = req.args.get('description').replace('\r', '')
                tt_name = req.args.get('type')
                # NOTE(review): _previewTemplateText not visible in this
                # class — presumably defined elsewhere; verify.
                description_preview = \
                    self._previewTemplateText(tt_name, tt_text, req)
                data['tt_text'] = tt_text
                data['description_preview'] = description_preview

        return 'admin_tickettemplate.html', data

    # ITemplateProvider methods

    def get_templates_dirs(self):
        """Directory containing the plugin's Genshi templates."""
        return [resource_filename(__name__, 'templates')]

    def get_htdocs_dirs(self):
        """Static resources, served under the 'tt' prefix."""
        return [('tt', resource_filename(__name__, 'htdocs'))]

    # IRequestHandler methods

    def match_request(self, req):
        # Claims every URL below /tt (AJAX endpoints only).
        return req.path_info.startswith('/tt')

    def process_request(self, req):
        """Serve the JSON endpoints used by tt_newticket.js:
        /tt/query, /tt/custom_save, /tt/custom_delete,
        /tt/edit_buffer_save.  Every branch responds via _sendResponse,
        which raises RequestDone.
        """
        req.perm.assert_permission('TICKET_CREATE')
        data = {
            'gettext': gettext,
            '_': _,
            'tag_': tag_,
            'N_': N_,
        }

        if req.path_info.startswith('/tt/query'):
            # handle XMLHTTPRequest
            data['req_args'] = req.args
            data.update({'tt_user': req.authname})
            result = TT_Template.fetchAll(self.env, data)
            result['status'] = '1'
            result['field_list'] = self._getFieldList()
            # The "My Template" sidebar needs both the option and the
            # TT_USER permission.
            if self.enable_custom and 'TT_USER' in req.perm:
                result['enable_custom'] = True
            else:
                result['enable_custom'] = False
            if 'warning' in req.args:
                result['warning'] = req.args['warning']
            json_str = json.dumps(result)
            self._sendResponse(req, json_str)

        # tt_custom save
        elif req.path_info.startswith('/tt/custom_save'):
            tt_name, custom_template = self._handleCustomSave(req)
            result = {
                'status': '1',
                'tt_name': tt_name,
                'new_template': custom_template
            }
            json_str = json.dumps(result)
            self._sendResponse(req, json_str)

        # tt_custom delete
        elif req.path_info.startswith('/tt/custom_delete'):
            tt_name = self._handleCustomDelete(req)
            result = {
                'status': '1',
                'tt_name': tt_name
            }
            json_str = json.dumps(result)
            self._sendResponse(req, json_str)

        elif req.path_info.startswith('/tt/edit_buffer_save'):
            # Same persistence path as custom_save; only the client-side
            # meaning (auto-saved edit buffer) differs.
            tt_name, custom_template = self._handleCustomSave(req)
            result = {
                'status': '1',
                'tt_name': tt_name,
                'new_template': custom_template
            }
            json_str = json.dumps(result)
            self._sendResponse(req, json_str)

    # ITemplateStreamFilter methods

    def filter_stream(self, req, method, filename, stream, data):
        """Inject the template-picker scripts into the new-ticket page."""
        if filename == 'ticket.html' \
                and req.path_info.startswith('/newticket'):
            add_script_data(req, {'preview': 'preview' in req.args})
            add_script(req, 'tt/tt_newticket.js')
            add_script(req, 'tt/json2.js')
            # Load a locale-specific message catalog only if one ships
            # with the plugin.
            if req.locale and \
                    resource_exists('tickettemplate',
                                    'htdocs/%s.js' % req.locale):
                add_script(req, 'tt/%s.js' % req.locale)
        return stream

    # Internal methods

    def _handleCustomDelete(self, req):
        """ delete custom template
        """
        # Request body is a URL-encoded JSON document.
        jsonstr = urllib.unquote(req.read())
        custom_data = json.loads(jsonstr)
        tt_name = custom_data.get('tt_name')
        if not tt_name:
            # Returns None; caller serializes it as-is.
            return
        tt_user = req.authname

        # delete same custom template if exist
        delete_data = {
            'tt_user': tt_user,
            'tt_name': tt_name,
        }
        TT_Template.deleteCustom(self.env, delete_data)

        return tt_name

    def _handleCustomSave(self, req):
        """ save custom template
        """
        jsonstr = urllib.unquote(req.read())
        custom_data = json.loads(jsonstr)
        tt_name = custom_data.get('tt_name')
        custom_template = custom_data.get('custom_template')
        if not tt_name or not custom_template:
            return tt_name, custom_template

        now = int(time.time())
        tt_user = req.authname

        # delete same custom template if exist
        delete_data = {
            'tt_user': tt_user,
            'tt_name': tt_name,
        }
        TT_Template.deleteCustom(self.env, delete_data)

        # save custom template
        field_list = self._getFieldList()
        for tt_field in field_list:
            tt_value = custom_template.get(tt_field)
            # Only persist fields actually present in the submission.
            if tt_value is not None:
                record = [
                    now,
                    tt_user,
                    tt_name,
                    tt_field,
                    tt_value,
                ]
                TT_Template.insert(self.env, record)

        return tt_name, custom_template

    def _getFieldList(self):
        """ Get available fields
        return:
            ["summary", "description", ...]
        """
        field_list = [field.lower() for field in self.field_list]
        # 'description' is mandatory even if the option omits it.
        if 'description' not in field_list:
            field_list.append('description')
        return field_list

    def _getTTFields(self, tt_user, tt_name):
        """
        Get all fields values
        return:
            {
                "summary": {"field_type":"text", "field_value": "abc"},
                "description": {"field_type":"textarea", "field_value": "xyz"},
            }
        """
        result = {}

        # init result
        field_list = self._getFieldList()
        for field in field_list:
            result[field] = ''

        # update from db
        data = {
            'tt_user': tt_user,
            'tt_name': tt_name,
        }
        field_value_mapping = TT_Template.fetchCurrent(self.env, data)
        for k, v in field_value_mapping.items():
            if k in field_list:
                result[k] = v

        # Wrap every field with its configured widget type
        # (<field>.type in trac.ini, defaulting to 'text').
        for field in field_list:
            field_type = self.config.get(self.SECTION_NAME, field + '.type',
                                         'text')
            field_value = field_value_mapping.get(field)
            field_detail = {
                'field_type': field_type,
                'field_value': field_value
            }
            result[field] = field_detail

        return result

    def _loadTemplateText(self, tt_name):
        """ get template text from tt_dict.
        return tt_text if found in db
        or default tt_text if exists
        or empty string if default not exists.
        """
        tt_text = TT_Template.fetch(self.env, tt_name)
        if not tt_text:
            # Fall back to the shared 'default' template.
            tt_text = TT_Template.fetch(self.env, 'default')
        return tt_text

    def _sendResponse(self, req, message):
        """ send response and stop request handling
        """
        req.send_response(200)
        req.send_header('Cache-control', 'no-cache')
        req.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
        req.send_header('Content-Type', 'text/plain' + ';charset=utf-8')
        # Content-Length must count UTF-8 bytes, not unicode characters.
        req.send_header('Content-Length',
                        len(isinstance(message, unicode) and
                            message.encode('utf-8') or message))
        req.end_headers()

        if req.method != 'HEAD':
            req.write(message)
        # RequestDone tells Trac the response is complete.
        raise RequestDone

    def _saveTemplateText(self, tt_name, tt_text):
        """ save ticket template text to db.
        """
        TT_Template.insert(self.env, (int(time.time()), 'SYSTEM', tt_name,
                                      'description', tt_text))

    def _getTicketTypeNames(self):
        """ get ticket type names
        return:
            ["defect", "enhancement", ..., "default"]
        """
        options = []

        ticket = Ticket(self.env)
        for field in ticket.fields:
            if field['name'] == 'type':
                options.extend(field['options'])
        # 'default' is always offered as the fallback template name.
        options.extend(['default'])

        return options
| 7,111 | 8,580 | 23 |
79f1e8f31fc56c489b60eecbe30ff8b8e3035435 | 1,383 | py | Python | test-sum.py | eliteraspberries/python-libnu | 869945fc3f0d4c7ebbc9a4e66f3aa6700472b0f3 | [
"0BSD"
] | null | null | null | test-sum.py | eliteraspberries/python-libnu | 869945fc3f0d4c7ebbc9a4e66f3aa6700472b0f3 | [
"0BSD"
] | null | null | null | test-sum.py | eliteraspberries/python-libnu | 869945fc3f0d4c7ebbc9a4e66f3aa6700472b0f3 | [
"0BSD"
] | null | null | null | #!/usr/bin/env python
import functools
import numpy
import hypothesis
import hypothesis.extra.numpy
import hypothesis.strategies
import libnu.sum
from test import eq
arrays = functools.partial(
hypothesis.extra.numpy.arrays,
dtype=numpy.float32,
unique=True,
)
floats = hypothesis.strategies.floats(-1.0, 1.0)
numpy.zeros = functools.partial(numpy.zeros, dtype=numpy.float32)
@hypothesis.given(arrays(shape=10, elements=floats))
@hypothesis.given(arrays(shape=10, elements=floats), floats)
@hypothesis.given(arrays(shape=100, elements=floats))
@hypothesis.given(arrays(shape=100, elements=floats))
if __name__ == '__main__':
test_sum()
test_meanvar()
test_mean()
test_var()
| 23.844828 | 65 | 0.677513 | #!/usr/bin/env python
import functools
import numpy
import hypothesis
import hypothesis.extra.numpy
import hypothesis.strategies
import libnu.sum
from test import eq
# Strategy factory for float32 test arrays with unique elements; each
# @given site supplies its own shape.
arrays = functools.partial(
    hypothesis.extra.numpy.arrays,
    dtype=numpy.float32,
    unique=True,
)
# Element strategy: floats restricted to [-1.0, 1.0].
floats = hypothesis.strategies.floats(-1.0, 1.0)
# NOTE(review): this monkey-patches numpy.zeros module-wide so it
# defaults to float32 — presumably for code under test that allocates
# with numpy.zeros; confirm nothing imported later relies on the
# float64 default.
numpy.zeros = functools.partial(numpy.zeros, dtype=numpy.float32)
@hypothesis.given(arrays(shape=10, elements=floats))
def test_sum(x):
    """libnu's sum matches numpy's and is insensitive to element order."""
    assert eq(libnu.sum.sum(x), numpy.sum(x), 1e-6)
    reversed_copy = numpy.copy(x[::-1])
    assert eq(libnu.sum.sum(x), libnu.sum.sum(reversed_copy), 1e-6)
@hypothesis.given(arrays(shape=10, elements=floats), floats)
def test_meanvar(x, a):
    """meanvar agrees with numpy; mean scales by a, variance by a**2."""
    m, v = libnu.sum.meanvar(x)
    assert eq(m, numpy.mean(x), 1e-6)
    assert eq(v, numpy.var(x), 1e-6)
    scaled = numpy.copy(x * a)
    assert eq(libnu.sum.mean(x) * a, libnu.sum.mean(scaled), 1e-6)
    assert eq(libnu.sum.var(x) * a * a, libnu.sum.var(scaled), 1e-6)
@hypothesis.given(arrays(shape=100, elements=floats))
def test_mean(x):
    """The mean lies between the smallest and largest element."""
    m = libnu.sum.mean(x)
    assert m >= numpy.min(x)
    assert m <= numpy.max(x)
@hypothesis.given(arrays(shape=100, elements=floats))
def test_var(x):
    """Variance is non-negative and bounded by the data range."""
    v = libnu.sum.var(x)
    assert v >= 0.0
    assert v <= numpy.max(x) - numpy.min(x)
# Allow running the property tests directly, without a test runner.
if __name__ == '__main__':
    test_sum()
    test_meanvar()
    test_mean()
    test_var()
| 577 | 0 | 88 |