Column schema (type and value range; ⌀ marks nullable columns):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 7 – 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 – 247 |
| max_stars_repo_name | string | length 4 – 125 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 ⌀ | 1 – 368k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 – 247 |
| max_issues_repo_name | string | length 4 – 125 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 ⌀ | 1 – 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 – 247 |
| max_forks_repo_name | string | length 4 – 125 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 ⌀ | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 1 – 1.04M |
| avg_line_length | float64 | 1.77 – 618k |
| max_line_length | int64 | 1 – 1.02M |
| alphanum_fraction | float64 | 0 – 1 |
| original_content | string | length 7 – 1.04M |
| filtered:remove_function_no_docstring | int64 | -102 – 942k |
| filtered:remove_class_no_docstring | int64 | -354 – 977k |
| filtered:remove_delete_markers | int64 | 0 – 60.1k |
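Each record below lists its metadata fields, followed by the filtered `content` and then the `original_content`. As a minimal sketch of how a table with this schema can be inspected programmatically (assuming a hypothetical local export named `code_dataset_sample.parquet`, and assuming `alphanum_fraction` means the share of alphanumeric characters, which is not documented here), the per-file statistics can be recomputed from `content`:

```python
import pandas as pd

# Hypothetical local export of the rows shown below; the real dataset may be
# stored as sharded parquet or JSONL under a different name.
df = pd.read_parquet("code_dataset_sample.parquet")

def line_stats(content: str) -> dict:
    """Recompute avg_line_length, max_line_length and alphanum_fraction."""
    lines = content.splitlines() or [""]
    return {
        "avg_line_length": sum(len(line) for line in lines) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        # Assumed definition: fraction of all characters that are alphanumeric.
        "alphanum_fraction": sum(c.isalnum() for c in content) / max(len(content), 1),
    }

print(df[["max_stars_repo_name", "max_stars_repo_path", "size", "ext"]].head())
print(line_stats(df.loc[0, "content"]))
```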
hexsha: 9d16548fc6a8b1b86bb49107b9c13023f78ef594 | size: 3,051 | ext: py | lang: Python
max_stars: publish/tests/models.py @ nacady/django-publish (head a9b0b0b0ce0a2cd664d256edc4c819180dc882df) | licenses: ["BSD-3-Clause"] | stars: null | star events: null – null
max_issues: publish/tests/models.py @ nacady/django-publish (head a9b0b0b0ce0a2cd664d256edc4c819180dc882df) | licenses: ["BSD-3-Clause"] | issues: null | issue events: null – null
max_forks: publish/tests/models.py @ nacady/django-publish (head a9b0b0b0ce0a2cd664d256edc4c819180dc882df) | licenses: ["BSD-3-Clause"] | forks: 1 | fork events: 2021-06-28T03:59:45.000Z – 2021-06-28T03:59:45.000Z
content:
from django.db import models
from datetime import datetime
from publish.models import Publishable
# publishable model with a reverse relation to
# page (as a child)
# non-publishable reverse relation to page (as a child)
update_pub_date.pub_date = datetime.now()
avg_line_length: 29.621359 | max_line_length: 74 | alphanum_fraction: 0.715831
original_content:
from django.db import models
from datetime import datetime
from publish.models import Publishable
class Site(models.Model):
title = models.CharField(max_length=100)
domain = models.CharField(max_length=100)
class FlatPage(Publishable):
url = models.CharField(max_length=100, db_index=True)
title = models.CharField(max_length=200)
content = models.TextField(blank=True)
enable_comments = models.BooleanField()
template_name = models.CharField(max_length=70, blank=True)
registration_required = models.BooleanField()
sites = models.ManyToManyField(Site)
class Meta:
ordering = ['url']
def get_absolute_url(self):
if self.is_public:
return self.url
return '%s*' % self.url
class Author(Publishable):
name = models.CharField(max_length=100)
profile = models.TextField(blank=True)
class PublishMeta(Publishable.PublishMeta):
publish_reverse_fields = ['authorprofile']
class AuthorProfile(Publishable):
author = models.OneToOneField(Author)
extra_profile = models.TextField(blank=True)
class ChangeLog(models.Model):
changed = models.DateTimeField(db_index=True, auto_now_add=True)
message = models.CharField(max_length=200)
class Tag(models.Model):
title = models.CharField(max_length=100, unique=True)
slug = models.CharField(max_length=100)
# publishable model with a reverse relation to
# page (as a child)
class PageBlock(Publishable):
page = models.ForeignKey('Page')
content = models.TextField(blank=True)
# non-publishable reverse relation to page (as a child)
class Comment(models.Model):
page = models.ForeignKey('Page')
comment = models.TextField()
def update_pub_date(page, field_name, value):
# ignore value entirely and replace with now
setattr(page, field_name, update_pub_date.pub_date)
update_pub_date.pub_date = datetime.now()
class Page(Publishable):
slug = models.CharField(max_length=100, db_index=True)
title = models.CharField(max_length=200)
content = models.TextField(blank=True)
pub_date = models.DateTimeField(default=datetime.now)
parent = models.ForeignKey('self', blank=True, null=True)
authors = models.ManyToManyField(Author, blank=True)
log = models.ManyToManyField(ChangeLog, blank=True)
tags = models.ManyToManyField(Tag, through='PageTagOrder', blank=True)
class Meta:
ordering = ['slug']
class PublishMeta(Publishable.PublishMeta):
publish_exclude_fields = ['log']
publish_reverse_fields = ['pageblock_set']
publish_functions = {'pub_date': update_pub_date}
def get_absolute_url(self):
if not self.parent:
return u'/%s/' % self.slug
return '%s%s/' % (self.parent.get_absolute_url(), self.slug)
class PageTagOrder(Publishable):
# note these are named in non-standard way to
# ensure we are getting correct names
tagged_page = models.ForeignKey(Page)
page_tag = models.ForeignKey(Tag)
tag_order = models.IntegerField()
filtered:remove_function_no_docstring: 364 | filtered:remove_class_no_docstring: 2,160 | filtered:remove_delete_markers: 251
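The three `filtered:*` columns at the end of each record appear to report how much text each cleaning pass removed from `original_content`; that reading is an assumption, not something stated in this dump. Purely as an illustration (not the dataset's actual pipeline), here is a sketch of counting the characters a "drop functions without docstrings" pass would take out:

```python
import ast

def chars_removed_without_docstring(source: str) -> int:
    """Illustrative only: total characters spanned by function definitions
    that lack a docstring. A nested undocumented function inside such a
    parent is counted twice; acceptable for a rough estimate."""
    tree = ast.parse(source)
    lines = source.splitlines(keepends=True)
    removed = 0
    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            if ast.get_docstring(node) is None:
                # end_lineno is available on Python 3.8+
                start, end = node.lineno - 1, node.end_lineno
                removed += sum(len(line) for line in lines[start:end])
    return removed
```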
hexsha: 772b21e88da8f6ee452593fcfccc34cec501a301 | size: 1,226 | ext: py | lang: Python
max_stars: flower/db.py @ guhaiqiao/Flower_app (head eae9b6ce066544e8b505c98d202527d86cea9357) | licenses: ["MIT"] | stars: 1 | star events: 2020-12-14T01:48:20.000Z – 2020-12-14T01:48:20.000Z
max_issues: flower/db.py @ guhaiqiao/Flower_app (head eae9b6ce066544e8b505c98d202527d86cea9357) | licenses: ["MIT"] | issues: null | issue events: null – null
max_forks: flower/db.py @ guhaiqiao/Flower_app (head eae9b6ce066544e8b505c98d202527d86cea9357) | licenses: ["MIT"] | forks: null | fork events: null – null
content:
import sqlite3
import glob
import os
import click
from flask import current_app, g
from flask.cli import with_appcontext
@click.command('init-db')
@with_appcontext
avg_line_length: 23.576923 | max_line_length: 72 | alphanum_fraction: 0.615824
original_content:
import sqlite3
import glob
import os
import click
from flask import current_app, g
from flask.cli import with_appcontext
def get_db():
if 'db' not in g:
g.db = sqlite3.connect(current_app.config['DATABASE'],
detect_types=sqlite3.PARSE_DECLTYPES)
g.db.row_factory = sqlite3.Row
return g.db
def close_db(e=None):
db = g.pop('db', None)
if db is not None:
db.close()
def init_db():
db = get_db()
paths = [
'/image/user_image', '/image/flower_image', '/image/blog_image',
'/image/index_image'
]
for path in paths:
if not os.path.exists(os.getcwd() + path):
os.mkdir(os.getcwd() + path)
for picture in glob.glob(os.getcwd() + '/image/*/*.jpg'):
print(picture.split('/')[-1])
if picture.split('/')[-1] != 'default.jpg':
os.remove(picture)
with current_app.open_resource('schema.sql') as f:
db.executescript(f.read().decode('utf8'))
@click.command('init-db')
@with_appcontext
def init_db_command():
init_db()
click.echo('Initialized the database')
def init_app(app):
app.teardown_appcontext(close_db)
app.cli.add_command(init_db_command)
filtered:remove_function_no_docstring: 942 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 114
hexsha: c4bcfd12173f327f06cebc80aa483d7df62edc93 | size: 3,151 | ext: py | lang: Python
max_stars: tests/test_ddg_global_var_dependencies.py @ Kyle-Kyle/angr (head 345b2131a7a67e3a6ffc7d9fd475146a3e12f837) | licenses: ["BSD-2-Clause"] | stars: 6,132 | star events: 2015-08-06T23:24:47.000Z – 2022-03-31T21:49:34.000Z
max_issues: tests/test_ddg_global_var_dependencies.py @ Kyle-Kyle/angr (head 345b2131a7a67e3a6ffc7d9fd475146a3e12f837) | licenses: ["BSD-2-Clause"] | issues: 2,272 | issue events: 2015-08-10T08:40:07.000Z – 2022-03-31T23:46:44.000Z
max_forks: tests/test_ddg_global_var_dependencies.py @ Kyle-Kyle/angr (head 345b2131a7a67e3a6ffc7d9fd475146a3e12f837) | licenses: ["BSD-2-Clause"] | forks: 1,155 | fork events: 2015-08-06T23:37:39.000Z – 2022-03-31T05:54:11.000Z
content:
import os
import angr
import nose
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
arches = {'x86_64'}
if __name__ == "__main__":
main()
avg_line_length: 43.164384 | max_line_length: 171 | alphanum_fraction: 0.720406
original_content:
import os
import angr
import nose
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
arches = {'x86_64'}
def main():
test_ddg_global_var_dependencies()
def test_ddg_global_var_dependencies():
for arch in arches:
run_ddg_global_var_dependencies(arch)
def run_ddg_global_var_dependencies(arch):
test_file = os.path.join(test_location, arch, 'ddg_global_var_dependencies')
proj = angr.Project(test_file, auto_load_libs=False)
cfg = proj.analyses.CFGEmulated(context_sensitivity_level=2, keep_state=True, state_add_options=angr.sim_options.refs)
ddg = proj.analyses.DDG(cfg)
main_func = cfg.functions.function(name='main')
target_block_addr = main_func.ret_sites[0].addr
target_block = proj.factory.block(addr=target_block_addr)
tgt_stmt_idx, tgt_stmt = get_target_stmt(proj, target_block)
assert tgt_stmt_idx is not None
buf_addr = tgt_stmt.data.addr.con.value
tgt_ddg_node = get_ddg_node(ddg, target_block_addr, tgt_stmt_idx)
assert tgt_ddg_node is not None
# Whether the target depends on the statement assigning 'b' to the global variable
has_correct_dependency = False
for pred in ddg.get_predecessors(tgt_ddg_node):
pred_block = proj.factory.block(addr=pred.block_addr)
stmt = pred_block.vex.statements[pred.stmt_idx]
has_correct_dependency |= check_dependency(stmt, buf_addr, ord('b'))
# If the target depends on the statement assigning 'a' to the global variable, it is underconstrained (this assignment should be overwritten by the 'b' assignment)
nose.tools.assert_false(check_dependency(stmt, buf_addr, ord('a')), msg="Target statement has incorrect dependency (DDG is underconstrained)")
nose.tools.assert_true(has_correct_dependency, msg='Target statement does not have correct dependency (DDG is overconstrained)')
def check_dependency(stmt, addr, const):
# Check if we are storing a constant to a variable with constant address
if stmt.tag == 'Ist_Store' and stmt.addr.tag == 'Iex_Const' and stmt.data.tag == 'Iex_Const':
# Check if we are storing the specified constant to the specified variable address
if stmt.addr.con.value == addr and stmt.data.con.value == const:
return True
return False
def get_ddg_node(ddg, block_addr, stmt_idx):
for node in ddg.graph.nodes:
if node.block_addr == block_addr and node.stmt_idx == stmt_idx:
return node
return None
def get_target_stmt(proj, block):
for i, stmt in enumerate(block.vex.statements):
# We're looking for the instruction that loads a constant memory address into a temporary variable
if stmt.tag == 'Ist_WrTmp' and stmt.data.tag == 'Iex_Load' and stmt.data.addr.tag == 'Iex_Const':
addr = stmt.data.addr.con.value
section = proj.loader.main_object.find_section_containing(addr)
# Confirm the memory address is in the uninitialized data section
if section.name == '.bss':
return i, stmt
return None, None
if __name__ == "__main__":
main()
filtered:remove_function_no_docstring: 2,812 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 138
hexsha: 83b0710d125addf1a454b4ea6976092a23001346 | size: 930 | ext: py | lang: Python
max_stars: src/IO.py @ Rahoo11/Jarvis (head 6fac03e6f7bb963d0632ec781323210b3379603b) | licenses: ["MIT"] | stars: null | star events: null – null
max_issues: src/IO.py @ Rahoo11/Jarvis (head 6fac03e6f7bb963d0632ec781323210b3379603b) | licenses: ["MIT"] | issues: null | issue events: null – null
max_forks: src/IO.py @ Rahoo11/Jarvis (head 6fac03e6f7bb963d0632ec781323210b3379603b) | licenses: ["MIT"] | forks: null | fork events: null – null
content:
from datetime import datetime
import logging
# LOGGING SETTINGS
# Save detailed information to log file
handler_file = logging.FileHandler("jarvis.log")
handler_file.setFormatter(logging.Formatter(
"%(asctime)s %(levelname)s %(filename)s:%(lineno)d - %(message)s",
"%Y-%m-%d %H:%M:%S"
))
# Output simple information to stderr
handler_stderr = logging.StreamHandler()
handler_stderr.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
# Log everything of level INFO or higher (everything apart from DEBUG)
logging.basicConfig(
level=logging.INFO,
handlers=[
handler_file,
handler_stderr
]
)
# END LOGGING SETTINGS
def stdin() -> str:
"""
Use this to input commands for Jarvis if the desired way fails
"""
return input("Command: ")
def stdout(response: str):
"""
Use this to output Jarvis's response if the desired way fails
"""
print(response)
avg_line_length: 22.682927 | max_line_length: 76 | alphanum_fraction: 0.691398
original_content:
from datetime import datetime
import logging
# LOGGING SETTINGS
# Save detailed information to log file
handler_file = logging.FileHandler("jarvis.log")
handler_file.setFormatter(logging.Formatter(
"%(asctime)s %(levelname)s %(filename)s:%(lineno)d - %(message)s",
"%Y-%m-%d %H:%M:%S"
))
# Output simple information to stderr
handler_stderr = logging.StreamHandler()
handler_stderr.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
# Log everything of level INFO or higher (everything apart from DEBUG)
logging.basicConfig(
level=logging.INFO,
handlers=[
handler_file,
handler_stderr
]
)
# END LOGGING SETTINGS
def stdin() -> str:
"""
Use this to input commands for Jarvis if the desired way fails
"""
return input("Command: ")
def stdout(response: str):
"""
Use this to output Jarvis's response if the desired way fails
"""
print(response)
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: 6bf254e4d47110abc5fa56df01806709a669c1dd | size: 8,744 | ext: py | lang: Python
max_stars: sfo.py @ ayassinsayed/py.dataformat.sfo (head 99b2ad11b162318f7e5251a760bd5b53e1cf826d) | licenses: ["MIT"] | stars: 1 | star events: 2021-09-06T04:27:13.000Z – 2021-09-06T04:27:13.000Z
max_issues: sfo.py @ Jasily/py.dataformat.sfo (head 99b2ad11b162318f7e5251a760bd5b53e1cf826d) | licenses: ["MIT"] | issues: null | issue events: null – null
max_forks: sfo.py @ Jasily/py.dataformat.sfo (head 99b2ad11b162318f7e5251a760bd5b53e1cf826d) | licenses: ["MIT"] | forks: 4 | fork events: 2017-10-28T18:31:00.000Z – 2021-01-26T00:24:18.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - cologler <skyoflw@gmail.com>
# ----------
#
# ----------
import io
__all__ = [
'FormatError',
'SfoFile',
'PSVGameSfo',
'PSPGameSfo',
]
_BYTE_ORDER = 'little'
if __name__ == '__main__':
for i in range(0, 1):
test(r'test_res\param_%s.sfo' % str(i).rjust(2, '0'))
avg_line_length: 28.763158 | max_line_length: 98 | alphanum_fraction: 0.589776
original_content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - cologler <skyoflw@gmail.com>
# ----------
#
# ----------
import io
__all__ = [
'FormatError',
'SfoFile',
'PSVGameSfo',
'PSPGameSfo',
]
class FormatError(Exception):
pass
_BYTE_ORDER = 'little'
class Header:
def __init__(self):
# uint32_t magic; Always PSF
# uint32_t version; Usually 1.1
# uint32_t key_table_start; Start offset of key_table
# uint32_t data_table_start; Start offset of data_table
# uint32_t tables_entries; Number of entries in all tables
self._magic = None
self._version = None
self._key_table_start = None
self._data_table_start = None
self._tables_entries = None
@property
def key_table_start(self):
return self._key_table_start
@property
def data_table_start(self):
return self._data_table_start
@property
def tables_entries(self):
return self._tables_entries
def fix_data(self, sfo):
self._tables_entries = len(sfo)
raise NotImplementedError
def from_reader(self, reader):
self._magic = reader.read(4)
self._version = reader.read(4)
self._key_table_start = int.from_bytes(reader.read(4), _BYTE_ORDER)
self._data_table_start = int.from_bytes(reader.read(4), _BYTE_ORDER)
self._tables_entries = int.from_bytes(reader.read(4), _BYTE_ORDER)
if self._magic != b'\x00PSF':
raise FormatError
return self
class IndexTableEntry:
FORMAT_UTF8S = b'\x04\x00'
'''utf8 character string, NULL terminated'''
FORMAT_UTF8 = b'\x04\x02'
'''
Allways has a length of 4 bytes in len and max_len
(even in the case some bytes are not used, all them are marked as used)
'''
FORMAT_INT32 = b'\x04\x04'
def __init__(self):
# uint16_t key_offset; param_key offset (relative to start offset of key_table) */
# uint16_t data_fmt; param_data data type */
# uint32_t data_len; param_data used bytes */
# uint32_t data_max_len; param_data total bytes */
# uint32_t data_offset; param_data offset (relative to start offset of data_table) */
self._key_offset = None
self._data_fmt = None
self._data_len = None
self._data_max_len = None
self._data_offset = None
@property
def key_offset(self):
return self._key_offset
@property
def data_fmt(self):
return self._data_fmt
@property
def data_len(self):
return self._data_len
@property
def data_offset(self):
return self._data_offset
@property
def data_max_len(self):
return self._data_max_len
def fix_data(self, data):
raise NotImplementedError
def from_reader(self, reader):
self._key_offset = int.from_bytes(reader.read(2), _BYTE_ORDER)
self._data_fmt = reader.read(2)
self._data_len = int.from_bytes(reader.read(4), _BYTE_ORDER)
self._data_max_len = int.from_bytes(reader.read(4), _BYTE_ORDER)
self._data_offset = int.from_bytes(reader.read(4), _BYTE_ORDER)
if self._data_fmt != self.FORMAT_UTF8 and\
self._data_fmt != self.FORMAT_INT32 and\
self._data_fmt != self.FORMAT_UTF8S:
print(self._data_fmt)
raise FormatError
class Data:
def __init__(self):
self._index_table_entry = IndexTableEntry()
self._key = None
self._value = None
@property
def index_table_entry(self):
return self._index_table_entry
@property
def key(self):
return self._key
@property
def value(self):
return self._value
def fix_data(self):
self._index_table_entry.fix_data(self)
raise NotImplementedError
def __seek(self, reader, offset):
pos = reader.tell()
if pos != offset:
reader.seek(offset)
def key_from_reader(self, reader, header):
offset = header.key_table_start + self._index_table_entry.key_offset
self.__seek(reader, offset)
buffer = b''
while True:
b = reader.read(1)
if b == b'\x00':
break
buffer += b
self._key = buffer.decode('utf8')
def value_from_reader(self, reader, header):
offset = header.data_table_start + self._index_table_entry.data_offset
self.__seek(reader, offset)
buffer = reader.read(self._index_table_entry.data_max_len)
if self._index_table_entry.data_fmt == IndexTableEntry.FORMAT_UTF8:
i = buffer.find(b'\x00')
assert i >= 0
buffer = buffer[:i]
self._value = buffer.decode('utf8')
elif self._index_table_entry.data_fmt == IndexTableEntry.FORMAT_INT32:
assert len(buffer) == 4
self._value = int.from_bytes(buffer, _BYTE_ORDER)
else:
raise NotImplementedError
class SfoFile:
def __init__(self, header, data):
assert isinstance(header, Header)
self._header = header
self._data = {}
for d in data:
self._data[d.key] = d
def __contains__(self, key):
return key in self._data
def __getitem__(self, key):
return self._data[key].value
def __setitem__(self, key, value):
raise NotImplementedError
def __delitem__(self, key):
raise NotImplementedError
def __len__(self):
return len(self._data)
def keys(self):
return self._data.keys()
def values(self):
return self._data.values()
def get_or_None(self, key):
r = self._data.get(key, None)
return None if r == None else r.value
def _fix_data(self):
for v in self.values():
v.fix_data()
self._header.fix_data(self)
raise NotImplementedError
@staticmethod
def from_reader(reader):
header = Header().from_reader(reader)
datas = [Data() for _ in range(0, header.tables_entries)]
for d in datas:
d.index_table_entry.from_reader(reader)
for d in datas:
d.key_from_reader(reader, header)
for d in datas:
d.value_from_reader(reader, header)
sfo = SfoFile(header, datas)
return sfo
@staticmethod
def from_bytes(buffer):
return SfoFile.from_reader(io.BytesIO(buffer))
class _Loader:
def __init__(self, sfo: SfoFile, key):
self._sfo = sfo
self._key = key
self._value = None
self._is_loaded = False
def refresh(self):
self._is_loaded = False
@property
def value(self):
if not self._is_loaded:
self._value = self._sfo.get_or_None(self._key)
self._is_loaded = True
return self._value
class SfoInfoWrapper:
def __init__(self, sfo):
self._sfo = sfo
self._cache = {}
@classmethod
def from_bytes(cls, buffer):
return cls(SfoFile.from_reader(io.BytesIO(buffer)))
def refresh(self):
for value in self._cache.values():
value.refresh()
def _get_value(self, key):
loader = self._cache.get(key)
if loader == None:
loader = _Loader(self._sfo, key)
self._cache[key] = loader
return loader.value
@property
def app_ver(self): return self._get_value('APP_VER')
@property
def category(self): return self._get_value('CATEGORY')
@property
def title(self): return self._get_value('TITLE')
class PSVGameSfo(SfoInfoWrapper):
@property
def content_id(self): return self._get_value('CONTENT_ID')
@property
def title_id(self): return self._get_value('TITLE_ID')
class PSPGameSfo(SfoInfoWrapper):
@property
def disc_id(self): return self._get_value('DISC_ID')
@property
def category(self): return self._get_value('CATEGORY')
def test(path):
with open(path, mode='rb') as reader:
sfo = SfoFile.from_reader(reader)
for k in sfo._data:
v = sfo._data[k]
print('%s: "%s"' % (v._key, v._value))
if __name__ == '__main__':
for i in range(0, 1):
test(r'test_res\param_%s.sfo' % str(i).rjust(2, '0'))
filtered:remove_function_no_docstring: 6,074 | filtered:remove_class_no_docstring: 2,035 | filtered:remove_delete_markers: 242
hexsha: 73e9bc79d0b58408169e58a0a67fb34a83f478ad | size: 490 | ext: py | lang: Python
max_stars: bluebottle/test/factory_models/payments.py @ maykinmedia/bluebottle (head 355d4729662b5e9a03398efb4fe882e0f8cfa28d) | licenses: ["BSD-3-Clause"] | stars: null | star events: null – null
max_issues: bluebottle/test/factory_models/payments.py @ maykinmedia/bluebottle (head 355d4729662b5e9a03398efb4fe882e0f8cfa28d) | licenses: ["BSD-3-Clause"] | issues: null | issue events: null – null
max_forks: bluebottle/test/factory_models/payments.py @ maykinmedia/bluebottle (head 355d4729662b5e9a03398efb4fe882e0f8cfa28d) | licenses: ["BSD-3-Clause"] | forks: null | fork events: null – null
content:
import factory
from bluebottle.payments.models import Payment, OrderPayment
from bluebottle.payments_logger.models import PaymentLogEntry
from .orders import OrderFactory
avg_line_length: 24.5 | max_line_length: 61 | alphanum_fraction: 0.804082
original_content:
import factory
from bluebottle.payments.models import Payment, OrderPayment
from bluebottle.payments_logger.models import PaymentLogEntry
from .orders import OrderFactory
class OrderPaymentFactory(factory.DjangoModelFactory):
FACTORY_FOR = OrderPayment
payment_method = 'mock'
amount = 100
order = factory.SubFactory(OrderFactory)
class PaymentFactory(factory.DjangoModelFactory):
FACTORY_FOR = Payment
order_payment = factory.SubFactory(OrderPaymentFactory)
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 270 | filtered:remove_delete_markers: 46
hexsha: 39765b3fa03cc18cbd68dd8b22b2a3c60009bf92 | size: 2,667 | ext: py | lang: Python
max_stars: tests/feeds/test_matic_usd_feed.py @ tellor-io/telliot-feed-examples (head 3f825c90ad372f42c89eee0f5b54250f22ec0728) | licenses: ["MIT"] | stars: 7 | star events: 2021-11-10T21:14:57.000Z – 2022-03-26T07:27:23.000Z
max_issues: tests/feeds/test_matic_usd_feed.py @ tellor-io/telliot-feed-examples (head 3f825c90ad372f42c89eee0f5b54250f22ec0728) | licenses: ["MIT"] | issues: 86 | issue events: 2021-11-09T13:12:58.000Z – 2022-03-31T17:28:56.000Z
max_forks: tests/feeds/test_matic_usd_feed.py @ tellor-io/telliot-feed-examples (head 3f825c90ad372f42c89eee0f5b54250f22ec0728) | licenses: ["MIT"] | forks: 2 | fork events: 2021-11-27T12:51:22.000Z – 2022-03-12T16:38:00.000Z
content:
import pytest
from telliot_feed_examples.feeds.matic_usd_feed import matic_usd_median_feed
@pytest.mark.asyncio
avg_line_length: 115.956522 | max_line_length: 1,381 | alphanum_fraction: 0.813648
original_content:
import pytest
from telliot_feed_examples.feeds.matic_usd_feed import matic_usd_median_feed
@pytest.mark.asyncio
async def test_fetch_price():
(value, _) = await matic_usd_median_feed.source.fetch_new_datapoint()
assert value > 0
print(value)
def test_query_info():
q = matic_usd_median_feed.query
exp_id = "40aa71e5205fdc7bdb7d65f7ae41daca3820c5d3a8f62357a99eda3aa27244a3"
exp_data = b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\tSpotPrice\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05matic\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03usd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # noqa: E501
exp_data_hex = "00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000953706f745072696365000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000056d6174696300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000037573640000000000000000000000000000000000000000000000000000000000" # noqa: E501
# print(q.query_data)
assert q.query_data == exp_data
assert q.query_id.hex() == exp_id
assert q.query_data.hex() == exp_data_hex
filtered:remove_function_no_docstring: 2,506 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 45
hexsha: 2f90e72ab2ad376594d32a0c909e3065372a297e | size: 1,066 | ext: py | lang: Python
max_stars: motelsAPI/settings/dev.py @ amartinez1/5letrasAPI (head 670b638a8254a0809c9f953350cd1a3264b61bf7) | licenses: ["MIT"] | stars: 2 | star events: 2015-05-02T12:30:22.000Z – 2015-05-08T18:13:43.000Z
max_issues: motelsAPI/settings/dev.py @ amartinez1/5letrasAPI (head 670b638a8254a0809c9f953350cd1a3264b61bf7) | licenses: ["MIT"] | issues: null | issue events: null – null
max_forks: motelsAPI/settings/dev.py @ amartinez1/5letrasAPI (head 670b638a8254a0809c9f953350cd1a3264b61bf7) | licenses: ["MIT"] | forks: null | fork events: null – null
content:
from .base import *
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'motels_db',
}
}
ALLOWED_HOSTS = []
CORS_ORIGIN_ALLOW_ALL = True
DEBUG = True
SECRET_KEY = 'test'
INSTALLED_APPS += (
'autofixture',
'debug_toolbar',
'django_extensions',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PAGINATION_CLASS':
'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 10,
}
avg_line_length: 23.688889 | max_line_length: 80 | alphanum_fraction: 0.661351
original_content:
from .base import *
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'motels_db',
}
}
ALLOWED_HOSTS = []
CORS_ORIGIN_ALLOW_ALL = True
DEBUG = True
SECRET_KEY = 'test'
INSTALLED_APPS += (
'autofixture',
'debug_toolbar',
'django_extensions',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PAGINATION_CLASS':
'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 10,
}
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: a1adb53a7219e0575c94c4f8e32bc32af0a24a42 | size: 955 | ext: py | lang: Python
max_stars: snooper.py @ boztalay/SuperconCubeCmd (head 9cbd685a75dbf9fdf7a04e7a240b07117b1fbe82) | licenses: ["MIT"] | stars: null | star events: null – null
max_issues: snooper.py @ boztalay/SuperconCubeCmd (head 9cbd685a75dbf9fdf7a04e7a240b07117b1fbe82) | licenses: ["MIT"] | issues: null | issue events: null – null
max_forks: snooper.py @ boztalay/SuperconCubeCmd (head 9cbd685a75dbf9fdf7a04e7a240b07117b1fbe82) | licenses: ["MIT"] | forks: null | fork events: null – null
content:
import sys
import cubey
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Gimme a serial port!"
sys.exit(1)
serialPort = sys.argv[1]
main(serialPort)
avg_line_length: 23.292683 | max_line_length: 93 | alphanum_fraction: 0.536126
original_content:
import sys
import cubey
def main(serialPort):
cube = cubey.Cube(serialPort)
print "Listening, Ctrl-C to stop..."
try:
while True:
rawMessage = cube.sendCommand("m n u")
printMessage(rawMessage)
except KeyboardInterrupt:
print
cube.breakOut()
print "Done!"
def printMessage(rawMessage):
print
print "Got a message!"
print "=============="
print
contents = map(int, rawMessage.split())
rowFormat = "% 4X |" + (" %02X" * 16)
print " 0 1 2 3 4 5 6 7 8 9 A B C D E F"
print " ------------------------------------------------"
for rowStartIndex in range(0, 512, 16):
print rowFormat % tuple([rowStartIndex] + contents[rowStartIndex:rowStartIndex + 16])
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Gimme a serial port!"
sys.exit(1)
serialPort = sys.argv[1]
main(serialPort)
filtered:remove_function_no_docstring: 721 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 46
hexsha: 7ffc97d0a4c41aca77fb73ffa2a8a35b537492b9 | size: 3,841 | ext: py | lang: Python
max_stars: src/SgFactory/table.py @ WDonegan/psg-factories (head 5a6e362d9159a0d5c82960d9e2e7d446f1ab013d) | licenses: ["MIT"] | stars: null | star events: null – null
max_issues: src/SgFactory/table.py @ WDonegan/psg-factories (head 5a6e362d9159a0d5c82960d9e2e7d446f1ab013d) | licenses: ["MIT"] | issues: null | issue events: null – null
max_forks: src/SgFactory/table.py @ WDonegan/psg-factories (head 5a6e362d9159a0d5c82960d9e2e7d446f1ab013d) | licenses: ["MIT"] | forks: null | fork events: null – null
content:
import PySimpleGUI as sg
from .base import GeneratorBase
avg_line_length: 43.157303 | max_line_length: 60 | alphanum_fraction: 0.548555
original_content:
import PySimpleGUI as sg
from .base import GeneratorBase
class Table(GeneratorBase):
VALUES = 'values'
HEADINGS = 'headings'
VISIBLE_COLUMN_MAP = 'visible_column_map'
COL_WIDTHS = 'col_widths'
DEF_COL_WIDTH = 'def_col_width'
AUTO_SIZE_COLUMNS = 'auto_size_columns'
MAX_COL_WIDTH = 'max_col_width'
SELECT_MODE = 'select_mode'
DISPLAY_ROW_NUMBERS = 'display_row_numbers'
NUM_ROWS = 'num_rows'
ROW_HEIGHT = 'row_height'
FONT = 'font'
JUSTIFICATION = 'justification'
TEXT_COLOR = 'text_color'
BACKGROUND_COLOR = 'background_color'
ALTERNATING_ROW_COLOR = 'alternating_row_color'
SELECTED_ROW_COLORS = 'selected_row_colors'
HEADER_TEXT_COLOR = 'header_text_color'
HEADER_BACKGROUND_COLOR = 'header_background_color'
HEADER_FONT = 'header_font'
ROW_COLORS = 'row_colors'
VERTICAL_SCROLL_ONLY = 'vertical_scroll_only'
HIDE_VERTICAL_SCROLL = 'hide_vertical_scroll'
SIZE = 'size'
CHANGE_SUBMITS = 'change_submits'
ENABLE_EVENTS = 'enable_events'
ENABLE_CLICK_EVENTS = 'enable_click_events'
RIGHT_CLICK_SELECTS = 'right_click_selects'
BIND_RETURN_KEY = 'bind_return_key'
PAD = 'pad'
KEY = 'key'
TOOLTIP = 'tooltip'
RIGHT_CLICK_MENU = 'right_click_menu'
EXPAND_X = 'expand_x'
EXPAND_Y = 'expand_y'
VISIBLE = 'visible'
METADATA = 'metadata'
def reset_params(self):
self.__parameters__ = {
self.VALUES: (False, None),
self.HEADINGS: (False, None),
self.VISIBLE_COLUMN_MAP: (False, None),
self.COL_WIDTHS: (False, None),
self.DEF_COL_WIDTH: (False, None),
self.AUTO_SIZE_COLUMNS: (False, None),
self.MAX_COL_WIDTH: (False, None),
self.SELECT_MODE: (False, None),
self.DISPLAY_ROW_NUMBERS: (False, None),
self.NUM_ROWS: (False, None),
self.ROW_HEIGHT: (False, None),
self.FONT: (False, None),
self.JUSTIFICATION: (False, None),
self.TEXT_COLOR: (False, None),
self.BACKGROUND_COLOR: (False, None),
self.ALTERNATING_ROW_COLOR: (False, None),
self.SELECTED_ROW_COLORS: (False, None),
self.HEADER_TEXT_COLOR: (False, None),
self.HEADER_BACKGROUND_COLOR: (False, None),
self.HEADER_FONT: (False, None),
self.ROW_COLORS: (False, None),
self.VERTICAL_SCROLL_ONLY: (False, None),
self.HIDE_VERTICAL_SCROLL: (False, None),
self.SIZE: (False, None),
self.CHANGE_SUBMITS: (False, None),
self.ENABLE_EVENTS: (False, None),
self.ENABLE_CLICK_EVENTS: (False, None),
self.RIGHT_CLICK_SELECTS: (False, None),
self.BIND_RETURN_KEY: (False, None),
self.PAD: (False, None),
self.KEY: (False, None),
self.TOOLTIP: (False, None),
self.RIGHT_CLICK_MENU: (False, None),
self.EXPAND_X: (False, None),
self.EXPAND_Y: (False, None),
self.VISIBLE: (False, None),
self.METADATA: (False, None),
}
def make(self, key: str, param_key: str = None):
self.__parameters__[self.KEY] = (True, key)
active_params: dict = self.__get_params__(param_key)
return sg.Table(**active_params)
filtered:remove_function_no_docstring: 2,408 | filtered:remove_class_no_docstring: 1,352 | filtered:remove_delete_markers: 23
hexsha: 9e5764903cdf85638ab62747d681b0695238c4e3 | size: 1,411 | ext: py | lang: Python
max_stars: day-9&10/main.py @ a18antsv/Python-Two-Week-Challenge (head cfdefe5e2643d1c1ee66d08a16a7ffc175ba1a3a) | licenses: ["MIT"] | stars: null | star events: null – null
max_issues: day-9&10/main.py @ a18antsv/Python-Two-Week-Challenge (head cfdefe5e2643d1c1ee66d08a16a7ffc175ba1a3a) | licenses: ["MIT"] | issues: null | issue events: null – null
max_forks: day-9&10/main.py @ a18antsv/Python-Two-Week-Challenge (head cfdefe5e2643d1c1ee66d08a16a7ffc175ba1a3a) | licenses: ["MIT"] | forks: null | fork events: null – null
content:
import requests
from flask import Flask, render_template, request, redirect
base_url = "http://hn.algolia.com/api/v1"
# This URL gets the newest stories.
new = f"{base_url}/search_by_date?tags=story"
# This URL gets the most popular stories
popular = f"{base_url}/search?tags=story"
# This function makes the URL to get the detail of a storie by id.
# Heres the documentation: https://hn.algolia.com/api
db = {}
app = Flask("DayNine")
@app.route("/")
@app.route("/<id>")
app.run(host="0.0.0.0")
avg_line_length: 24.754386 | max_line_length: 70 | alphanum_fraction: 0.690291
original_content:
import requests
from flask import Flask, render_template, request, redirect
base_url = "http://hn.algolia.com/api/v1"
# This URL gets the newest stories.
new = f"{base_url}/search_by_date?tags=story"
# This URL gets the most popular stories
popular = f"{base_url}/search?tags=story"
# This function makes the URL to get the detail of a storie by id.
# Heres the documentation: https://hn.algolia.com/api
def make_detail_url(id):
return f"{base_url}/items/{id}"
db = {}
app = Flask("DayNine")
@app.route("/")
def index():
allowed_orders = ("popular", "new")
order_by = request.args.get("order_by")
if order_by:
order_by = order_by.lower()
if order_by not in allowed_orders:
order_by = allowed_orders[0]
posts_from_db = db.get(order_by)
if posts_from_db:
posts = posts_from_db
else:
posts = requests.get(globals()[order_by]).json()["hits"]
db[order_by] = posts
return render_template("index.html", order_by=order_by, posts=posts)
@app.route("/<id>")
def detail(id):
try:
request = requests.get(make_detail_url(id))
request.raise_for_status()
except requests.exceptions.HTTPError:
return redirect("/")
post = request.json()
return render_template(
"detail.html",
title=post.get("title"),
url=post.get("url"),
points=post.get("points"),
author=post.get("author"),
comments=post.get("children")
)
app.run(host="0.0.0.0")
filtered:remove_function_no_docstring: 842 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 66
hexsha: d32135b6fdf1615d5e0b4352267bf443c9e38704 | size: 2,651 | ext: py | lang: Python
max_stars: feewaiver/urls.py @ dbca-wa/feewaiver (head 7938a0e9d18924c12b27c0a411b6d7eccb40166b) | licenses: ["Apache-2.0"] | stars: null | star events: null – null
max_issues: feewaiver/urls.py @ dbca-wa/feewaiver (head 7938a0e9d18924c12b27c0a411b6d7eccb40166b) | licenses: ["Apache-2.0"] | issues: 12 | issue events: 2021-02-24T02:33:01.000Z – 2022-01-25T02:37:39.000Z
max_forks: feewaiver/urls.py @ mintcoding/feewaiver (head 47d69db91386f760dd36d87cbb565a9bb72a27d5) | licenses: ["Apache-2.0"] | forks: 1 | fork events: 2021-01-08T02:15:27.000Z – 2021-01-08T02:15:27.000Z
content:
from django.conf import settings
from django.contrib import admin
from django.conf.urls import url, include
from django.conf.urls.static import static
from rest_framework import routers
#from feewaiver import views, users_api, api
from feewaiver import views, api
from ledger.urls import urlpatterns as ledger_patterns
from feewaiver.utils import are_migrations_running
# API patterns
router = routers.DefaultRouter()
router.register(r'feewaivers',api.FeeWaiverViewSet)
router.register(r'feewaivers_paginated',api.FeeWaiverPaginatedViewSet)
router.register(r'participants',api.ParticipantsViewSet)
router.register(r'parks',api.ParkViewSet)
router.register(r'campgrounds',api.CampGroundViewSet)
router.register(r'temporary_document', api.TemporaryDocumentCollectionViewSet)
api_patterns = [
#url(r'^api/profile$', users_api.GetProfile.as_view(), name='get-profile'),
#url(r'^api/department_users$', users_api.DepartmentUserList.as_view(), name='department-users-list'),
#url(r'^api/filtered_users$', users_api.UserListFilterView.as_view(), name='filtered_users'),
url(r'^api/',include(router.urls)),
]
# URL Patterns
urlpatterns = [
url(r'^ledger/admin/', admin.site.urls, name='ledger_admin'),
url(r'', include(api_patterns)),
url(r'^$', views.FeeWaiverRoutingView.as_view(), name='ds_home'),
url(r'^contact/', views.FeeWaiverContactView.as_view(), name='ds_contact'),
url(r'^admin_data/', views.FeeWaiverAdminDataView.as_view(), name='admin_data'),
url(r'^further_info/', views.FeeWaiverFurtherInformationView.as_view(), name='ds_further_info'),
url(r'^internal/', views.InternalView.as_view(), name='internal'),
url(r'^external/', views.ExternalView.as_view(), name='external'),
url(r'^account/$', views.ExternalView.as_view(), name='manage-account'),
url(r'^profiles/', views.ExternalView.as_view(), name='manage-profiles'),
url(r'^help/(?P<application_type>[^/]+)/(?P<help_type>[^/]+)/$', views.HelpView.as_view(), name='help'),
url(r'^mgt-commands/$', views.ManagementCommandsView.as_view(), name='mgt-commands'),
url(r'^internal/fee_waiver/(?P<feewaiver_pk>\d+)/$', views.InternalFeeWaiverView.as_view(), name='internal-feewaiver-detail'),
url(r'^history/fee_waiver/(?P<pk>\d+)/$', views.FeeWaiverHistoryCompareView.as_view(), name='feewaiver_history'),
] + ledger_patterns
if settings.DEBUG: # Serve media locally in development.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.SHOW_DEBUG_TOOLBAR:
import debug_toolbar
urlpatterns = [
url('__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
avg_line_length: 48.2 | max_line_length: 130 | alphanum_fraction: 0.744247
original_content:
from django.conf import settings
from django.contrib import admin
from django.conf.urls import url, include
from django.conf.urls.static import static
from rest_framework import routers
#from feewaiver import views, users_api, api
from feewaiver import views, api
from ledger.urls import urlpatterns as ledger_patterns
from feewaiver.utils import are_migrations_running
# API patterns
router = routers.DefaultRouter()
router.register(r'feewaivers',api.FeeWaiverViewSet)
router.register(r'feewaivers_paginated',api.FeeWaiverPaginatedViewSet)
router.register(r'participants',api.ParticipantsViewSet)
router.register(r'parks',api.ParkViewSet)
router.register(r'campgrounds',api.CampGroundViewSet)
router.register(r'temporary_document', api.TemporaryDocumentCollectionViewSet)
api_patterns = [
#url(r'^api/profile$', users_api.GetProfile.as_view(), name='get-profile'),
#url(r'^api/department_users$', users_api.DepartmentUserList.as_view(), name='department-users-list'),
#url(r'^api/filtered_users$', users_api.UserListFilterView.as_view(), name='filtered_users'),
url(r'^api/',include(router.urls)),
]
# URL Patterns
urlpatterns = [
url(r'^ledger/admin/', admin.site.urls, name='ledger_admin'),
url(r'', include(api_patterns)),
url(r'^$', views.FeeWaiverRoutingView.as_view(), name='ds_home'),
url(r'^contact/', views.FeeWaiverContactView.as_view(), name='ds_contact'),
url(r'^admin_data/', views.FeeWaiverAdminDataView.as_view(), name='admin_data'),
url(r'^further_info/', views.FeeWaiverFurtherInformationView.as_view(), name='ds_further_info'),
url(r'^internal/', views.InternalView.as_view(), name='internal'),
url(r'^external/', views.ExternalView.as_view(), name='external'),
url(r'^account/$', views.ExternalView.as_view(), name='manage-account'),
url(r'^profiles/', views.ExternalView.as_view(), name='manage-profiles'),
url(r'^help/(?P<application_type>[^/]+)/(?P<help_type>[^/]+)/$', views.HelpView.as_view(), name='help'),
url(r'^mgt-commands/$', views.ManagementCommandsView.as_view(), name='mgt-commands'),
url(r'^internal/fee_waiver/(?P<feewaiver_pk>\d+)/$', views.InternalFeeWaiverView.as_view(), name='internal-feewaiver-detail'),
url(r'^history/fee_waiver/(?P<pk>\d+)/$', views.FeeWaiverHistoryCompareView.as_view(), name='feewaiver_history'),
] + ledger_patterns
if settings.DEBUG: # Serve media locally in development.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.SHOW_DEBUG_TOOLBAR:
import debug_toolbar
urlpatterns = [
url('__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: 54f82229c0438a79d9123d69c7d0467d0c47c179 | size: 1,758 | ext: py | lang: Python
max_stars: ros/src/twist_controller/twist_controller.py @ Acharya-Kiran/CarND-Capstone (head bc5f59ea20271e2e46e156fff86cd2482b52c5f2) | licenses: ["MIT"] | stars: null | star events: null – null
max_issues: ros/src/twist_controller/twist_controller.py @ Acharya-Kiran/CarND-Capstone (head bc5f59ea20271e2e46e156fff86cd2482b52c5f2) | licenses: ["MIT"] | issues: null | issue events: null – null
max_forks: ros/src/twist_controller/twist_controller.py @ Acharya-Kiran/CarND-Capstone (head bc5f59ea20271e2e46e156fff86cd2482b52c5f2) | licenses: ["MIT"] | forks: null | fork events: null – null
content:
from pid import PID
from lowpass import LowPassFilter
from yaw_controller import YawController
import rospy
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
avg_line_length: 27.904762 | max_line_length: 101 | alphanum_fraction: 0.755973
original_content:
from pid import PID
from lowpass import LowPassFilter
from yaw_controller import YawController
import rospy
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self,vehicle_mass,fuel_capacity,brake_deadband,decel_limit,
accel_limit,wheel_radius,wheel_base,steer_ratio,max_lat_accel,max_steer_angle):
# TODO: Implement
self.yaw_controller = YawController(wheel_base,steer_ratio,0.1,max_lat_accel,max_steer_angle)
kp=0.3
ki=0.1
kd=0.
mn=0.
mx=0.2
self.throttle_controller=PID(kp,ki,kd,mn,mx)
tau=0.5
ts=.02
self.vel_lpf = LowPassFilter(tau,ts)
self.vehicle_mass = vehicle_mass
self.fuel_capacity=fuel_capacity
self.brake_deadband=brake_deadband
self.decel_limit=decel_limit
self.accel_limit=accel_limit
self.wheel_radius=wheel_radius
self.last_time = rospy.get_time()
def control(self, current_vel,dbw_enabled,linear_vel,angular_vel):
# TODO: Change the arg, kwarg list to suit your needs
# Return throttle, brake, steer
if not dbw_enabled:
self.throttle_controller.reset()
return 0., 0., 0.
current_vel = self.vel_lpf.filt(current_vel)
steering = self.yaw_controller.get_steering(linear_vel,angular_vel,current_vel)
vel_error = linear_vel - current_vel
self.last_vel = current_vel
current_time = rospy.get_time()
sample_time = current_time - self.last_time
self.last_time = current_time
throttle = self.throttle_controller.step(vel_error,sample_time)
brake = 0
if linear_vel==0 and current_vel<0.1:
throttle=0
brake=400
elif throttle<.1 and vel_error<0:
throttle=0
decel = max(vel_error,self.decel_limit)
brake = abs(decel)*self.vehicle_mass*self.wheel_radius
return throttle,brake,steering
filtered:remove_function_no_docstring: 1,525 | filtered:remove_class_no_docstring: 4 | filtered:remove_delete_markers: 78
hexsha: 9ecb3b223a203a77d74b6711d0796c6b4e890962 | size: 27,213 | ext: py | lang: Python
max_stars: others/Pytorch/utilis_rnn.py @ jhuebotter/CartpoleSNNdemo (head d18a85cbc45bff48295c46c9cd8c9fc00192318c) | licenses: ["MIT"] | stars: null | star events: null – null
max_issues: others/Pytorch/utilis_rnn.py @ jhuebotter/CartpoleSNNdemo (head d18a85cbc45bff48295c46c9cd8c9fc00192318c) | licenses: ["MIT"] | issues: null | issue events: null – null
max_forks: others/Pytorch/utilis_rnn.py @ jhuebotter/CartpoleSNNdemo (head d18a85cbc45bff48295c46c9cd8c9fc00192318c) | licenses: ["MIT"] | forks: null | fork events: null – null
content:
import torch
import torch.nn as nn
from torch.utils import data
from datetime import datetime
import collections
import os
import random as rnd
import copy
from Modeling.Pytorch.utilis_rnn_specific import *
from SI_Toolkit.load_and_normalize import load_normalization_info, load_data, normalize_df, denormalize_df
def get_device():
"""
Small function to correctly send data to GPU or CPU depending what is available
"""
if torch.cuda.is_available():
device = torch.device('cuda:0')
else:
device = torch.device('cpu')
return device
# Set seeds everywhere required to make results reproducible
# Print parameter count
# https://stackoverflow.com/questions/49201236/check-the-total-number-of-parameters-in-a-pytorch-model
def load_pretrained_rnn(net, pt_path, device):
"""
A function loading parameters (weights and biases) from a previous training to a net RNN instance
:param net: An instance of RNN
:param pt_path: path to .pt file storing weights and biases
:return: No return. Modifies net in place.
"""
pre_trained_model = torch.load(pt_path, map_location=device)
print("Loading Model: ", pt_path)
print('')
pre_trained_model = list(pre_trained_model.items())
new_state_dict = collections.OrderedDict()
count = 0
num_param_key = len(pre_trained_model)
for key, value in net.state_dict().items():
if count >= num_param_key:
break
layer_name, weights = pre_trained_model[count]
new_state_dict[key] = weights
# print("Pre-trained Layer: %s - Loaded into new layer: %s" % (layer_name, key))
count += 1
print('')
net.load_state_dict(new_state_dict)
# Initialize weights and biases - should be only applied if no pretrained net loaded
# FIXME: To tailor this sequence class according to the commands and state_variables of cartpole
class Sequence(nn.Module):
""""
Our RNN class.
"""
def reset(self):
"""
Reset the network (not the weights!)
"""
self.sample_counter = 0
self.h = [None] * len(self.h_size)
self.c = [None] * len(self.h_size)
self.output = None
self.outputs = []
def forward(self, rnn_input):
"""
Predicts future CartPole states IN "OPEN LOOP"
(at every time step prediction for the next time step is done based on the true CartPole state)
"""
# Initialize hidden layers - this change at every call as the batch size may vary
for i in range(len(self.h_size)):
self.h[i] = torch.zeros(rnn_input.size(1), self.h_size[i], dtype=torch.float).to(self.device)
self.c[i] = torch.zeros(rnn_input.size(1), self.h_size[i], dtype=torch.float).to(self.device)
# The for loop takes the consecutive time steps from input plugs them into RNN and save the outputs into a list
# THE NETWORK GETS ALWAYS THE GROUND TRUTH, THE REAL STATE OF THE CARTPOLE, AS ITS INPUT
# IT PREDICTS THE STATE OF THE CARTPOLE ONE TIME STEP AHEAD BASED ON TRUE STATE NOW
for iteration, input_t in enumerate(rnn_input.chunk(rnn_input.size(0), dim=0)):
# Propagate input through RNN layers
if self.rnn_type == 'LSTM':
self.h[0], self.c[0] = self.layers[0](input_t.squeeze(0), (self.h[0], self.c[0]))
for i in range(len(self.h_size) - 1):
self.h[i + 1], self.c[i + 1] = self.layers[i + 1](self.h[i], (self.h[i + 1], self.c[i + 1]))
else:
self.h[0] = self.layers[0](input_t.squeeze(0), self.h[0])
for i in range(len(self.h_size) - 1):
self.h[i + 1] = self.layers[i + 1](self.h[i], self.h[i + 1])
self.output = self.layers[-1](self.h[-1])
self.outputs += [self.output]
self.sample_counter = self.sample_counter + 1
# In the train mode we want to continue appending the outputs by calling forward function
# The outputs will be saved internally in the network instance as a list
# Otherwise we want to transform outputs list to a tensor and return it
return self.output
import pandas as pd
#
# def load_data(a, filepath=None, columns_list=None, norm_inf=False, rnn_full_name=None, downsample=1):
# if filepath is None:
# filepath = a.val_file_name
#
# if columns_list is None:
# columns_list = list(set(a.inputs_list).union(set(a.outputs_list)))
#
# if type(filepath) == list:
# filepaths = filepath
# else:
# filepaths = [filepath]
#
# all_dfs = [] # saved separately to get normalization
# all_time_axes = []
#
# for one_filepath in filepaths:
# # Load dataframe
# print('loading data from ' + str(one_filepath))
# print('')
# df = pd.read_csv(one_filepath, comment='#')
# df=df.iloc[::downsample].reset_index()
#
# # You can shift dt by one time step to know "now" the timestep till the next row
# if a.cheat_dt:
# if 'dt' in df:
# df['dt'] = df['dt'].shift(-1)
# df = df[:-1]
#
# # FIXME: Make calculation of dt compatible with downsampling
# # Get time axis as separate Dataframe
# if 'time' in df.columns:
# t = df['time']
# elif 'dt' in df.columns:
# dt = df['dt']
# t = dt.cumsum()
# t.rename('time', inplace=True)
# else:
# t = pd.Series([])
# t.rename('time', inplace=True)
#
# time_axis = t
# all_time_axes.append(time_axis)
#
# # Get only relevant subset of columns
# if columns_list == 'all':
# pass
# else:
# df = df[columns_list]
#
# all_dfs.append(df)
#
#
# return all_dfs, all_time_axes
#
# # This way of doing normalization is fine for long data sets and (relatively) short sequence lengths
# # The points from the edges of the datasets count too little
# def calculate_normalization_info(df, PATH_TO_EXPERIMENT_RECORDINGS, rnn_full_name):
# if type(df) is list:
# df_total = pd.concat(df)
# else:
# df_total = df
#
# if 'time' in df_total.columns:
# df_total.drop('time',
# axis='columns', inplace=True)
#
# df_mean = df_total.mean(axis=0)
# df_std = df_total.std(axis=0)
# df_max = df_total.max(axis=0)
# df_min = df_total.min(axis=0)
# frame = {'mean': df_mean, 'std': df_std, 'max': df_max, 'min': df_min}
# df_norm_info = pd.DataFrame(frame).transpose()
#
# df_norm_info.to_csv(PATH_TO_EXPERIMENT_RECORDINGS + rnn_full_name + '-norm' + '.csv')
#
# # Plot historgrams to make the firs check about gaussian assumption
# # for feature in df_total.columns:
# # plt.hist(df_total[feature].to_numpy(), 50, density=True, facecolor='g', alpha=0.75)
# # plt.title(feature)
# # plt.show()
#
# return df_norm_info
#
#
# def load_normalization_info(PATH_TO_EXPERIMENT_RECORDINGS, rnn_full_name):
# return pd.read_csv(PATH_TO_EXPERIMENT_RECORDINGS + rnn_full_name + '-norm' + '.csv', index_col=0)
#
#
# def normalize_df(dfs, normalization_info, normalization_type='minmax_sym'):
# if normalization_type == 'gaussian':
# def normalize_feature(col):
# col_mean = normalization_info.loc['mean', col.name]
# col_std = normalization_info.loc['std', col.name]
# return (col - col_mean) / col_std
# elif normalization_type == 'minmax_pos':
# def normalize_feature(col):
# col_min = normalization_info.loc['min', col.name]
# col_max = normalization_info.loc['max', col.name]
# return (col - col_min) / (col_max - col_min)
# elif normalization_type == 'minmax_sym':
# def normalize_feature(col):
# col_min = normalization_info.loc['min', col.name]
# col_max = normalization_info.loc['max', col.name]
# return -1.0 + 2.0 * (col - col_min) / (col_max - col_min)
#
# if type(dfs) is list:
# for i in range(len(dfs)):
# dfs[i] = dfs[i].apply(normalize_feature, axis=0)
# else:
# dfs = dfs.apply(normalize_feature, axis=0)
#
# return dfs
#
#
# def denormalize_df(dfs, normalization_info, normalization_type='minmax_sym'):
# if normalization_type == 'gaussian':
# def denormalize_feature(col):
# col_mean = normalization_info.loc['mean', col.name]
# col_std = normalization_info.loc['std', col.name]
# return col * col_std + col_mean
# elif normalization_type == 'minmax_pos':
# def denormalize_feature(col):
# col_min = normalization_info.loc['min', col.name]
# col_max = normalization_info.loc['max', col.name]
# return col * (col_max - col_min) + col_min
# elif normalization_type == 'minmax_sym':
# def denormalize_feature(col):
# col_min = normalization_info.loc['min', col.name]
# col_max = normalization_info.loc['max', col.name]
# return ((col + 1.0) / 2.0) * (col_max - col_min) + col_min
#
# if type(dfs) is list:
# for i in range(len(dfs)):
# dfs[i] = dfs[i].apply(denormalize_feature, axis=0)
# else:
# dfs = dfs.apply(denormalize_feature, axis=0)
#
# return dfs
def plot_results(net,
args,
dataset=None,
normalization_info = None,
time_axes=None,
filepath=None,
inputs_list=None,
outputs_list=None,
closed_loop_list=None,
seq_len=None,
warm_up_len=None,
closed_loop_enabled=False,
comment='',
rnn_full_name=None,
save=False,
close_loop_idx=512):
"""
This function accepts RNN instance, arguments and CartPole instance.
It runs one random experiment with CartPole,
inputs the data into RNN and check how well RNN predicts CartPole state one time step ahead of time
"""
rnn_full_name = net.rnn_full_name
if filepath is None:
filepath = args.val_file_name
if type(filepath) == list:
filepath = filepath[0]
if warm_up_len is None:
warm_up_len = args.warm_up_len
if seq_len is None:
seq_len = args.seq_len
if inputs_list is None:
inputs_list = args.inputs_list
if inputs_list is None:
raise ValueError('RNN inputs not provided!')
if outputs_list is None:
outputs_list = args.outputs_list
if outputs_list is None:
raise ValueError('RNN outputs not provided!')
if closed_loop_enabled and (closed_loop_list is None):
closed_loop_list = args.close_loop_for
if closed_loop_list is None:
raise ValueError('RNN closed-loop-inputs not provided!')
net.reset()
net.eval()
device = get_device()
if normalization_info is None:
normalization_info = load_normalization_info(args.PATH_TO_EXPERIMENT_RECORDINGS, rnn_full_name)
if dataset is None or time_axes is None:
test_dfs, time_axes = load_data(args, filepath)
test_dfs_norm = normalize_df(test_dfs, normalization_info)
test_set = Dataset(test_dfs_norm, args, time_axes=time_axes, seq_len=seq_len)
del test_dfs
else:
test_set = copy.deepcopy(dataset)
test_set.reset_seq_len(seq_len=seq_len)
# Format the experiment data
features, targets, time_axis = test_set.get_experiment(1) # Put number in brackets to get the same idx at every run
features_pd = pd.DataFrame(data=features, columns=inputs_list)
targets_pd = pd.DataFrame(data=targets, columns=outputs_list)
rnn_outputs = pd.DataFrame(columns=outputs_list)
warm_up_idx = 0
rnn_input_0 = copy.deepcopy(features_pd.iloc[0])
# Does not bring anything. Why? 0-state shouldn't have zero internal state due to biases...
while warm_up_idx < warm_up_len:
rnn_input = rnn_input_0
rnn_input = np.squeeze(rnn_input.to_numpy())
rnn_input = torch.from_numpy(rnn_input).float().unsqueeze(0).unsqueeze(0).to(device)
net(rnn_input=rnn_input)
warm_up_idx += 1
net.outputs = []
net.sample_counter = 0
idx_cl = 0
close_the_loop = False
for index, row in features_pd.iterrows():
rnn_input = pd.DataFrame(copy.deepcopy(row)).transpose().reset_index(drop=True)
if idx_cl == close_loop_idx:
close_the_loop = True
if closed_loop_enabled and close_the_loop and (normalized_rnn_output is not None):
rnn_input[closed_loop_list] = normalized_rnn_output[closed_loop_list]
rnn_input = np.squeeze(rnn_input.to_numpy())
rnn_input = torch.from_numpy(rnn_input).float().unsqueeze(0).unsqueeze(0).to(device)
normalized_rnn_output = net(rnn_input=rnn_input)
normalized_rnn_output = np.squeeze(normalized_rnn_output.detach().cpu().numpy()).tolist()
normalized_rnn_output = copy.deepcopy(pd.DataFrame(data=[normalized_rnn_output], columns=outputs_list))
rnn_outputs = rnn_outputs.append(copy.deepcopy(normalized_rnn_output), ignore_index=True)
idx_cl += 1
targets_pd_denorm = denormalize_df(targets_pd, normalization_info)
rnn_outputs_denorm = denormalize_df(rnn_outputs, normalization_info)
fig, axs = plot_results_specific(targets_pd_denorm, rnn_outputs_denorm, time_axis, comment, closed_loop_enabled, close_loop_idx)
plt.show()
if save:
# Make folders if not yet exist
try:
os.makedirs('save_plots')
except FileExistsError:
pass
dateTimeObj = datetime.now()
timestampStr = dateTimeObj.strftime("-%d%b%Y_%H%M%S")
if rnn_full_name is not None:
fig.savefig('./save_plots/' + rnn_full_name + timestampStr + '.png')
else:
fig.savefig('./save_plots/' + timestampStr + '.png')
avg_line_length: 38.545326 | max_line_length: 132 | alphanum_fraction: 0.614449
original_content:
import torch
import torch.nn as nn
from torch.utils import data
from datetime import datetime
import collections
import os
import random as rnd
import copy
from Modeling.Pytorch.utilis_rnn_specific import *
from SI_Toolkit.load_and_normalize import load_normalization_info, load_data, normalize_df, denormalize_df
def get_device():
"""
Small function to correctly send data to GPU or CPU depending what is available
"""
if torch.cuda.is_available():
device = torch.device('cuda:0')
else:
device = torch.device('cpu')
return device
# Set seeds everywhere required to make results reproducible
def set_seed(args):
seed = args.seed
rnd.seed(seed)
np.random.seed(seed)
# Print parameter count
# https://stackoverflow.com/questions/49201236/check-the-total-number-of-parameters-in-a-pytorch-model
def print_parameter_count(net):
pytorch_total_params = sum(p.numel() for p in net.parameters())
pytorch_trainable_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print('::: # network all parameters: ' + str(pytorch_total_params))
print('::: # network trainable parameters: ' + str(pytorch_trainable_params))
print('')
def load_pretrained_rnn(net, pt_path, device):
"""
A function loading parameters (weights and biases) from a previous training to a net RNN instance
:param net: An instance of RNN
:param pt_path: path to .pt file storing weights and biases
:return: No return. Modifies net in place.
"""
pre_trained_model = torch.load(pt_path, map_location=device)
print("Loading Model: ", pt_path)
print('')
pre_trained_model = list(pre_trained_model.items())
new_state_dict = collections.OrderedDict()
count = 0
num_param_key = len(pre_trained_model)
for key, value in net.state_dict().items():
if count >= num_param_key:
break
layer_name, weights = pre_trained_model[count]
new_state_dict[key] = weights
# print("Pre-trained Layer: %s - Loaded into new layer: %s" % (layer_name, key))
count += 1
print('')
net.load_state_dict(new_state_dict)
# Initialize weights and biases - should be only applied if no pretrained net loaded
def initialize_weights_and_biases(net):
print('Initialize weights and biases')
for name, param in net.named_parameters():
print('Initialize {}'.format(name))
if 'gru' in name:
if 'weight' in name:
nn.init.orthogonal_(param)
if 'linear' in name:
if 'weight' in name:
nn.init.orthogonal_(param)
# nn.init.xavier_uniform_(param)
if 'bias' in name: # all biases
nn.init.constant_(param, 0)
print('')
def create_rnn_instance(rnn_name=None, inputs_list=None, outputs_list=None, load_rnn=None, path_save=None, device=None):
if load_rnn is not None and load_rnn != 'last':
        # 1) Find the .txt file with this name; if it exists, load the RNN name, inputs and outputs lists
        #    If it does not exist, raise an error
# 2) Create corresponding net
# 3) Load parameters from corresponding pt file
filename = load_rnn
print('Loading a pretrained RNN with the full name: {}'.format(filename))
print('')
txt_filename = filename + '.txt'
pt_filename = filename + '.pt'
txt_path = path_save + txt_filename
pt_path = path_save + pt_filename
if not os.path.isfile(txt_path):
raise ValueError(
'The corresponding .txt file is missing (information about inputs and outputs) at the location {}'.format(
txt_path))
if not os.path.isfile(pt_path):
raise ValueError(
'The corresponding .pt file is missing (information about weights and biases) at the location {}'.format(
pt_path))
f = open(txt_path, 'r')
lines = f.readlines()
rnn_name = lines[1].rstrip("\n")
inputs_list = lines[7].rstrip("\n").split(sep=', ')
outputs_list = lines[10].rstrip("\n").split(sep=', ')
f.close()
print('Inputs to the loaded RNN: {}'.format(', '.join(map(str, inputs_list))))
print('Outputs from the loaded RNN: {}'.format(', '.join(map(str, outputs_list))))
print('')
# Construct the requested RNN
net = Sequence(rnn_name=rnn_name, inputs_list=inputs_list, outputs_list=outputs_list)
net.rnn_full_name = load_rnn
# Load the parameters
load_pretrained_rnn(net, pt_path, device)
elif load_rnn == 'last':
files_found = False
while (not files_found):
try:
import glob
list_of_files = glob.glob(path_save + '/*.txt')
txt_path = max(list_of_files, key=os.path.getctime)
except FileNotFoundError:
raise ValueError('No information about any pretrained network found at {}'.format(path_save))
f = open(txt_path, 'r')
lines = f.readlines()
rnn_name = lines[1].rstrip("\n")
pre_rnn_full_name = lines[4].rstrip("\n")
inputs_list = lines[7].rstrip("\n").split(sep=', ')
outputs_list = lines[10].rstrip("\n").split(sep=', ')
f.close()
pt_path = path_save + pre_rnn_full_name + '.pt'
if not os.path.isfile(pt_path):
print('The .pt file is missing (information about weights and biases) at the location {}'.format(
pt_path))
print('I delete the corresponding .txt file and try to search again')
print('')
os.remove(txt_path)
else:
files_found = True
print('Full name of the loaded RNN is {}'.format(pre_rnn_full_name))
print('Inputs to the loaded RNN: {}'.format(', '.join(map(str, inputs_list))))
print('Outputs from the loaded RNN: {}'.format(', '.join(map(str, outputs_list))))
print('')
# Construct the requested RNN
net = Sequence(rnn_name=rnn_name, inputs_list=inputs_list, outputs_list=outputs_list)
net.rnn_full_name = pre_rnn_full_name
# Load the parameters
load_pretrained_rnn(net, pt_path, device)
else: # a.load_rnn is None
print('No pretrained network specified. I will train a network from scratch.')
print('')
# Construct the requested RNN
net = Sequence(rnn_name=rnn_name, inputs_list=inputs_list, outputs_list=outputs_list)
initialize_weights_and_biases(net)
return net, rnn_name, inputs_list, outputs_list
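# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal example of how create_rnn_instance() might be called to build a GRU from
# scratch. The feature names and the save path below are placeholder assumptions,
# not values taken from the repository.
def _example_create_rnn_instance():
    """Hedged usage sketch for create_rnn_instance(); all argument values are assumptions."""
    device = get_device()
    net, rnn_name, inputs_list, outputs_list = create_rnn_instance(
        rnn_name='GRU-64H1-64H2',                # two hidden GRU layers of 64 units each
        inputs_list=['Q', 'angle', 'position'],  # placeholder input feature names
        outputs_list=['angle', 'position'],      # placeholder output feature names
        load_rnn=None,                           # None -> initialize weights from scratch
        path_save='./save/',                     # placeholder folder for the .txt/.pt files
        device=device)
    return net, rnn_name, inputs_list, outputs_list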
def create_log_file(rnn_name, inputs_list, outputs_list, path_save):
rnn_full_name = rnn_name[:4] + str(len(inputs_list)) + 'IN-' + rnn_name[4:] + '-' + str(len(outputs_list)) + 'OUT'
net_index = 0
while True:
txt_path = path_save + rnn_full_name + '-' + str(net_index) + '.txt'
if os.path.isfile(txt_path):
pass
else:
rnn_full_name += '-' + str(net_index)
f = open(txt_path, 'w')
f.write('RNN NAME: \n' + rnn_name + '\n\n')
f.write('RNN FULL NAME: \n' + rnn_full_name + '\n\n')
f.write('INPUTS: \n' + ', '.join(map(str, inputs_list)) + '\n\n')
f.write('OUTPUTS: \n' + ', '.join(map(str, outputs_list)) + '\n\n')
f.close()
break
net_index += 1
print('Full name given to the currently trained network is {}.'.format(rnn_full_name))
print('')
return rnn_full_name
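# --- Editor's illustrative sketch (not part of the original module) ---
# A tiny check of the naming scheme used by create_log_file(); the network name and the
# numbers of inputs/outputs below are assumptions chosen only for illustration.
def _example_rnn_full_name():
    """Shows the base name create_log_file() builds before appending the '-<index>' suffix."""
    rnn_name = 'GRU-64H1-64H2'
    inputs_list = ['in%d' % i for i in range(7)]    # 7 placeholder inputs
    outputs_list = ['out%d' % i for i in range(3)]  # 3 placeholder outputs
    base = rnn_name[:4] + str(len(inputs_list)) + 'IN-' + rnn_name[4:] \
        + '-' + str(len(outputs_list)) + 'OUT'
    assert base == 'GRU-7IN-64H1-64H2-3OUT'  # create_log_file() then appends '-0', '-1', ...
    return base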
# FIXME: To tailor this sequence class according to the commands and state_variables of cartpole
class Sequence(nn.Module):
""""
Our RNN class.
"""
def __init__(self, rnn_name, inputs_list, outputs_list):
super(Sequence, self).__init__()
"""Initialization of an RNN instance
We assume that inputs may be both commands and state variables, whereas outputs are always state variables
"""
# Check if GPU is available. If yes device='cuda:0' if not device='cpu'
self.device = get_device()
self.rnn_name = rnn_name
self.rnn_full_name = None
# Get the information about network architecture from the network name
# Split the names into "LSTM/GRU", "128H1", "64H2" etc.
names = rnn_name.split('-')
layers = ['H1', 'H2', 'H3', 'H4', 'H5']
self.h_size = [] # Hidden layers sizes
for name in names:
for index, layer in enumerate(layers):
if layer in name:
                    # append the hidden-layer size parsed from the name, e.g. '64H1' -> 64
self.h_size.append(int(name[:-2]))
if not self.h_size:
raise ValueError('You have to provide the size of at least one hidden layer in rnn name')
if 'GRU' in names:
self.rnn_type = 'GRU'
elif 'LSTM' in names:
self.rnn_type = 'LSTM'
else:
self.rnn_type = 'RNN-Basic'
# Construct network
if self.rnn_type == 'GRU':
self.rnn_cell = [nn.GRUCell(len(inputs_list), self.h_size[0]).to(get_device())]
for i in range(len(self.h_size) - 1):
self.rnn_cell.append(nn.GRUCell(self.h_size[i], self.h_size[i + 1]).to(get_device()))
elif self.rnn_type == 'LSTM':
self.rnn_cell = [nn.LSTMCell(len(inputs_list), self.h_size[0]).to(get_device())]
for i in range(len(self.h_size) - 1):
self.rnn_cell.append(nn.LSTMCell(self.h_size[i], self.h_size[i + 1]).to(get_device()))
else:
self.rnn_cell = [nn.RNNCell(len(inputs_list), self.h_size[0]).to(get_device())]
for i in range(len(self.h_size) - 1):
self.rnn_cell.append(nn.RNNCell(self.h_size[i], self.h_size[i + 1]).to(get_device()))
self.linear = nn.Linear(self.h_size[-1], len(outputs_list)) # RNN out
self.layers = nn.ModuleList([])
for cell in self.rnn_cell:
self.layers.append(cell)
self.layers.append(self.linear)
# Count data samples (=time steps)
self.sample_counter = 0
# Declaration of the variables keeping internal state of GRU hidden layers
self.h = [None] * len(self.h_size)
self.c = [None] * len(self.h_size) # Internal state cell - only matters for LSTM
# Variable keeping the most recent output of RNN
self.output = None
# List storing the history of RNN outputs
self.outputs = []
# Send the whole RNN to GPU if available, otherwise send it to CPU
self.to(self.device)
print('Constructed a neural network of type {}, with {} hidden layers with sizes {} respectively.'
.format(self.rnn_type, len(self.h_size), ', '.join(map(str, self.h_size))))
print('The inputs are (in this order): {}'.format(', '.join(map(str, inputs_list))))
print('The outputs are (in this order): {}'.format(', '.join(map(str, outputs_list))))
def reset(self):
"""
Reset the network (not the weights!)
"""
self.sample_counter = 0
self.h = [None] * len(self.h_size)
self.c = [None] * len(self.h_size)
self.output = None
self.outputs = []
def forward(self, rnn_input):
"""
Predicts future CartPole states IN "OPEN LOOP"
(at every time step prediction for the next time step is done based on the true CartPole state)
"""
# Initialize hidden layers - this change at every call as the batch size may vary
for i in range(len(self.h_size)):
self.h[i] = torch.zeros(rnn_input.size(1), self.h_size[i], dtype=torch.float).to(self.device)
self.c[i] = torch.zeros(rnn_input.size(1), self.h_size[i], dtype=torch.float).to(self.device)
        # The for loop takes the consecutive time steps from the input, plugs them into the RNN and saves the outputs into a list
# THE NETWORK GETS ALWAYS THE GROUND TRUTH, THE REAL STATE OF THE CARTPOLE, AS ITS INPUT
# IT PREDICTS THE STATE OF THE CARTPOLE ONE TIME STEP AHEAD BASED ON TRUE STATE NOW
for iteration, input_t in enumerate(rnn_input.chunk(rnn_input.size(0), dim=0)):
# Propagate input through RNN layers
if self.rnn_type == 'LSTM':
self.h[0], self.c[0] = self.layers[0](input_t.squeeze(0), (self.h[0], self.c[0]))
for i in range(len(self.h_size) - 1):
self.h[i + 1], self.c[i + 1] = self.layers[i + 1](self.h[i], (self.h[i + 1], self.c[i + 1]))
else:
self.h[0] = self.layers[0](input_t.squeeze(0), self.h[0])
for i in range(len(self.h_size) - 1):
self.h[i + 1] = self.layers[i + 1](self.h[i], self.h[i + 1])
self.output = self.layers[-1](self.h[-1])
self.outputs += [self.output]
self.sample_counter = self.sample_counter + 1
# In the train mode we want to continue appending the outputs by calling forward function
# The outputs will be saved internally in the network instance as a list
# Otherwise we want to transform outputs list to a tensor and return it
return self.output
def return_outputs_history(self):
return torch.stack(self.outputs, 1)
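# --- Editor's illustrative sketch (not part of the original module) ---
# Open-loop use of the Sequence network on a dummy batch. forward() expects the input
# shaped as (seq_len, batch, n_features); the feature names below are placeholders.
def _example_sequence_forward():
    """Hedged usage sketch for Sequence; tensor sizes and feature names are assumptions."""
    inputs_list = ['Q', 'angle', 'position']   # placeholder input features
    outputs_list = ['angle', 'position']       # placeholder output features
    net = Sequence(rnn_name='GRU-32H1', inputs_list=inputs_list, outputs_list=outputs_list)
    net.reset()
    dummy_input = torch.randn(10, 4, len(inputs_list)).to(net.device)  # (seq_len, batch, features)
    last_output = net(rnn_input=dummy_input)  # prediction for the final time step, shape (4, 2)
    history = net.return_outputs_history()    # all predictions stacked, shape (4, 10, 2)
    return last_output, history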
import pandas as pd
#
# def load_data(a, filepath=None, columns_list=None, norm_inf=False, rnn_full_name=None, downsample=1):
# if filepath is None:
# filepath = a.val_file_name
#
# if columns_list is None:
# columns_list = list(set(a.inputs_list).union(set(a.outputs_list)))
#
# if type(filepath) == list:
# filepaths = filepath
# else:
# filepaths = [filepath]
#
# all_dfs = [] # saved separately to get normalization
# all_time_axes = []
#
# for one_filepath in filepaths:
# # Load dataframe
# print('loading data from ' + str(one_filepath))
# print('')
# df = pd.read_csv(one_filepath, comment='#')
# df=df.iloc[::downsample].reset_index()
#
# # You can shift dt by one time step to know "now" the timestep till the next row
# if a.cheat_dt:
# if 'dt' in df:
# df['dt'] = df['dt'].shift(-1)
# df = df[:-1]
#
# # FIXME: Make calculation of dt compatible with downsampling
# # Get time axis as separate Dataframe
# if 'time' in df.columns:
# t = df['time']
# elif 'dt' in df.columns:
# dt = df['dt']
# t = dt.cumsum()
# t.rename('time', inplace=True)
# else:
# t = pd.Series([])
# t.rename('time', inplace=True)
#
# time_axis = t
# all_time_axes.append(time_axis)
#
# # Get only relevant subset of columns
# if columns_list == 'all':
# pass
# else:
# df = df[columns_list]
#
# all_dfs.append(df)
#
#
# return all_dfs, all_time_axes
#
# # This way of doing normalization is fine for long data sets and (relatively) short sequence lengths
# # The points from the edges of the datasets count too little
# def calculate_normalization_info(df, PATH_TO_EXPERIMENT_RECORDINGS, rnn_full_name):
# if type(df) is list:
# df_total = pd.concat(df)
# else:
# df_total = df
#
# if 'time' in df_total.columns:
# df_total.drop('time',
# axis='columns', inplace=True)
#
# df_mean = df_total.mean(axis=0)
# df_std = df_total.std(axis=0)
# df_max = df_total.max(axis=0)
# df_min = df_total.min(axis=0)
# frame = {'mean': df_mean, 'std': df_std, 'max': df_max, 'min': df_min}
# df_norm_info = pd.DataFrame(frame).transpose()
#
# df_norm_info.to_csv(PATH_TO_EXPERIMENT_RECORDINGS + rnn_full_name + '-norm' + '.csv')
#
# # Plot histograms to make the first check of the Gaussian assumption
# # for feature in df_total.columns:
# # plt.hist(df_total[feature].to_numpy(), 50, density=True, facecolor='g', alpha=0.75)
# # plt.title(feature)
# # plt.show()
#
# return df_norm_info
#
#
# def load_normalization_info(PATH_TO_EXPERIMENT_RECORDINGS, rnn_full_name):
# return pd.read_csv(PATH_TO_EXPERIMENT_RECORDINGS + rnn_full_name + '-norm' + '.csv', index_col=0)
#
#
# def normalize_df(dfs, normalization_info, normalization_type='minmax_sym'):
# if normalization_type == 'gaussian':
# def normalize_feature(col):
# col_mean = normalization_info.loc['mean', col.name]
# col_std = normalization_info.loc['std', col.name]
# return (col - col_mean) / col_std
# elif normalization_type == 'minmax_pos':
# def normalize_feature(col):
# col_min = normalization_info.loc['min', col.name]
# col_max = normalization_info.loc['max', col.name]
# return (col - col_min) / (col_max - col_min)
# elif normalization_type == 'minmax_sym':
# def normalize_feature(col):
# col_min = normalization_info.loc['min', col.name]
# col_max = normalization_info.loc['max', col.name]
# return -1.0 + 2.0 * (col - col_min) / (col_max - col_min)
#
# if type(dfs) is list:
# for i in range(len(dfs)):
# dfs[i] = dfs[i].apply(normalize_feature, axis=0)
# else:
# dfs = dfs.apply(normalize_feature, axis=0)
#
# return dfs
#
#
# def denormalize_df(dfs, normalization_info, normalization_type='minmax_sym'):
# if normalization_type == 'gaussian':
# def denormalize_feature(col):
# col_mean = normalization_info.loc['mean', col.name]
# col_std = normalization_info.loc['std', col.name]
# return col * col_std + col_mean
# elif normalization_type == 'minmax_pos':
# def denormalize_feature(col):
# col_min = normalization_info.loc['min', col.name]
# col_max = normalization_info.loc['max', col.name]
# return col * (col_max - col_min) + col_min
# elif normalization_type == 'minmax_sym':
# def denormalize_feature(col):
# col_min = normalization_info.loc['min', col.name]
# col_max = normalization_info.loc['max', col.name]
# return ((col + 1.0) / 2.0) * (col_max - col_min) + col_min
#
# if type(dfs) is list:
# for i in range(len(dfs)):
# dfs[i] = dfs[i].apply(denormalize_feature, axis=0)
# else:
# dfs = dfs.apply(denormalize_feature, axis=0)
#
# return dfs
class Dataset(data.Dataset):
def __init__(self, dfs, args, time_axes=None, seq_len=None):
        'Initialization - divide data into features and labels'
self.data = []
self.labels = []
for df in dfs:
# Get Raw Data
features = copy.deepcopy(df)
targets = copy.deepcopy(df)
features.drop(features.tail(1).index, inplace=True) # Drop last row
targets.drop(targets.head(1).index, inplace=True)
features.reset_index(inplace=True) # Reset index
targets.reset_index(inplace=True)
features = features[args.inputs_list]
targets = targets[args.outputs_list]
self.data.append(features)
self.labels.append(targets)
self.args = args
self.seq_len = None
self.df_lengths = []
self.df_lengths_cs = []
self.number_of_samples = 0
self.time_axes = time_axes
self.reset_seq_len(seq_len=seq_len)
def reset_seq_len(self, seq_len=None):
"""
        This method should be used if the user wants to change the seq_len without creating a new Dataset
        Please remember that one can reset it again to come back to the old configuration
        :param seq_len: Gives a new user-defined seq_len. Call with no argument to come back to the default.
"""
if seq_len is None:
self.seq_len = self.args.seq_len # Sequence length
else:
self.seq_len = seq_len
self.df_lengths = []
self.df_lengths_cs = []
if type(self.data) == list:
for data_set in self.data:
self.df_lengths.append(data_set.shape[0] - self.seq_len)
if not self.df_lengths_cs:
self.df_lengths_cs.append(self.df_lengths[0])
else:
self.df_lengths_cs.append(self.df_lengths_cs[-1] + self.df_lengths[-1])
self.number_of_samples = self.df_lengths_cs[-1]
else:
self.number_of_samples = self.data.shape[0] - self.seq_len
def __len__(self):
'Total number of samples'
return self.number_of_samples
def __getitem__(self, idx, get_time_axis=False):
"""
Requires the self.data to be a list of pandas dataframes
"""
# Find index of the dataset in self.data and index of the starting point in this dataset
idx_data_set = next(i for i, v in enumerate(self.df_lengths_cs) if v > idx)
if idx_data_set == 0:
pass
else:
idx -= self.df_lengths_cs[idx_data_set - 1]
# Get data
features = self.data[idx_data_set].to_numpy()[idx:idx + self.seq_len, :]
# Every point in features has its target value corresponding to the next time step:
targets = self.labels[idx_data_set].to_numpy()[idx:idx + self.seq_len]
# After feeding the whole sequence we just compare the final output of the RNN with the state following afterwards
# targets = self.labels[idx_data_set].to_numpy()[idx + self.seq_len-1]
# If get_time_axis try to obtain a vector of time data for the chosen sample
if get_time_axis:
try:
time_axis = self.time_axes[idx_data_set].to_numpy()[idx:idx + self.seq_len + 1]
except IndexError:
time_axis = []
# Return results
if get_time_axis:
return features, targets, time_axis
else:
return features, targets
def get_experiment(self, idx=None):
if self.time_axes is None:
raise Exception('No time information available!')
if idx is None:
idx = np.random.randint(0, self.number_of_samples)
return self.__getitem__(idx, get_time_axis=True)
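# --- Editor's illustrative sketch (not part of the original module) ---
# Building a Dataset from an already-normalized dataframe and wrapping it in a PyTorch
# DataLoader. The args fields and column names below are placeholder assumptions.
def _example_dataset_loader():
    """Hedged usage sketch for Dataset; args fields and column names are assumptions."""
    import numpy as np
    from types import SimpleNamespace
    args = SimpleNamespace(seq_len=32,
                           inputs_list=['Q', 'angle', 'position'],
                           outputs_list=['angle', 'position'])
    df = pd.DataFrame(np.random.rand(1000, 3), columns=args.inputs_list)  # dummy normalized data
    train_set = Dataset([df], args, time_axes=None, seq_len=args.seq_len)
    train_loader = data.DataLoader(train_set, batch_size=16, shuffle=True)
    features, targets = next(iter(train_loader))  # features: (16, 32, 3), targets: (16, 32, 2)
    return features, targets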
def plot_results(net,
args,
dataset=None,
normalization_info = None,
time_axes=None,
filepath=None,
inputs_list=None,
outputs_list=None,
closed_loop_list=None,
seq_len=None,
warm_up_len=None,
closed_loop_enabled=False,
comment='',
rnn_full_name=None,
save=False,
close_loop_idx=512):
"""
    This function accepts an RNN instance, arguments and a CartPole instance.
    It runs one random experiment with CartPole,
    feeds the data into the RNN and checks how well the RNN predicts the CartPole state one time step ahead
"""
rnn_full_name = net.rnn_full_name
if filepath is None:
filepath = args.val_file_name
if type(filepath) == list:
filepath = filepath[0]
if warm_up_len is None:
warm_up_len = args.warm_up_len
if seq_len is None:
seq_len = args.seq_len
if inputs_list is None:
inputs_list = args.inputs_list
if inputs_list is None:
raise ValueError('RNN inputs not provided!')
if outputs_list is None:
outputs_list = args.outputs_list
if outputs_list is None:
raise ValueError('RNN outputs not provided!')
if closed_loop_enabled and (closed_loop_list is None):
closed_loop_list = args.close_loop_for
if closed_loop_list is None:
raise ValueError('RNN closed-loop-inputs not provided!')
net.reset()
net.eval()
device = get_device()
if normalization_info is None:
normalization_info = load_normalization_info(args.PATH_TO_EXPERIMENT_RECORDINGS, rnn_full_name)
if dataset is None or time_axes is None:
test_dfs, time_axes = load_data(args, filepath)
test_dfs_norm = normalize_df(test_dfs, normalization_info)
test_set = Dataset(test_dfs_norm, args, time_axes=time_axes, seq_len=seq_len)
del test_dfs
else:
test_set = copy.deepcopy(dataset)
test_set.reset_seq_len(seq_len=seq_len)
# Format the experiment data
features, targets, time_axis = test_set.get_experiment(1) # Put number in brackets to get the same idx at every run
features_pd = pd.DataFrame(data=features, columns=inputs_list)
targets_pd = pd.DataFrame(data=targets, columns=outputs_list)
rnn_outputs = pd.DataFrame(columns=outputs_list)
warm_up_idx = 0
rnn_input_0 = copy.deepcopy(features_pd.iloc[0])
    # The warm-up does not seem to bring anything. Why? After the warm-up the internal state shouldn't be zero anyway, due to the biases...
while warm_up_idx < warm_up_len:
rnn_input = rnn_input_0
rnn_input = np.squeeze(rnn_input.to_numpy())
rnn_input = torch.from_numpy(rnn_input).float().unsqueeze(0).unsqueeze(0).to(device)
net(rnn_input=rnn_input)
warm_up_idx += 1
net.outputs = []
net.sample_counter = 0
idx_cl = 0
close_the_loop = False
for index, row in features_pd.iterrows():
rnn_input = pd.DataFrame(copy.deepcopy(row)).transpose().reset_index(drop=True)
if idx_cl == close_loop_idx:
close_the_loop = True
if closed_loop_enabled and close_the_loop and (normalized_rnn_output is not None):
rnn_input[closed_loop_list] = normalized_rnn_output[closed_loop_list]
rnn_input = np.squeeze(rnn_input.to_numpy())
rnn_input = torch.from_numpy(rnn_input).float().unsqueeze(0).unsqueeze(0).to(device)
normalized_rnn_output = net(rnn_input=rnn_input)
normalized_rnn_output = np.squeeze(normalized_rnn_output.detach().cpu().numpy()).tolist()
normalized_rnn_output = copy.deepcopy(pd.DataFrame(data=[normalized_rnn_output], columns=outputs_list))
rnn_outputs = rnn_outputs.append(copy.deepcopy(normalized_rnn_output), ignore_index=True)
idx_cl += 1
targets_pd_denorm = denormalize_df(targets_pd, normalization_info)
rnn_outputs_denorm = denormalize_df(rnn_outputs, normalization_info)
fig, axs = plot_results_specific(targets_pd_denorm, rnn_outputs_denorm, time_axis, comment, closed_loop_enabled, close_loop_idx)
plt.show()
if save:
# Make folders if not yet exist
try:
os.makedirs('save_plots')
except FileExistsError:
pass
dateTimeObj = datetime.now()
timestampStr = dateTimeObj.strftime("-%d%b%Y_%H%M%S")
if rnn_full_name is not None:
fig.savefig('./save_plots/' + rnn_full_name + timestampStr + '.png')
else:
fig.savefig('./save_plots/' + timestampStr + '.png')
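# --- Editor's illustrative sketch (not part of the original module) ---
# One possible way to call plot_results() after training. Every field of `args` below is a
# placeholder assumption; the real object comes from the project's training script, and
# load_data()/load_normalization_info() from SI_Toolkit may read further fields not shown here.
def _example_plot_results(net, inputs_list, outputs_list):
    """Hedged usage sketch for plot_results(); paths and args fields are assumptions."""
    from types import SimpleNamespace
    args = SimpleNamespace(
        val_file_name=['./data/validation.csv'],   # placeholder recording file
        PATH_TO_EXPERIMENT_RECORDINGS='./data/',   # placeholder folder holding the *-norm.csv file
        warm_up_len=10,
        seq_len=1024,
        inputs_list=inputs_list,
        outputs_list=outputs_list,
        close_loop_for=None)                       # only read when closed_loop_enabled=True
    plot_results(net, args, closed_loop_enabled=False, comment='validation run', save=False)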
| 9,315
| 3,525
| 189
|
a14001fe338c11a2de9e1cb5a8130727cb1dcd35
| 7,654
|
py
|
Python
|
resto_client/cli/parser/parser_configure_server.py
|
CNES/resto_client
|
7048bd79c739e33882ebd664790dcf0528e81aa4
|
[
"Apache-2.0"
] | 6
|
2019-12-20T09:12:30.000Z
|
2021-07-08T11:44:55.000Z
|
resto_client/cli/parser/parser_configure_server.py
|
CNES/resto_client
|
7048bd79c739e33882ebd664790dcf0528e81aa4
|
[
"Apache-2.0"
] | null | null | null |
resto_client/cli/parser/parser_configure_server.py
|
CNES/resto_client
|
7048bd79c739e33882ebd664790dcf0528e81aa4
|
[
"Apache-2.0"
] | 1
|
2019-12-17T20:16:39.000Z
|
2019-12-17T20:16:39.000Z
|
# -*- coding: utf-8 -*-
"""
.. admonition:: License
Copyright 2019 CNES
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied. See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from resto_client.base_exceptions import RestoClientDesignError
from resto_client.services.service_access import (AuthenticationServiceAccess, RestoServiceAccess)
from resto_client.settings.resto_client_config import resto_client_print
from resto_client.settings.servers_database import DB_SERVERS
from .parser_common import CliFunctionReturnType
from .parser_settings import (SERVER_ARGNAME, RESTO_URL_ARGNAME, RESTO_PROTOCOL_ARGNAME,
AUTH_URL_ARGNAME, AUTH_PROTOCOL_ARGNAME)
def cli_create_server(args: argparse.Namespace) -> CliFunctionReturnType:
"""
CLI adapter to create a server definition
:param args: arguments parsed by the CLI parser
:returns: the resto client parameters and the resto server possibly built by this command.
"""
    # TODO: Modify ServiceAccess such that lower() is implemented in them
resto_access = RestoServiceAccess(getattr(args, RESTO_URL_ARGNAME),
getattr(args, RESTO_PROTOCOL_ARGNAME).lower())
auth_access = AuthenticationServiceAccess(getattr(args, AUTH_URL_ARGNAME),
getattr(args, AUTH_PROTOCOL_ARGNAME).lower())
DB_SERVERS.create_server(getattr(args, SERVER_ARGNAME), resto_access, auth_access)
return None, None
def cli_delete_server(args: argparse.Namespace) -> CliFunctionReturnType:
"""
CLI adapter to delete a server definition
:param args: arguments parsed by the CLI parser
:returns: the resto client parameters and the resto server possibly built by this command.
"""
DB_SERVERS.delete(getattr(args, SERVER_ARGNAME))
return None, None
def cli_edit_server(args: argparse.Namespace) -> CliFunctionReturnType:
"""
CLI adapter to edit the server characteristics
:param args: arguments parsed by the CLI parser
:raises RestoClientDesignError: unconditionally, as this function is not implemented yet
"""
raise RestoClientDesignError('Edit server unimplemented')
def cli_show_servers(args: argparse.Namespace) -> CliFunctionReturnType:
"""
CLI adapter to show the servers database
:param args: arguments parsed by the CLI parser
:returns: the resto client parameters and the resto server possibly built by this command.
"""
_ = args # to avoid pylint warning
resto_client_print(DB_SERVERS)
return None, None
# We need to specify argparse._SubParsersAction for mypy to run. Thus pylint squeals.
# pylint: disable=protected-access
def add_configure_server_subparser(sub_parsers: argparse._SubParsersAction) -> None:
"""
Add the 'configure_server' subparser
:param sub_parsers: argparse object used to add a parser for that subcommand.
"""
parser_configure_server = sub_parsers.add_parser(
'configure_server', help='configure servers known by resto_client.',
        description='Allows creating, modifying or deleting server characteristics: url, type, etc.',
epilog='Servers definition is stored in a configuration file and can be edited using this'
' command.')
help_msg = 'For more help: {} <parameter> -h'.format(parser_configure_server.prog)
sub_parsers_configure_server = parser_configure_server.add_subparsers(description=help_msg)
add_config_server_create_parser(sub_parsers_configure_server)
add_config_server_delete_parser(sub_parsers_configure_server)
add_config_server_edit_parser(sub_parsers_configure_server)
add_config_server_show_parser(sub_parsers_configure_server)
def add_config_server_create_parser(
sub_parsers_configure_server: argparse._SubParsersAction) -> None:
"""
Update the 'configure_server' command subparser with options for 'configure_server create'
:param sub_parsers_configure_server: argparse object used to add a parser for that subcommand.
"""
subparser = sub_parsers_configure_server.add_parser(
'create', help='create a new server',
description='Create a new server in the servers configuration database.')
_add_positional_args_parser(subparser)
subparser.set_defaults(func=cli_create_server)
def add_config_server_delete_parser(
sub_parsers_configure_server: argparse._SubParsersAction) -> None:
"""
Update the 'configure_server' command subparser with options for 'configure_server delete'
:param sub_parsers_configure_server: argparse object used to add a parser for that subcommand.
"""
subparser = sub_parsers_configure_server.add_parser(
'delete', help='delete an existing server',
description='Delete a server from the configuration database.')
subparser.add_argument(SERVER_ARGNAME, help='name of the server to delete')
subparser.set_defaults(func=cli_delete_server)
def add_config_server_edit_parser(
sub_parsers_configure_server: argparse._SubParsersAction) -> None:
"""
Update the 'configure_server' command subparser with options for 'configure_server edit'
:param sub_parsers_configure_server: argparse object used to add a parser for that subcommand.
"""
subparser = sub_parsers_configure_server.add_parser(
'edit', help='edit server characteristics',
description='Edit the characteristics of a server existing in the configuration database.')
_add_positional_args_parser(subparser)
subparser.set_defaults(func=cli_edit_server)
def add_config_server_show_parser(
sub_parsers_configure_server: argparse._SubParsersAction) -> None:
"""
Update the 'configure_server' command subparser with options for 'configure_server show'
:param sub_parsers_configure_server: argparse object used to add a parser for that subcommand.
"""
subparser = sub_parsers_configure_server.add_parser(
'show', help='show servers database',
description='Show all the servers defined in the database with their configuration.')
subparser.set_defaults(func=cli_show_servers)
def _add_positional_args_parser(subparser: argparse.ArgumentParser) -> None:
"""
Add the positional arguments parsing rules for configure_server subcommands
:param subparser: parser to be supplemented with positional arguments.
"""
subparser.add_argument(SERVER_ARGNAME, help='name of the server')
group_resto = subparser.add_argument_group('resto service')
group_resto.add_argument(RESTO_URL_ARGNAME, help='URL of the resto server')
group_resto.add_argument(RESTO_PROTOCOL_ARGNAME,
choices=RestoServiceAccess.supported_protocols(),
help='Protocol of the resto server')
group_auth = subparser.add_argument_group('authentication service')
group_auth.add_argument(AUTH_URL_ARGNAME, nargs='?', help='URL of the authentication server')
group_auth.add_argument(AUTH_PROTOCOL_ARGNAME,
choices=AuthenticationServiceAccess.supported_protocols(),
help='Protocol of the authentication server')
| 44.5
| 100
| 0.74902
|
# -*- coding: utf-8 -*-
"""
.. admonition:: License
Copyright 2019 CNES
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied. See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from resto_client.base_exceptions import RestoClientDesignError
from resto_client.services.service_access import (AuthenticationServiceAccess, RestoServiceAccess)
from resto_client.settings.resto_client_config import resto_client_print
from resto_client.settings.servers_database import DB_SERVERS
from .parser_common import CliFunctionReturnType
from .parser_settings import (SERVER_ARGNAME, RESTO_URL_ARGNAME, RESTO_PROTOCOL_ARGNAME,
AUTH_URL_ARGNAME, AUTH_PROTOCOL_ARGNAME)
def cli_create_server(args: argparse.Namespace) -> CliFunctionReturnType:
"""
CLI adapter to create a server definition
:param args: arguments parsed by the CLI parser
:returns: the resto client parameters and the resto server possibly built by this command.
"""
    # TODO: Modify ServiceAccess such that lower() is implemented in them
resto_access = RestoServiceAccess(getattr(args, RESTO_URL_ARGNAME),
getattr(args, RESTO_PROTOCOL_ARGNAME).lower())
auth_access = AuthenticationServiceAccess(getattr(args, AUTH_URL_ARGNAME),
getattr(args, AUTH_PROTOCOL_ARGNAME).lower())
DB_SERVERS.create_server(getattr(args, SERVER_ARGNAME), resto_access, auth_access)
return None, None
def cli_delete_server(args: argparse.Namespace) -> CliFunctionReturnType:
"""
CLI adapter to delete a server definition
:param args: arguments parsed by the CLI parser
:returns: the resto client parameters and the resto server possibly built by this command.
"""
DB_SERVERS.delete(getattr(args, SERVER_ARGNAME))
return None, None
def cli_edit_server(args: argparse.Namespace) -> CliFunctionReturnType:
"""
CLI adapter to edit the server characteristics
:param args: arguments parsed by the CLI parser
:raises RestoClientDesignError: unconditionally, as this function is not implemented yet
"""
raise RestoClientDesignError('Edit server unimplemented')
def cli_show_servers(args: argparse.Namespace) -> CliFunctionReturnType:
"""
CLI adapter to show the servers database
:param args: arguments parsed by the CLI parser
:returns: the resto client parameters and the resto server possibly built by this command.
"""
_ = args # to avoid pylint warning
resto_client_print(DB_SERVERS)
return None, None
# We need to specify argparse._SubParsersAction for mypy to run. Thus pylint squeals.
# pylint: disable=protected-access
def add_configure_server_subparser(sub_parsers: argparse._SubParsersAction) -> None:
"""
Add the 'configure_server' subparser
:param sub_parsers: argparse object used to add a parser for that subcommand.
"""
parser_configure_server = sub_parsers.add_parser(
'configure_server', help='configure servers known by resto_client.',
        description='Allows creating, modifying or deleting server characteristics: url, type, etc.',
epilog='Servers definition is stored in a configuration file and can be edited using this'
' command.')
help_msg = 'For more help: {} <parameter> -h'.format(parser_configure_server.prog)
sub_parsers_configure_server = parser_configure_server.add_subparsers(description=help_msg)
add_config_server_create_parser(sub_parsers_configure_server)
add_config_server_delete_parser(sub_parsers_configure_server)
add_config_server_edit_parser(sub_parsers_configure_server)
add_config_server_show_parser(sub_parsers_configure_server)
def add_config_server_create_parser(
sub_parsers_configure_server: argparse._SubParsersAction) -> None:
"""
Update the 'configure_server' command subparser with options for 'configure_server create'
:param sub_parsers_configure_server: argparse object used to add a parser for that subcommand.
"""
subparser = sub_parsers_configure_server.add_parser(
'create', help='create a new server',
description='Create a new server in the servers configuration database.')
_add_positional_args_parser(subparser)
subparser.set_defaults(func=cli_create_server)
def add_config_server_delete_parser(
sub_parsers_configure_server: argparse._SubParsersAction) -> None:
"""
Update the 'configure_server' command subparser with options for 'configure_server delete'
:param sub_parsers_configure_server: argparse object used to add a parser for that subcommand.
"""
subparser = sub_parsers_configure_server.add_parser(
'delete', help='delete an existing server',
description='Delete a server from the configuration database.')
subparser.add_argument(SERVER_ARGNAME, help='name of the server to delete')
subparser.set_defaults(func=cli_delete_server)
def add_config_server_edit_parser(
sub_parsers_configure_server: argparse._SubParsersAction) -> None:
"""
Update the 'configure_server' command subparser with options for 'configure_server edit'
:param sub_parsers_configure_server: argparse object used to add a parser for that subcommand.
"""
subparser = sub_parsers_configure_server.add_parser(
'edit', help='edit server characteristics',
description='Edit the characteristics of a server existing in the configuration database.')
_add_positional_args_parser(subparser)
subparser.set_defaults(func=cli_edit_server)
def add_config_server_show_parser(
sub_parsers_configure_server: argparse._SubParsersAction) -> None:
"""
Update the 'configure_server' command subparser with options for 'configure_server show'
:param sub_parsers_configure_server: argparse object used to add a parser for that subcommand.
"""
subparser = sub_parsers_configure_server.add_parser(
'show', help='show servers database',
description='Show all the servers defined in the database with their configuration.')
subparser.set_defaults(func=cli_show_servers)
def _add_positional_args_parser(subparser: argparse.ArgumentParser) -> None:
"""
Add the positional arguments parsing rules for configure_server subcommands
:param subparser: parser to be supplemented with positional arguments.
"""
subparser.add_argument(SERVER_ARGNAME, help='name of the server')
group_resto = subparser.add_argument_group('resto service')
group_resto.add_argument(RESTO_URL_ARGNAME, help='URL of the resto server')
group_resto.add_argument(RESTO_PROTOCOL_ARGNAME,
choices=RestoServiceAccess.supported_protocols(),
help='Protocol of the resto server')
group_auth = subparser.add_argument_group('authentication service')
group_auth.add_argument(AUTH_URL_ARGNAME, nargs='?', help='URL of the authentication server')
group_auth.add_argument(AUTH_PROTOCOL_ARGNAME,
choices=AuthenticationServiceAccess.supported_protocols(),
help='Protocol of the authentication server')
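# --- Editor's illustrative sketch (not part of the original module) ---
# How this subparser might be wired into a top-level CLI parser. The top-level parser below is
# a throw-away assumption for illustration, not the real resto_client entry point.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='resto_client')
    sub_parsers = parser.add_subparsers()
    add_configure_server_subparser(sub_parsers)
    args = parser.parse_args(['configure_server', 'show'])
    client_params, resto_server = args.func(args)  # dispatches to cli_show_servers()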
| 0
| 0
| 0
|
472e53d4d44cd3cc04aaf44dbd4aac137138d3f3
| 1,224
|
py
|
Python
|
src/wlstm/utils.py
|
tedhuang96/mifwlstm
|
e1d5a3a1f954952ff5a1f28be08e703d1251e592
|
[
"MIT"
] | 11
|
2021-06-21T04:06:45.000Z
|
2022-02-22T20:53:45.000Z
|
src/wlstm/utils.py
|
tedhuang96/mifwlstm
|
e1d5a3a1f954952ff5a1f28be08e703d1251e592
|
[
"MIT"
] | null | null | null |
src/wlstm/utils.py
|
tedhuang96/mifwlstm
|
e1d5a3a1f954952ff5a1f28be08e703d1251e592
|
[
"MIT"
] | null | null | null |
import torch
from os.path import join, isdir, isfile
from os import listdir
import re
from src.wlstm.models import ReBiL
| 42.206897
| 125
| 0.686275
|
import torch
from os.path import join, isdir, isfile
from os import listdir
import re
from src.wlstm.models import ReBiL
def load_rebil_model(args, logdir, device='cuda:0'):
if not isdir(logdir):
print('The folder '+logdir+' is not found.')
return None
if args.eval_model_saved_epoch is None:
saved_epoch = args.num_epochs
else:
saved_epoch = args.eval_model_saved_epoch
for filename in listdir(logdir):
if isfile(join(logdir, filename)) and re.search('.*epoch_'+str(saved_epoch)+'.pt', filename):
model_filename = join(logdir, filename)
model = ReBiL(embedding_size=args.embedding_size, hidden_size=args.hidden_size, num_layers=args.num_layers, \
num_lstms=args.num_lstms, bidirectional=args.bidirectional, end_mask=args.end_mask, device=device).to(device)
checkpoint = torch.load(model_filename, map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
model.load_lstms_dict(checkpoint['lstms_dict'])
print(model_filename + ' is loaded.')
return model
print('The model is not saved at epoch '+str(saved_epoch)+' in '+logdir)
return None
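# --- Editor's illustrative sketch (not part of the original module) ---
# Minimal call of load_rebil_model(). Only the attributes the function actually reads are set
# on `args`; every value below is a placeholder assumption, not a repository default.
if __name__ == '__main__':
    from argparse import Namespace
    args = Namespace(
        eval_model_saved_epoch=None,  # None -> fall back to args.num_epochs
        num_epochs=100,
        embedding_size=64, hidden_size=128, num_layers=1,
        num_lstms=4, bidirectional=True, end_mask=False)
    model = load_rebil_model(args, logdir='logs/rebil_run_0', device='cpu')  # placeholder folder
    if model is not None:
        model.eval()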
| 1,079
| 0
| 23
|
4498832be13a9415d6ca76fd5ad2398b9e886b1d
| 1,059
|
py
|
Python
|
src/push_button.py
|
albang/arisa
|
9b7ea5e7befc92d1febb038476d03e858a622153
|
[
"MIT"
] | null | null | null |
src/push_button.py
|
albang/arisa
|
9b7ea5e7befc92d1febb038476d03e858a622153
|
[
"MIT"
] | null | null | null |
src/push_button.py
|
albang/arisa
|
9b7ea5e7befc92d1febb038476d03e858a622153
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import RPi.GPIO as GPIO # Import Raspberry Pi GPIO library
import os, time
os.system('mpg123 -g100 /home/pi/paw_patrol_courte.mp3 &')
GPIO.setwarnings(False) # Ignore warning for now
GPIO.setmode(GPIO.BOARD) # Use physical pin numbering
GPIO.setup(10, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set pin 10 to be an input pin and set initial value to be pulled low (off)
GPIO.add_event_detect(10,GPIO.RISING,callback=button_callback,bouncetime=4000) # Setup event on pin 10 rising edge
GPIO.setup(13, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set pin 13 to be an input pin and set initial value to be pulled low (off)
GPIO.add_event_detect(13,GPIO.RISING,callback=button_callback2,bouncetime=4000) # Setup event on pin 13 rising edge
while True:
time.sleep(100000)
GPIO.cleanup() # Clean up
| 40.730769
| 128
| 0.756374
|
#!/usr/bin/env python3
import RPi.GPIO as GPIO # Import Raspberry Pi GPIO library
import os, time
def button_callback(channel):
print("Button was pushed!")
os.system('mpg123 /home/pi/minute_courte.mp3 &')
def button_callback2(channel):
print("Button was pushed!")
os.system('mpg123 -g100 /home/pi/paw_patrol_courte.mp3 &')
os.system('mpg123 -g100 /home/pi/paw_patrol_courte.mp3 &')
GPIO.setwarnings(False) # Ignore warning for now
GPIO.setmode(GPIO.BOARD) # Use physical pin numbering
GPIO.setup(10, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set pin 10 to be an input pin and set initial value to be pulled low (off)
GPIO.add_event_detect(10,GPIO.RISING,callback=button_callback,bouncetime=4000) # Setup event on pin 10 rising edge
GPIO.setup(13, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set pin 13 to be an input pin and set initial value to be pulled low (off)
GPIO.add_event_detect(13,GPIO.RISING,callback=button_callback2,bouncetime=4000) # Setup event on pin 13 rising edge
while True:
time.sleep(100000)
GPIO.cleanup() # Clean up
| 198
| 0
| 46
|
4da98b7e4cedd701321a8df23f73f41ffd79cf6e
| 1,054
|
py
|
Python
|
src/utils.py
|
michaellas/streaming-vid-to-gifs
|
ee5df22c820d4d631f0437c98a53989ecb76dca3
|
[
"MIT"
] | null | null | null |
src/utils.py
|
michaellas/streaming-vid-to-gifs
|
ee5df22c820d4d631f0437c98a53989ecb76dca3
|
[
"MIT"
] | 1
|
2015-04-07T12:24:26.000Z
|
2015-04-07T12:28:30.000Z
|
src/utils.py
|
michaellas/streaming-vid-to-gifs
|
ee5df22c820d4d631f0437c98a53989ecb76dca3
|
[
"MIT"
] | null | null | null |
import time
import sys
if __name__ == '__main__':
'''
@log_called_times_decorator
def ff():
print 'f'
while True:
ff()
time.sleep(1)
'''
print_progress(45)
print ''
print_progress(x=20,max=200)
| 26.35
| 107
| 0.578748
|
import time
import sys
def log_called_times_decorator(func):
def wrapper(*args):
wrapper.count += 1
# print "The function I modify has been called {0} times(s).".format(wrapper.count)
now = time.time()
if now - wrapper.last_log > wrapper.dt:
print '[DEBUG] In last %ds %s() was called %d times' % (wrapper.dt,func.__name__,wrapper.count)
wrapper.count = 0
wrapper.last_log = now
return func(*args)
wrapper.count = 0
wrapper.last_log = time.time()
wrapper.dt = 5
return wrapper
def print_progress( percent=None, x=0, max=100):
if not percent:
percent = x*100.0/max
sys.stdout.write('\r')
bars = int(percent / 5)
sys.stdout.write("[%-20s] %d%% " % ('='*bars, int(percent)))
sys.stdout.flush()
if __name__ == '__main__':
'''
@log_called_times_decorator
def ff():
print 'f'
while True:
ff()
time.sleep(1)
'''
print_progress(45)
print ''
print_progress(x=20,max=200)
| 748
| 0
| 46
|
7e9bde1a168f5b214f14f1b43d8d2d70b12ae817
| 11,187
|
py
|
Python
|
org/hasii/chip8/ui/Chip8UIScreen.py
|
hasii2011/Chip8Emulator
|
96be8c0d01ccae0492ce0f980af905ec5c690f1a
|
[
"MIT"
] | null | null | null |
org/hasii/chip8/ui/Chip8UIScreen.py
|
hasii2011/Chip8Emulator
|
96be8c0d01ccae0492ce0f980af905ec5c690f1a
|
[
"MIT"
] | 8
|
2019-08-12T23:33:12.000Z
|
2020-12-09T01:31:17.000Z
|
org/hasii/chip8/ui/Chip8UIScreen.py
|
hasii2011/Chip8Emulator
|
96be8c0d01ccae0492ce0f980af905ec5c690f1a
|
[
"MIT"
] | null | null | null |
from typing import List
from os import getcwd
from os.path import basename
from pkg_resources import resource_filename
from logging import Logger
from logging import getLogger
from pygame import event as Event
from pygame import Surface
from pygame.font import Font
from albow.References import AttrRef
from albow.References import ItemRef
from albow.themes.Theme import Theme
from albow.core.ui.Widget import Widget
from albow.core.ui.Screen import Screen
from albow.dialog.FileDialogUtilities import request_old_filename
from albow.dialog.TitledDialog import TitledDialog
from albow.core.ui.Shell import Shell
from albow.core.ui.AlbowEventLoop import AlbowEventLoop
from albow.menu.Menu import Menu
from albow.menu.MenuBar import MenuBar
from albow.menu.MenuItem import MenuItem
from albow.layout.Column import Column
from albow.layout.Row import Row
from albow.layout.Frame import Frame
from albow.widgets.Label import Label
from albow.widgets.ValueDisplay import ValueDisplay
from org.hasii.chip8.Version import Version
from org.hasii.chip8.Chip8 import Chip8
from org.hasii.chip8.keyboard.Chip8KeyPadKeys import Chip8KeyPadKeys
from org.hasii.chip8.Chip8RegisterName import Chip8RegisterName
from org.hasii.chip8.ui.Chip8Screen import Chip8Screen
from org.hasii.chip8.errors.InvalidIndexRegisterValue import InvalidIndexRegisterValue
from org.hasii.chip8.errors.UnknownInstructionError import UnknownInstructionError
from org.hasii.chip8.errors.UnKnownSpecialRegistersSubOpCode import UnKnownSpecialRegistersSubOpCode
from org.hasii.chip8.ui.Chip8UIStack import Chip8UIStack
from org.hasii.chip8.ui.Chip8UIInstructionList import Chip8UIInstructionList
from org.hasii.chip8.ui.Chip8Beep import Chip8Beep
| 37.29
| 141
| 0.653169
|
from typing import List
from os import getcwd
from os.path import basename
from pkg_resources import resource_filename
from logging import Logger
from logging import getLogger
from pygame import event as Event
from pygame import Surface
from pygame.font import Font
from albow.References import AttrRef
from albow.References import ItemRef
from albow.themes.Theme import Theme
from albow.core.ui.Widget import Widget
from albow.core.ui.Screen import Screen
from albow.dialog.FileDialogUtilities import request_old_filename
from albow.dialog.TitledDialog import TitledDialog
from albow.core.ui.Shell import Shell
from albow.core.ui.AlbowEventLoop import AlbowEventLoop
from albow.menu.Menu import Menu
from albow.menu.MenuBar import MenuBar
from albow.menu.MenuItem import MenuItem
from albow.layout.Column import Column
from albow.layout.Row import Row
from albow.layout.Frame import Frame
from albow.widgets.Label import Label
from albow.widgets.ValueDisplay import ValueDisplay
from org.hasii.chip8.Version import Version
from org.hasii.chip8.Chip8 import Chip8
from org.hasii.chip8.keyboard.Chip8KeyPadKeys import Chip8KeyPadKeys
from org.hasii.chip8.Chip8RegisterName import Chip8RegisterName
from org.hasii.chip8.ui.Chip8Screen import Chip8Screen
from org.hasii.chip8.errors.InvalidIndexRegisterValue import InvalidIndexRegisterValue
from org.hasii.chip8.errors.UnknownInstructionError import UnknownInstructionError
from org.hasii.chip8.errors.UnKnownSpecialRegistersSubOpCode import UnKnownSpecialRegistersSubOpCode
from org.hasii.chip8.ui.Chip8UIStack import Chip8UIStack
from org.hasii.chip8.ui.Chip8UIInstructionList import Chip8UIInstructionList
from org.hasii.chip8.ui.Chip8Beep import Chip8Beep
class Chip8UIScreen(Screen):
FONT_PKG: str = 'org.hasii.chip8.resources'
CPU_CYCLE_EVENT: int = AlbowEventLoop.MUSIC_END_EVENT + 1
SIXTY_HERTZ: int = 1000 // 60
fileItems = [
MenuItem(text="Load", command="processLoad"),
MenuItem(text="Exit", command="processExit"),
]
helpItems = [
MenuItem(text="About", command="processAbout"),
MenuItem(text="Help", command="processHelp"),
]
fileMenu: Menu = Menu(title="File", items=fileItems)
helpMenu: Menu = Menu(title="Help", items=helpItems)
def __init__(self, theShell: Shell, theSurface: Surface):
"""
Args:
theShell: The shell that wraps this screen
theSurface: The pygame surface to use to drawn on
Returns: An instance of itself
"""
super().__init__(theShell)
self.surface: Surface = theSurface
self.logger: Logger = getLogger(__name__)
self.chip8: Chip8 = Chip8()
fullFileName: str = self._findFont('MonoFonto.ttf')
self.internalsFont: Font = Font(fullFileName, 13)
self.note = Chip8Beep(440)
self.labelAttrs = {
'fg_color': Theme.WHITE,
'bg_color': Theme.LAMAS_MEDIUM_BLUE,
'font': self.internalsFont,
}
self.rowColumnAttrs = {
'bg_color': Theme.LAMAS_MEDIUM_BLUE,
'margin': 2,
'spacing': 3,
}
menus = [
Chip8UIScreen.fileMenu, Chip8UIScreen.helpMenu
]
menuBar = MenuBar(menus=menus, width=self.shell.width)
framedMenuBar: Frame = Frame(client=menuBar, width=self.shell.width)
chip8Screen: Chip8Screen = Chip8Screen(self.chip8.virtualScreen)
internalsDisp: Row = self.makeCpuInternalsDisplay()
registerDisp: Row = self.makeRegisterDisplay()
stackDisp: Column = self.makeStackDisplay()
instrDisp: Column = self.makeInstructionListDisplay()
registerStackDisp: Row = Row([registerDisp, stackDisp, instrDisp], align='b', **self.rowColumnAttrs)
contentAttrs = {
"align": "l",
'expand': 0,
'bg_color': Theme.LAMAS_MEDIUM_BLUE,
'margin': 1,
'spacing': 2,
}
contents = Column([framedMenuBar, chip8Screen, internalsDisp, registerStackDisp], **contentAttrs)
self.logger.debug(f"Menu bar size: {framedMenuBar.size}, shell width: {self.shell.width}")
self.add(contents)
def timer_event(self, theEvent: Event):
"""
        The shell sets this up to be called at the CHIP8 60Hz rate;
So here we will
* emulate a CPU cycle
* decrement both the CHIP 8 delay timer and the sound timer
Args:
theEvent:
"""
# clock = Clock()
# milliseconds = clock.tick(1000) # milliseconds passed since last frame; needs to agree witH Chip8UIShell value
# self.logger.info(f"milliseconds: {milliseconds}")
milliseconds: float = theEvent.dict['time']
seconds: float = milliseconds/1000
self.logger.debug(f"seconds: {seconds:5.3f}")
try:
if self.chip8.romLoaded is True:
if self.chip8.isCPUWaitingForKeyPress() is False:
self.chip8.emulateSingleCpuCycle()
self.chip8.decrementDelayTimer()
self.chip8.decrementSoundTimer()
if self.chip8.soundTimer == 0:
self.note.stop()
except (UnknownInstructionError, InvalidIndexRegisterValue, UnKnownSpecialRegistersSubOpCode) as e:
self.logger.error(f"Chip 8 failure: {e}")
self.logger.error(f"Chip Dump:\n {self.chip8}")
self.chip8.debugPrintMemory = True
self.logger.error(f' MEMORY DUMP')
self.logger.error(f'____________________________________________________________________')
self.chip8._debugPrintMemory(startByteNbr=0, nBytes=len(self.chip8.memory))
self.shell.quit()
return True
def key_down(self, theKeyEvent: Event):
"""
Seems like part of the Chip 8 emulator has to happen here:
http://laurencescotford.co.uk/?p=347
Args:
theKeyEvent: The PyGame key event
"""
pressedKey: Chip8KeyPadKeys = Chip8KeyPadKeys.toEnum(theKeyEvent.key)
self.logger.debug(f"key down: {pressedKey.value:X}")
if pressedKey != Chip8KeyPadKeys.UNSUPPORTED:
self.chip8.keypad.keyDown(pressedKey)
self.logger.debug(f"keypad: {self.chip8.keypad}")
if self.chip8.keyPressData.waitingForKey is True:
self.chip8.setKeyPressed(pressedKey)
self.note.play(-1)
def key_up(self, theKeyEvent: Event):
releasedKey: Chip8KeyPadKeys = Chip8KeyPadKeys.toEnum(theKeyEvent.key)
self.logger.debug(f"key up: {releasedKey.value:X}")
if releasedKey != Chip8KeyPadKeys.UNSUPPORTED:
self.chip8.keypad.keyUp(releasedKey)
self.logger.debug(f"keypad: {self.chip8.keypad}")
self.note.stop()
def processLoad_cmd(self):
cwd: str = getcwd() + '/org/hasii/chip8/roms'
path = request_old_filename(directory=cwd)
self.logger.info(f'path: {path}')
self.chip8.resetCPU()
fName: str = basename(path)
self.chip8.loadROM(theFilename=fName)
def processExit_cmd(self):
self.logger.info("Executed exit item command")
self.shell.quit()
def processAbout_cmd(self):
ttlDlg: TitledDialog = TitledDialog(title='Chip8 Python', message=f'Version {Version}, by Humberto A. Sanchez II')
response = ttlDlg.present()
self.logger.info(f'response: {response}')
def processHelp_cmd(self):
self.logger.info("Executed help item command")
def makeCpuInternalsDisplay(self) -> Row:
pcRow: Row = self._makeLabelValueRow(refName='pc', attrLabel='PC:', attrFormat='0x%04X', valueWidth=50)
idxRow: Row = self._makeLabelValueRow(refName='indexRegister', attrLabel='Idx:', attrFormat='0x%04X', valueWidth=42)
sndTimerRow: Row = self._makeLabelValueRow(refName='soundTimer', attrLabel='Sound Timer:', attrFormat='0x%04X', valueWidth=42)
dlyTimerRow: Row = self._makeLabelValueRow(refName='delayTimer', attrLabel='Delay Timer:', attrFormat='0x%04X', valueWidth=42)
instCountRow: Row = self._makeLabelValueRow(refName='instructionCount', attrLabel='Inst Cnt:', valueWidth=50)
retAttrs = {
'bg_color': Theme.LAMAS_MEDIUM_BLUE,
'fg_color': Theme.WHITE,
'spacing': 2,
}
retContainer: Row = Row([pcRow, idxRow, sndTimerRow, dlyTimerRow, instCountRow], **retAttrs)
return retContainer
def makeRegisterDisplay(self) -> Row:
leftList: List[Widget] = []
rightList: List[Widget] = []
for regName in Chip8RegisterName:
itemRef: ItemRef = ItemRef(base=self.chip8.registers, index=regName)
regLabel: Label = Label(regName.name + ':', **self.labelAttrs)
regValue: ValueDisplay = ValueDisplay(ref=itemRef, width=42, **self.labelAttrs)
regValue.format = '0x%04X'
pairRow: Row = Row([regLabel, regValue], **self.rowColumnAttrs)
if regName.value % 2:
rightList.append(pairRow)
else:
leftList.append(pairRow)
leftColumn: Column = Column(leftList, **self.rowColumnAttrs)
rightColumn: Column = Column(rightList, **self.rowColumnAttrs)
gridAttrs = {
'bg_color': Theme.LAMAS_MEDIUM_BLUE,
'margin': 2,
'border_width': 1
}
retGrid: Row = Row([leftColumn, rightColumn], **gridAttrs)
return retGrid
def makeStackDisplay(self) -> Column:
stackLabel: Label = Label("Stack", **self.labelAttrs)
stackBox: Chip8UIStack = Chip8UIStack(theChipStack=self.chip8.stack)
stackContainer: Column = Column([stackLabel, stackBox], **self.rowColumnAttrs)
return stackContainer
def makeInstructionListDisplay(self) -> Column:
instrLabel: Label = Label("Instructions", **self.labelAttrs)
instrBox: Chip8UIInstructionList = Chip8UIInstructionList(instructionList=self.chip8.instructionList)
instrContainer: Column = Column([instrLabel, instrBox], **self.rowColumnAttrs)
return instrContainer
def _makeLabelValueRow(self, refName: str, attrLabel: str, attrFormat: str = None, valueWidth: int = 100) -> Row:
attrRef: AttrRef = AttrRef(base=self.chip8, name=refName)
attrLabel: Label = Label(attrLabel, **self.labelAttrs)
attrValue: ValueDisplay = ValueDisplay(ref=attrRef, width=valueWidth, **self.labelAttrs)
if attrFormat is not None:
attrValue.format = attrFormat
retRow: Row = Row([attrLabel, attrValue], **self.rowColumnAttrs)
return retRow
def _findFont(self, theFileName: str):
fileName = resource_filename(Chip8UIScreen.FONT_PKG, theFileName)
self.logger.debug(f"The full file name: {fileName}")
return fileName
| 4,272
| 5,165
| 23
|
4495fdf8627af041231ecfd1e216c9c24557ea8c
| 847
|
py
|
Python
|
monte_carlo.py
|
yandexdataschool/pyretina
|
300d3cd460ded071d75d3729e9b5dc1489d86d73
|
[
"Apache-2.0"
] | 2
|
2016-05-28T15:59:47.000Z
|
2018-07-30T21:05:18.000Z
|
monte_carlo.py
|
yandexdataschool/pyretina
|
300d3cd460ded071d75d3729e9b5dc1489d86d73
|
[
"Apache-2.0"
] | null | null | null |
monte_carlo.py
|
yandexdataschool/pyretina
|
300d3cd460ded071d75d3729e9b5dc1489d86d73
|
[
"Apache-2.0"
] | null | null | null |
from pyretina.mc import monte_carlo
import numpy as np
import json
import os
import os.path as osp
import shutil
number_of_events = 10
if __name__ == "__main__":
main("config/mc.json")
| 21.175
| 82
| 0.641086
|
from pyretina.mc import monte_carlo
import numpy as np
import json
import os
import os.path as osp
import shutil
number_of_events = 10
def main(conf):
with open(conf, 'r') as f:
config = json.load(f)
for N in np.arange(20, 520, 20):
config['scattering']['number_of_particles'] = {
'type' : 'randint',
'low' : N,
'high' : N + 1
}
plot_dir = osp.join('./events_img', '%d_particles' % N)
try:
shutil.rmtree(plot_dir)
except:
pass
os.mkdir(plot_dir)
events = monte_carlo(number_of_events, config, plot_dir=plot_dir, plot_each=2)
import cPickle as pickle
with open('data/mini_velo_sim_%d.pickled' % N, 'w') as f:
pickle.dump(events, f)
print 'Generated %d events with %d particles' % (number_of_events, N)
if __name__ == "__main__":
main("config/mc.json")
| 634
| 0
| 23
|
18ed809f9eec9232085b1804143efe6ca93e3a6e
| 5,950
|
py
|
Python
|
miner.py
|
OwlEyes33/crypto-alpha
|
dc3b39ecf38f3f445ecd94057775220b651633fc
|
[
"Apache-2.0"
] | null | null | null |
miner.py
|
OwlEyes33/crypto-alpha
|
dc3b39ecf38f3f445ecd94057775220b651633fc
|
[
"Apache-2.0"
] | null | null | null |
miner.py
|
OwlEyes33/crypto-alpha
|
dc3b39ecf38f3f445ecd94057775220b651633fc
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import time
from math import inf
from os import environ
from threading import Thread
import requests
from redis import Redis
from block import Block
from blockchain import Blockchain
from peer2peer import PeerToPeerMessage
from transaction import Transaction
logging.basicConfig(level=logging.DEBUG)
if __name__ == "__main__":
miner = Miner()
miner.routine()
| 37.421384
| 86
| 0.557479
|
import logging
import os
import time
from math import inf
from os import environ
from threading import Thread
import requests
from redis import Redis
from block import Block
from blockchain import Blockchain
from peer2peer import PeerToPeerMessage
from transaction import Transaction
logging.basicConfig(level=logging.DEBUG)
class Miner(object):
def __init__(self, *args, **kwargs):
self.transactions = kwargs.get('transactions', {})
self.block_size = 64
self.miner = list()
self.peers = environ.get('PEERS', 'http://localhost:8000').split(',')
assert len(self.peers)
self.cached_p2p_messages = dict()
self.blockchain = Blockchain()
self.redis_cli = Redis(host='redis')
self.sync_to_redis()
def get_peers_blockchain(self):
try:
blockchains = dict()
_max = -inf
best_peer = None
with open("blockchain.dat", "rb") as f:
blockchain_size = len(f.read())
for peer in self.peers:
r = requests.get("http://{}/api/blockchain".format(peer))
if r.json().get('size'):
size = int(r.json().get('size'))
if size > _max:
_max = size
best_peer = peer
blockchains[peer] = r.json().get('size')
if _max > blockchain_size:
logging.debug("Downloading new blockchain from: {}".format(best_peer))
os.rename('blockchain.dat', 'blockchain.backup')
r = requests.get("http://{}/api/sync".format(best_peer))
with open('blockchain.dat', 'wb') as f:
f.write(r.content)
if self.blockchain.verify_blockchain():
os.remove('blockchain.backup')
else:
os.remove('blockchain.dat')
os.rename('blockchain.backup', 'blockchain.dat')
except requests.exceptions.ConnectionError:
pass
def sync_to_redis(self):
for _, key in enumerate(self.transactions):
self.redis_cli[key] = str(self.transactions[key])
self.transactions = {}
def broadcast_new_block(self, block):
p2p = PeerToPeerMessage(block=block)
for peer in self.peers:
r = requests.post("http://{}/api/block".format(peer), data=p2p.to_json())
assert r.status_code <= 299
@staticmethod
def ping_peer_transactions(peer, p2p_message):
logging.debug("Forwarding transactions to nearest peer {}".format(peer))
payload = p2p_message.to_json()
try:
requests.post("http://{}/api/transactions".format(peer), data=payload)
except requests.exceptions.ConnectionError as e:
logging.warning("Connection error {}".format(str(e)))
@staticmethod
def ping_peer_block(peer, p2p_message):
logging.debug("Forwarding block to nearest peer {}".format(peer))
payload = p2p_message.to_json()
try:
requests.post("http://{}/api/block".format(peer), data=payload)
except requests.exceptions.ConnectionError as e:
logging.warning("Connection error {}".format(str(e)))
def forward(self, p2p, target):
for peer in self.peers:
t = Thread(target=target, args=(peer, p2p))
t.start()
# Todo: Transactions should be sorted by timestamp
def compile_block(self):
data = str()
i = 0
for transaction_id in self.redis_cli.keys():
if i < 64:
try:
transaction = self.redis_cli[transaction_id]
t = Transaction()
transaction = t.from_string(transaction.decode('utf-8'))
if not transaction.verify_signature():
logging.warning("Transaction signature not valid")
continue
data = data + str(transaction) + '\n'
self.redis_cli.delete(transaction.id)
i = i + 1
except IndexError:
return False
block = Block(data=data)
return block
def do_proof_of_work(self, block, first=False):
if block:
magic_number = 0
while True:
block.magic_number = magic_number
if not first:
block.blockchain_snapshot = self.blockchain.get_sha512hash()
else:
block.blockchain_snapshot = 'None'
sha512hash = block.generate_hash()
block.sha512hash = sha512hash
if block.check_proof_of_work():
block.magic_number = magic_number
block.sha512hash = sha512hash
return block
magic_number = magic_number + 1
def routine(self):
# Check if there is a new blockchain version
while True:
logging.debug("Requesting new blockchain info from P2P network")
self.get_peers_blockchain()
time.sleep(1)
# Check if we have transactions
if len(list(self.redis_cli.keys())):
# Compile a block
logging.debug("Building a new block")
block = self.compile_block()
# Do proof of work
logging.debug("Doing proof of work on block")
block = self.do_proof_of_work(block)
# Verify a block
logging.debug("Verifying the block")
if self.blockchain.verify_blockchain(new_block=block):
# Write the block
logging.debug("Writing a new block")
self.blockchain.write_new_block(block)
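# Illustrative sketch (not part of miner.py): a self-contained version of the brute-force
# search that Miner.do_proof_of_work performs above. The real difficulty test lives in
# Block.check_proof_of_work (block.py, not shown here), so the leading-zero target below is
# only an assumed stand-in for it.
def _toy_proof_of_work(data, difficulty=4):
    """Increment a nonce until the SHA-512 digest starts with `difficulty` zero hex digits."""
    import hashlib
    nonce = 0
    while True:
        digest = hashlib.sha512("{}{}".format(data, nonce).encode()).hexdigest()
        if digest.startswith("0" * difficulty):
            return nonce, digest
        nonce += 1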
if __name__ == "__main__":
miner = Miner()
miner.routine()
| 5,170
| 359
| 23
|
940189421ca5db8b06f5e381219db498733f8003
| 95
|
py
|
Python
|
mumu/decorators/__init__.py
|
mingminyu/mumu
|
e9f6c86a0b678ce4467ffba7f3dc4c0c8f971ff8
|
[
"Apache-2.0"
] | 1
|
2021-06-22T16:57:28.000Z
|
2021-06-22T16:57:28.000Z
|
mumu/decorators/__init__.py
|
mingminyu/mumu
|
e9f6c86a0b678ce4467ffba7f3dc4c0c8f971ff8
|
[
"Apache-2.0"
] | null | null | null |
mumu/decorators/__init__.py
|
mingminyu/mumu
|
e9f6c86a0b678ce4467ffba7f3dc4c0c8f971ff8
|
[
"Apache-2.0"
] | null | null | null |
from ._timeit import timeit
from ._progressbar import pbar_sql_query
from ._retry import retry
| 23.75
| 40
| 0.842105
|
from ._timeit import timeit
from ._progressbar import pbar_sql_query
from ._retry import retry
| 0
| 0
| 0
|
1486c16002e2c1f7f36eced992718519ad8c6db1
| 959
|
py
|
Python
|
web2py-appliances-master/MyForum/models/db.py
|
wantsomechocolate/WantsomeBeanstalk
|
8c8a0a80490d04ea52661a3114fd3db8de65a01e
|
[
"BSD-3-Clause"
] | null | null | null |
web2py-appliances-master/MyForum/models/db.py
|
wantsomechocolate/WantsomeBeanstalk
|
8c8a0a80490d04ea52661a3114fd3db8de65a01e
|
[
"BSD-3-Clause"
] | null | null | null |
web2py-appliances-master/MyForum/models/db.py
|
wantsomechocolate/WantsomeBeanstalk
|
8c8a0a80490d04ea52661a3114fd3db8de65a01e
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
DEBUG = True
db = DAL('sqlite://storage.sqlite',pool_size=1,check_reserved=['all'])
response.generic_patterns = ['*'] if request.is_local else []
from gluon.tools import Auth, Service, prettydate
auth = Auth(db)
auth.define_tables(username=False, signature=False)
service = Service()
## configure email
mail = auth.settings.mailer
mail.settings.server = 'logging' or 'smtp.gmail.com:587'
mail.settings.sender = 'you@gmail.com'
mail.settings.login = 'username:password'
## configure auth policy
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
## if you need to use OpenID, Facebook, MySpace, Twitter, Linkedin, etc.
## register with janrain.com, write your domain:api_key in private/janrain.key
from gluon.contrib.login_methods.rpx_account import use_janrain
use_janrain(auth, filename='private/janrain.key')
| 33.068966
| 78
| 0.777894
|
# -*- coding: utf-8 -*-
DEBUG = True
db = DAL('sqlite://storage.sqlite',pool_size=1,check_reserved=['all'])
response.generic_patterns = ['*'] if request.is_local else []
from gluon.tools import Auth, Service, prettydate
auth = Auth(db)
auth.define_tables(username=False, signature=False)
service = Service()
## configure email
mail = auth.settings.mailer
mail.settings.server = 'logging' or 'smtp.gmail.com:587'
mail.settings.sender = 'you@gmail.com'
mail.settings.login = 'username:password'
## configure auth policy
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
## if you need to use OpenID, Facebook, MySpace, Twitter, Linkedin, etc.
## register with janrain.com, write your domain:api_key in private/janrain.key
from gluon.contrib.login_methods.rpx_account import use_janrain
use_janrain(auth, filename='private/janrain.key')
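## Illustrative note (not part of the original appliance): in the earlier line
## `mail.settings.server = 'logging' or 'smtp.gmail.com:587'`, the `or` always returns
## 'logging' because a non-empty string is truthy, so outgoing mail is only logged, not sent.
## A sketch of a real SMTP configuration -- the address and password below are placeholders:
## mail.settings.server = 'smtp.gmail.com:587'
## mail.settings.sender = 'forum@example.com'
## mail.settings.login = 'forum@example.com:app-password'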
| 0
| 0
| 0
|
0cd0801dcd3a7dfddc9f817c743870fca0f08fa8
| 34
|
py
|
Python
|
python/cendalytics/report/core/bp/__init__.py
|
jiportilla/ontology
|
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
|
[
"MIT"
] | null | null | null |
python/cendalytics/report/core/bp/__init__.py
|
jiportilla/ontology
|
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
|
[
"MIT"
] | null | null | null |
python/cendalytics/report/core/bp/__init__.py
|
jiportilla/ontology
|
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
|
[
"MIT"
] | null | null | null |
from .report_api import ReportAPI
| 17
| 33
| 0.852941
|
from .report_api import ReportAPI
| 0
| 0
| 0
|
9ded2fcc8e677e149baf4d0a230b66939619b9e9
| 8,353
|
py
|
Python
|
conceptnet5/vectors/retrofit.py
|
MattCurryCom/conceptnet5
|
a16d94e635aee3d35a22aa04fcad7bb87ce927d8
|
[
"Apache-2.0"
] | 1
|
2018-11-27T17:00:57.000Z
|
2018-11-27T17:00:57.000Z
|
conceptnet5/vectors/retrofit.py
|
MattCurryCom/conceptnet5
|
a16d94e635aee3d35a22aa04fcad7bb87ce927d8
|
[
"Apache-2.0"
] | null | null | null |
conceptnet5/vectors/retrofit.py
|
MattCurryCom/conceptnet5
|
a16d94e635aee3d35a22aa04fcad7bb87ce927d8
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
from sklearn.preprocessing import normalize
from .sparse_matrix_builder import build_from_conceptnet_table
from .formats import load_hdf, save_hdf
def retrofit(row_labels, dense_frame, sparse_csr,
iterations=5, verbosity=0, max_cleanup_iters=20,
orig_vec_weight=0.15):
"""
Retrofitting is a process of combining information from a machine-learned
space of term vectors with further structured information about those
terms. It was originally presented in this 2015 NAACL paper by Manaal
Faruqui, Jesse Dodge, Sujay Jauhar, Chris Dyer, Eduard Hovy, and Noah
Smith, "Retrofitting Word Vectors to Semantic Lexicons":
https://www.cs.cmu.edu/~hovy/papers/15HLT-retrofitting-word-vectors.pdf
This function implements a variant that I've been calling "wide
retrofitting", which extends the process to learn vectors for terms that
were outside the original space.
`row_labels` is the list of terms that we want to have vectors for.
`dense_frame` is a DataFrame assigning vectors to some of these terms.
`sparse_csr` is a SciPy sparse square matrix, whose rows and columns are
implicitly labeled with `row_labels`. The entries of this matrix are
positive for terms that we know are related from our structured data.
(This is an awkward form of input, but unfortunately there is no good
way to represent sparse labeled data in Pandas.)
`sharded_retrofit` is responsible for building `row_labels` and `sparse_csr`
appropriately.
"""
# Initialize a DataFrame with rows that we know
retroframe = pd.DataFrame(
index=row_labels, columns=dense_frame.columns, dtype='f'
)
retroframe.update(dense_frame)
# orig_weights = 1 for known vectors, 0 for unknown vectors
orig_weights = 1 - retroframe.iloc[:, 0].isnull()
orig_vec_indicators = (orig_weights.values != 0)
orig_vecs = retroframe.fillna(0).values
# Subtract the mean so that vectors don't just clump around common
# hypernyms
orig_vecs[orig_vec_indicators] -= orig_vecs[orig_vec_indicators].mean(0)
# Delete the frame we built, we won't need its indices again until the end
del retroframe
vecs = orig_vecs
for iteration in range(iterations):
if verbosity >= 1:
print('Retrofitting: Iteration %s of %s' % (iteration+1, iterations))
# Since the sparse weight matrix is row-stochastic and has self-loops,
# pre-multiplication by it replaces each vector by a weighted average
# of itself and its neighbors. We really want to take the average
# of (itself and) the nonzero neighbors, which we can do by dividing
# the average with all the neighbors by the total of the weights of the
# nonzero neighbors. This avoids unduly shrinking vectors assigned to
# terms with lots of zero neighbors.
# Find, for every term, the total weight of its nonzero neighbors.
nonzero_indicators = (np.abs(vecs).sum(1) != 0)
total_neighbor_weights = sparse_csr.dot(nonzero_indicators)
# Now average with all the neighbors.
vecs = sparse_csr.dot(vecs)
# Now divide each vector (row) by the associated total weight.
# Some of the total weights could be zero, but only for rows that,
# before averaging, were zero and had all neighbors zero, whence
# after averaging will be zero. So only do the division for rows
# that are nonzero now, after averaging. Also, we reshape the total
# weights into a column vector so that numpy will broadcast the
# division by weights across the columns of the embedding matrix.
nonzero_indicators = (np.abs(vecs).sum(1) != 0)
total_neighbor_weights = total_neighbor_weights[nonzero_indicators]
total_neighbor_weights = total_neighbor_weights.reshape((len(total_neighbor_weights), 1))
vecs[nonzero_indicators] /= total_neighbor_weights
# Re-center the (new) non-zero vectors.
vecs[nonzero_indicators] -= vecs[nonzero_indicators].mean(0)
# Average known rows with original vectors
vecs[orig_vec_indicators, :] = \
(1.0 - orig_vec_weight) * vecs[orig_vec_indicators, :] + orig_vec_weight * orig_vecs[orig_vec_indicators, :]
# Clean up as many all-zero vectors as possible. Zero vectors
# can either come from components of the conceptnet graph that
# don't contain any terms from the embedding we are currently
# retrofitting (and there is nothing we can do about those here,
# but when retrofitting is done on that embedding they should be
# taken care of then) or from terms whose distance in the graph is
# larger than the number of retrofitting iterations used above; we
# propagate non-zero values to those terms by averaging over their
# non-zero neighbors. Note that this propagation can never reach
# the first class of terms, so we can't necessarily expect the
# number of zero vectors to go to zero at any one invocation of
# this code.
n_zero_indicators_old = -1
for iteration in range(max_cleanup_iters):
zero_indicators = (np.abs(vecs).sum(1) == 0)
n_zero_indicators = np.sum(zero_indicators)
if n_zero_indicators == 0 or n_zero_indicators == n_zero_indicators_old:
break
n_zero_indicators_old = n_zero_indicators
# First replace each zero vector (row) by the weighted average of all its
# neighbors.
vecs[zero_indicators, :] = sparse_csr[zero_indicators, :].dot(vecs)
# Now divide each newly nonzero vector (row) by the total weight of its
# old nonzero neighbors.
new_nonzero_indicators = np.logical_and(zero_indicators, np.abs(vecs).sum(1) != 0)
total_neighbor_weights = sparse_csr[new_nonzero_indicators, :].dot(np.logical_not(zero_indicators))
total_neighbor_weights = total_neighbor_weights.reshape((len(total_neighbor_weights), 1))
vecs[new_nonzero_indicators, :] /= total_neighbor_weights
else:
print('Warning: cleanup iteration limit exceeded.')
retroframe = pd.DataFrame(data=vecs, index=row_labels, columns=dense_frame.columns)
return retroframe
| 48.005747
| 130
| 0.704058
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import normalize
from .sparse_matrix_builder import build_from_conceptnet_table
from .formats import load_hdf, save_hdf
def sharded_retrofit(dense_hdf_filename, conceptnet_filename, output_filename,
iterations=5, nshards=6, verbosity=0,
max_cleanup_iters=20, orig_vec_weight=0.15):
# frame_box is basically a reference to a single large DataFrame. The
# DataFrame will at times be present or absent. When it's present, the list
# contains one item, which is the DataFrame. When it's absent, the list
# is empty.
frame_box = [load_hdf(dense_hdf_filename)]
sparse_csr, combined_index = build_from_conceptnet_table(conceptnet_filename, orig_index=frame_box[0].index)
shard_width = frame_box[0].shape[1] // nshards
for i in range(nshards):
temp_filename = output_filename + '.shard%d' % i
shard_from = shard_width * i
shard_to = shard_from + shard_width
if len(frame_box) == 0:
frame_box.append(load_hdf(dense_hdf_filename))
dense_frame = pd.DataFrame(frame_box[0].iloc[:, shard_from:shard_to])
# Delete full_dense_frame while running retrofitting, because it takes
# up a lot of memory and we can reload it from disk later.
frame_box.clear()
retrofitted = retrofit(combined_index, dense_frame, sparse_csr, iterations, verbosity, max_cleanup_iters, orig_vec_weight)
save_hdf(retrofitted, temp_filename)
del retrofitted
def join_shards(output_filename, nshards=6, sort=False):
joined_matrix = None
joined_labels = None
for i in range(nshards):
shard = load_hdf(output_filename + '.shard%d' % i)
nrows, ncols = shard.shape
if joined_matrix is None:
joined_matrix = np.zeros((nrows, ncols * nshards), dtype='f')
joined_labels = shard.index
joined_matrix[:, (ncols * i):(ncols * (i + 1))] = shard.values
del shard
normalize(joined_matrix, axis=1, norm='l2', copy=False)
dframe = pd.DataFrame(joined_matrix, index=joined_labels)
if sort:
dframe.sort_index(inplace=True)
save_hdf(dframe, output_filename)
def retrofit(row_labels, dense_frame, sparse_csr,
iterations=5, verbosity=0, max_cleanup_iters=20,
orig_vec_weight=0.15):
"""
Retrofitting is a process of combining information from a machine-learned
space of term vectors with further structured information about those
terms. It was originally presented in this 2015 NAACL paper by Manaal
Faruqui, Jesse Dodge, Sujay Jauhar, Chris Dyer, Eduard Hovy, and Noah
Smith, "Retrofitting Word Vectors to Semantic Lexicons":
https://www.cs.cmu.edu/~hovy/papers/15HLT-retrofitting-word-vectors.pdf
This function implements a variant that I've been calling "wide
retrofitting", which extends the process to learn vectors for terms that
were outside the original space.
`row_labels` is the list of terms that we want to have vectors for.
`dense_frame` is a DataFrame assigning vectors to some of these terms.
`sparse_csr` is a SciPy sparse square matrix, whose rows and columns are
implicitly labeled with `row_labels`. The entries of this matrix are
positive for terms that we know are related from our structured data.
(This is an awkward form of input, but unfortunately there is no good
way to represent sparse labeled data in Pandas.)
`sharded_retrofit` is responsible for building `row_labels` and `sparse_csr`
appropriately.
"""
# Initialize a DataFrame with rows that we know
retroframe = pd.DataFrame(
index=row_labels, columns=dense_frame.columns, dtype='f'
)
retroframe.update(dense_frame)
# orig_weights = 1 for known vectors, 0 for unknown vectors
orig_weights = 1 - retroframe.iloc[:, 0].isnull()
orig_vec_indicators = (orig_weights.values != 0)
orig_vecs = retroframe.fillna(0).values
# Subtract the mean so that vectors don't just clump around common
# hypernyms
orig_vecs[orig_vec_indicators] -= orig_vecs[orig_vec_indicators].mean(0)
# Delete the frame we built, we won't need its indices again until the end
del retroframe
vecs = orig_vecs
for iteration in range(iterations):
if verbosity >= 1:
print('Retrofitting: Iteration %s of %s' % (iteration+1, iterations))
# Since the sparse weight matrix is row-stochastic and has self-loops,
# pre-multiplication by it replaces each vector by a weighted average
# of itself and its neighbors. We really want to take the average
# of (itself and) the nonzero neighbors, which we can do by dividing
# the average with all the neighbors by the total of the weights of the
# nonzero neighbors. This avoids unduly shrinking vectors assigned to
# terms with lots of zero neighbors.
# Find, for every term, the total weight of its nonzero neighbors.
nonzero_indicators = (np.abs(vecs).sum(1) != 0)
total_neighbor_weights = sparse_csr.dot(nonzero_indicators)
# Now average with all the neighbors.
vecs = sparse_csr.dot(vecs)
# Now divide each vector (row) by the associated total weight.
# Some of the total weights could be zero, but only for rows that,
# before averaging, were zero and had all neighbors zero, whence
# after averaging will be zero. So only do the division for rows
# that are nonzero now, after averaging. Also, we reshape the total
# weights into a column vector so that numpy will broadcast the
# division by weights across the columns of the embedding matrix.
nonzero_indicators = (np.abs(vecs).sum(1) != 0)
total_neighbor_weights = total_neighbor_weights[nonzero_indicators]
total_neighbor_weights = total_neighbor_weights.reshape((len(total_neighbor_weights), 1))
vecs[nonzero_indicators] /= total_neighbor_weights
# Re-center the (new) non-zero vectors.
vecs[nonzero_indicators] -= vecs[nonzero_indicators].mean(0)
# Average known rows with original vectors
vecs[orig_vec_indicators, :] = \
(1.0 - orig_vec_weight) * vecs[orig_vec_indicators, :] + orig_vec_weight * orig_vecs[orig_vec_indicators, :]
# Clean up as many all-zero vectors as possible. Zero vectors
# can either come from components of the conceptnet graph that
# don't contain any terms from the embedding we are currently
# retrofitting (and there is nothing we can do about those here,
# but when retrofitting is done on that embedding they should be
# taken care of then) or from terms whose distance in the graph is
# larger than the number of retrofitting iterations used above; we
# propagate non-zero values to those terms by averaging over their
# non-zero neighbors. Note that this propagation can never reach
# the first class of terms, so we can't necessarily expect the
# number of zero vectors to go to zero at any one invocation of
# this code.
n_zero_indicators_old = -1
for iteration in range(max_cleanup_iters):
zero_indicators = (np.abs(vecs).sum(1) == 0)
n_zero_indicators = np.sum(zero_indicators)
if n_zero_indicators == 0 or n_zero_indicators == n_zero_indicators_old:
break
n_zero_indicators_old = n_zero_indicators
# First replace each zero vector (row) by the weighted average of all its
# neighbors.
vecs[zero_indicators, :] = sparse_csr[zero_indicators, :].dot(vecs)
# Now divide each newly nonzero vector (row) by the total weight of its
# old nonzero neighbors.
new_nonzero_indicators = np.logical_and(zero_indicators, np.abs(vecs).sum(1) != 0)
total_neighbor_weights = sparse_csr[new_nonzero_indicators, :].dot(np.logical_not(zero_indicators))
total_neighbor_weights = total_neighbor_weights.reshape((len(total_neighbor_weights), 1))
vecs[new_nonzero_indicators, :] /= total_neighbor_weights
else:
print('Warning: cleanup iteration limit exceeded.')
retroframe = pd.DataFrame(data=vecs, index=row_labels, columns=dense_frame.columns)
return retroframe
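# Illustrative sketch (not part of retrofit.py): a minimal call to retrofit() on a three-term
# toy graph. In the real pipeline, row_labels and the row-stochastic sparse matrix come from
# build_from_conceptnet_table() via sharded_retrofit(); the hand-built weights below are an
# assumption for the example, not ConceptNet data.
if __name__ == "__main__":
    from scipy.sparse import csr_matrix

    labels = ['/c/en/cat', '/c/en/kitten', '/c/en/dog']
    # Known 2-d vectors for two of the three terms; '/c/en/kitten' starts with no vector.
    known = pd.DataFrame([[1.0, 0.0], [0.0, 1.0]],
                         index=['/c/en/cat', '/c/en/dog'], columns=[0, 1])
    # Row-stochastic weights with self-loops: every row sums to 1.
    weights = csr_matrix(np.array([[0.5, 0.5, 0.0],
                                   [0.4, 0.2, 0.4],
                                   [0.0, 0.5, 0.5]]))
    frame = retrofit(labels, known, weights, iterations=5)
    # The result has a row for every label, including the term that had no input vector.
    print(frame)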
| 2,009
| 0
| 46
|
fcd076838a13b16b0181931dfa476968f0b03f64
| 11,297
|
py
|
Python
|
Stock_Analysis/auto_value_stock.py
|
parmarsuraj99/Finance
|
d9f012e33a99b959fdde575feedeb5922b379fe2
|
[
"MIT"
] | 1
|
2022-02-25T01:25:21.000Z
|
2022-02-25T01:25:21.000Z
|
Stock_Analysis/auto_value_stock.py
|
StockScripts/Finance
|
330bb46ea8e4c7ad5f3150cfa6d25e356178b189
|
[
"MIT"
] | null | null | null |
Stock_Analysis/auto_value_stock.py
|
StockScripts/Finance
|
330bb46ea8e4c7ad5f3150cfa6d25e356178b189
|
[
"MIT"
] | 2
|
2021-01-28T21:52:30.000Z
|
2021-02-16T13:26:35.000Z
|
# Code from https://medium.com/datadriveninvestor/use-python-to-value-a-stock-automatically-3b520422ab6 by Bohmian
# Importing required modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
import time
from config import financial_model_prep
pd.set_option('display.max_columns', None)
# Settings to produce nice plots in a Jupyter notebook
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = [15, 10]
import seaborn as sns
# To extract and parse fundamental data from finviz website
import requests
from bs4 import BeautifulSoup as bs
import warnings
warnings.filterwarnings('ignore')
# For parsing financial statements data from financialmodelingprep api
from urllib.request import urlopen
import json
# inputs
base_url = "https://financialmodelingprep.com/api/v3/"
tickers = ['AAL']
apiKey = financial_model_prep()
cash_flows = []
total_debts = []
cash_and_ST_investments_list = []
betas = []
discount_rates = []
EPS_growth_5Ys = []
EPS_growth_6Y_to_10Ys = []
EPS_growth_11Y_to_20Ys = []
shares_outstandings = []
intrinsic_values = []
current_prices = []
margins_safety = []
valid_tickers = []
for ticker in tickers:
try:
q_cash_flow_statement = pd.DataFrame(get_jsonparsed_data(base_url+'cash-flow-statement/' + ticker + '?period=quarter' + '&apikey=' + apiKey))
q_cash_flow_statement = q_cash_flow_statement.set_index('date').iloc[:4] # extract for last 4 quarters
q_cash_flow_statement = q_cash_flow_statement.apply(pd.to_numeric, errors='coerce')
cash_flow_statement = pd.DataFrame(get_jsonparsed_data(base_url+'cash-flow-statement/' + ticker + '?apikey=' + apiKey))
cash_flow_statement = cash_flow_statement.set_index('date')
cash_flow_statement = cash_flow_statement.apply(pd.to_numeric, errors='coerce')
ttm_cash_flow_statement = q_cash_flow_statement.sum() # sum up last 4 quarters to get TTM cash flow
cash_flow_statement = cash_flow_statement[::-1].append(ttm_cash_flow_statement.rename('TTM')).drop(['netIncome'], axis=1)
final_cash_flow_statement = cash_flow_statement[::-1] # reverse list to show most recent ones first
# final_cash_flow_statement[['freeCashFlow']].iloc[::-1].iloc[-15:].plot(kind='bar', title=ticker + ' Cash Flows')
# plt.show()
q_balance_statement = pd.DataFrame(get_jsonparsed_data(base_url+'balance-sheet-statement/' + ticker + '?period=quarter' + '&apikey=' + apiKey))
q_balance_statement = q_balance_statement.set_index('date')
q_balance_statement = q_balance_statement.apply(pd.to_numeric, errors='coerce')
cash_flow = final_cash_flow_statement.iloc[0]['freeCashFlow']
total_debt = q_balance_statement.iloc[0]['totalDebt']
cash_and_ST_investments = q_balance_statement.iloc[0]['cashAndShortTermInvestments']
# print("Free Cash Flow: ", cash_flow)
# print("Total Debt: ", total_debt)
# print("Cash and ST Investments: ", cash_and_ST_investments)
# List of data we want to extract from Finviz Table
metric = ['Price', 'EPS next 5Y', 'Beta', 'Shs Outstand']
finviz_data = get_finviz_data(ticker)
# print('\nFinViz Data:\n' + str(finviz_data))
Beta = finviz_data['Beta']
discount_rate = 7
if(Beta<0.80):
discount_rate = 5
elif(Beta>=0.80 and Beta<1):
discount_rate = 6
elif(Beta>=1 and Beta<1.1):
discount_rate = 6.5
elif(Beta>=1.1 and Beta<1.2):
discount_rate = 7
elif(Beta>=1.2 and Beta<1.3):
discount_rate =7.5
elif(Beta>=1.3 and Beta<1.4):
discount_rate = 8
elif(Beta>=1.4 and Beta<1.6):
discount_rate = 8.5
elif(Beta>=1.61):
discount_rate = 9
# print("\nDiscount Rate: ", discount_rate)
EPS_growth_5Y = finviz_data['EPS next 5Y']
EPS_growth_6Y_to_10Y = EPS_growth_5Y/2 # Half the previous growth rate, conservative estimate
EPS_growth_11Y_to_20Y = np.minimum(EPS_growth_6Y_to_10Y, 4) # Slightly higher than long term inflation rate, conservative estimate
shares_outstanding = round(finviz_data['Shs Outstand'])
# print("Free Cash Flow: ", cash_flow)
# print("Total Debt: ", total_debt)
# print("Cash and ST Investments: ", cash_and_ST_investments)
# print("EPS Growth 5Y: ", EPS_growth_5Y)
# print("EPS Growth 6Y to 10Y: ", EPS_growth_6Y_to_10Y)
# print("EPS Growth 11Y to 20Y: ", EPS_growth_11Y_to_20Y)
# print("Discount Rate: ", discount_rate)
# print("Shares Outstanding: ", shares_outstanding)
intrinsic_value = round(calculate_intrinsic_value(cash_flow, total_debt, cash_and_ST_investments,
EPS_growth_5Y, EPS_growth_6Y_to_10Y, EPS_growth_11Y_to_20Y,
shares_outstanding, discount_rate), 2)
# print("\nIntrinsic Value: ", intrinsic_value)
current_price = finviz_data['Price']
# print("Current Price: ", current_price)
change = round(((intrinsic_value-current_price)/current_price)*100, 2)
# print("Margin of Safety: ", margin_safety)
cash_flows.append(cash_flow)
total_debts.append(total_debt)
cash_and_ST_investments_list.append(cash_and_ST_investments)
betas.append(Beta)
discount_rates.append(discount_rate)
EPS_growth_5Ys.append(EPS_growth_5Y)
EPS_growth_6Y_to_10Ys.append(EPS_growth_6Y_to_10Y)
EPS_growth_11Y_to_20Ys.append(EPS_growth_11Y_to_20Y)
shares_outstandings.append(shares_outstanding)
intrinsic_values.append(intrinsic_value)
current_prices.append(current_price)
margins_safety.append(change)
valid_tickers.append(ticker)
except:
pass
df = pd.DataFrame(np.column_stack([valid_tickers, cash_flows, total_debts, cash_and_ST_investments_list, betas, discount_rates, EPS_growth_5Ys, EPS_growth_6Y_to_10Ys, EPS_growth_11Y_to_20Ys, shares_outstandings, intrinsic_values, current_prices, margins_safety]),
columns=['Ticker', 'Cash Flow', 'Total Debt', 'Cash and ST investment', 'Beta', 'Discount Rate', 'EPS Growth 5 Y', 'EPS Growth 6-10 Y', 'EPS Growth 11-20 Y', 'Shares Outstanding', 'Intrinsic Value', 'Current Price', 'Margin Safety']).set_index('Ticker')
df = df.sort_values(['Margin Safety'], ascending=True)
df.to_csv(f'{time.time()}.csv')
print (df)
| 46.681818
| 284
| 0.615208
|
# Code from https://medium.com/datadriveninvestor/use-python-to-value-a-stock-automatically-3b520422ab6 by Bohmian
# Importing required modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
import time
from config import financial_model_prep
pd.set_option('display.max_columns', None)
# Settings to produce nice plots in a Jupyter notebook
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = [15, 10]
import seaborn as sns
# To extract and parse fundamental data from finviz website
import requests
from bs4 import BeautifulSoup as bs
import warnings
warnings.filterwarnings('ignore')
# For parsing financial statements data from financialmodelingprep api
from urllib.request import urlopen
import json
def get_jsonparsed_data(url):
response = urlopen(url)
data = response.read().decode("utf-8")
return json.loads(data)
# inputs
base_url = "https://financialmodelingprep.com/api/v3/"
tickers = ['AAL']
apiKey = financial_model_prep()
cash_flows = []
total_debts = []
cash_and_ST_investments_list = []
betas = []
discount_rates = []
EPS_growth_5Ys = []
EPS_growth_6Y_to_10Ys = []
EPS_growth_11Y_to_20Ys = []
shares_outstandings = []
intrinsic_values = []
current_prices = []
margins_safety = []
valid_tickers = []
for ticker in tickers:
try:
q_cash_flow_statement = pd.DataFrame(get_jsonparsed_data(base_url+'cash-flow-statement/' + ticker + '?period=quarter' + '&apikey=' + apiKey))
q_cash_flow_statement = q_cash_flow_statement.set_index('date').iloc[:4] # extract for last 4 quarters
q_cash_flow_statement = q_cash_flow_statement.apply(pd.to_numeric, errors='coerce')
cash_flow_statement = pd.DataFrame(get_jsonparsed_data(base_url+'cash-flow-statement/' + ticker + '?apikey=' + apiKey))
cash_flow_statement = cash_flow_statement.set_index('date')
cash_flow_statement = cash_flow_statement.apply(pd.to_numeric, errors='coerce')
ttm_cash_flow_statement = q_cash_flow_statement.sum() # sum up last 4 quarters to get TTM cash flow
cash_flow_statement = cash_flow_statement[::-1].append(ttm_cash_flow_statement.rename('TTM')).drop(['netIncome'], axis=1)
final_cash_flow_statement = cash_flow_statement[::-1] # reverse list to show most recent ones first
# final_cash_flow_statement[['freeCashFlow']].iloc[::-1].iloc[-15:].plot(kind='bar', title=ticker + ' Cash Flows')
# plt.show()
q_balance_statement = pd.DataFrame(get_jsonparsed_data(base_url+'balance-sheet-statement/' + ticker + '?period=quarter' + '&apikey=' + apiKey))
q_balance_statement = q_balance_statement.set_index('date')
q_balance_statement = q_balance_statement.apply(pd.to_numeric, errors='coerce')
cash_flow = final_cash_flow_statement.iloc[0]['freeCashFlow']
total_debt = q_balance_statement.iloc[0]['totalDebt']
cash_and_ST_investments = q_balance_statement.iloc[0]['cashAndShortTermInvestments']
# print("Free Cash Flow: ", cash_flow)
# print("Total Debt: ", total_debt)
# print("Cash and ST Investments: ", cash_and_ST_investments)
# List of data we want to extract from Finviz Table
metric = ['Price', 'EPS next 5Y', 'Beta', 'Shs Outstand']
def fundamental_metric(soup, metric):
# the table which stores the data in Finviz has html table attribute class of 'snapshot-td2'
return soup.find(text = metric).find_next(class_='snapshot-td2').text
def get_finviz_data(ticker):
try:
url = ("http://finviz.com/quote.ashx?t=" + ticker.lower())
soup = bs(requests.get(url,headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0'}).content)
dict_finviz = {}
for m in metric:
dict_finviz[m] = fundamental_metric(soup,m)
for key, value in dict_finviz.items():
# replace percentages
if (value[-1]=='%'):
dict_finviz[key] = value[:-1]
dict_finviz[key] = float(dict_finviz[key])
# billion
if (value[-1]=='B'):
dict_finviz[key] = value[:-1]
dict_finviz[key] = float(dict_finviz[key])*1000000000
# million
if (value[-1]=='M'):
dict_finviz[key] = value[:-1]
dict_finviz[key] = float(dict_finviz[key])*1000000
try:
dict_finviz[key] = float(dict_finviz[key])
except:
pass
except Exception as e:
print (e)
print ('Not successful parsing ' + ticker + ' data.')
return dict_finviz
finviz_data = get_finviz_data(ticker)
# print('\nFinViz Data:\n' + str(finviz_data))
Beta = finviz_data['Beta']
discount_rate = 7
if(Beta<0.80):
discount_rate = 5
elif(Beta>=0.80 and Beta<1):
discount_rate = 6
elif(Beta>=1 and Beta<1.1):
discount_rate = 6.5
elif(Beta>=1.1 and Beta<1.2):
discount_rate = 7
elif(Beta>=1.2 and Beta<1.3):
discount_rate =7.5
elif(Beta>=1.3 and Beta<1.4):
discount_rate = 8
elif(Beta>=1.4 and Beta<1.6):
discount_rate = 8.5
elif(Beta>=1.61):
discount_rate = 9
# print("\nDiscount Rate: ", discount_rate)
EPS_growth_5Y = finviz_data['EPS next 5Y']
EPS_growth_6Y_to_10Y = EPS_growth_5Y/2 # Half the previous growth rate, conservative estimate
EPS_growth_11Y_to_20Y = np.minimum(EPS_growth_6Y_to_10Y, 4) # Slightly higher than long term inflation rate, conservative estimate
shares_outstanding = round(finviz_data['Shs Outstand'])
# print("Free Cash Flow: ", cash_flow)
# print("Total Debt: ", total_debt)
# print("Cash and ST Investments: ", cash_and_ST_investments)
# print("EPS Growth 5Y: ", EPS_growth_5Y)
# print("EPS Growth 6Y to 10Y: ", EPS_growth_6Y_to_10Y)
# print("EPS Growth 11Y to 20Y: ", EPS_growth_11Y_to_20Y)
# print("Discount Rate: ", discount_rate)
# print("Shares Outstanding: ", shares_outstanding)
def calculate_intrinsic_value(cash_flow, total_debt, cash_and_ST_investments,
EPS_growth_5Y, EPS_growth_6Y_to_10Y, EPS_growth_11Y_to_20Y,
shares_outstanding, discount_rate):
            # Convert all percentages to decimals
EPS_growth_5Y_d = EPS_growth_5Y/100
EPS_growth_6Y_to_10Y_d = EPS_growth_6Y_to_10Y/100
EPS_growth_11Y_to_20Y_d = EPS_growth_11Y_to_20Y/100
discount_rate_d = discount_rate/100
# print("\nDiscounted Cash Flows")
# Lists of projected cash flows from year 1 to year 20
cash_flow_list = []
cash_flow_discounted_list = []
year_list = []
# Years 1 to 5
for year in range(1, 6):
year_list.append(year)
cash_flow*=(1 + EPS_growth_5Y_d)
cash_flow_list.append(cash_flow)
cash_flow_discounted = cash_flow/((1 + discount_rate_d)**year)
cash_flow_discounted_list.append(cash_flow_discounted)
# print("Year " + str(year) + ": $" + str(cash_flow_discounted)) ## Print out the projected discounted cash flows
# Years 6 to 10
for year in range(6, 11):
year_list.append(year)
cash_flow*=(1 + EPS_growth_6Y_to_10Y_d)
cash_flow_list.append(cash_flow)
cash_flow_discounted = cash_flow/((1 + discount_rate_d)**year)
cash_flow_discounted_list.append(cash_flow_discounted)
# print("Year " + str(year) + ": $" + str(cash_flow_discounted)) ## Print out the projected discounted cash flows
# Years 11 to 20
for year in range(11, 21):
year_list.append(year)
cash_flow*=(1 + EPS_growth_11Y_to_20Y_d)
cash_flow_list.append(cash_flow)
cash_flow_discounted = cash_flow/((1 + discount_rate_d)**year)
cash_flow_discounted_list.append(cash_flow_discounted)
# print("Year " + str(year) + ": $" + str(cash_flow_discounted)) ## Print out the projected discounted cash flows
intrinsic_value = (sum(cash_flow_discounted_list) - total_debt + cash_and_ST_investments)/shares_outstanding
df = pd.DataFrame.from_dict({'Year': year_list, 'Cash Flow': cash_flow_list, 'Discounted Cash Flow': cash_flow_discounted_list})
df.index = df.Year
# df.plot(kind='bar', title = 'Projected Cash Flows of ' + ticker)
# plt.show()
return intrinsic_value
intrinsic_value = round(calculate_intrinsic_value(cash_flow, total_debt, cash_and_ST_investments,
EPS_growth_5Y, EPS_growth_6Y_to_10Y, EPS_growth_11Y_to_20Y,
shares_outstanding, discount_rate), 2)
# print("\nIntrinsic Value: ", intrinsic_value)
current_price = finviz_data['Price']
# print("Current Price: ", current_price)
change = round(((intrinsic_value-current_price)/current_price)*100, 2)
# print("Margin of Safety: ", margin_safety)
cash_flows.append(cash_flow)
total_debts.append(total_debt)
cash_and_ST_investments_list.append(cash_and_ST_investments)
betas.append(Beta)
discount_rates.append(discount_rate)
EPS_growth_5Ys.append(EPS_growth_5Y)
EPS_growth_6Y_to_10Ys.append(EPS_growth_6Y_to_10Y)
EPS_growth_11Y_to_20Ys.append(EPS_growth_11Y_to_20Y)
shares_outstandings.append(shares_outstanding)
intrinsic_values.append(intrinsic_value)
current_prices.append(current_price)
margins_safety.append(change)
valid_tickers.append(ticker)
except:
pass
df = pd.DataFrame(np.column_stack([valid_tickers, cash_flows, total_debts, cash_and_ST_investments_list, betas, discount_rates, EPS_growth_5Ys, EPS_growth_6Y_to_10Ys, EPS_growth_11Y_to_20Ys, shares_outstandings, intrinsic_values, current_prices, margins_safety]),
columns=['Ticker', 'Cash Flow', 'Total Debt', 'Cash and ST investment', 'Beta', 'Discount Rate', 'EPS Growth 5 Y', 'EPS Growth 6-10 Y', 'EPS Growth 11-20 Y', 'Shares Outstanding', 'Intrinsic Value', 'Current Price', 'Margin Safety']).set_index('Ticker')
df = df.sort_values(['Margin Safety'], ascending=True)
df.to_csv(f'{time.time()}.csv')
print (df)
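# Illustrative sketch (not part of the original script): the discounting that
# calculate_intrinsic_value() applies above, shown on made-up numbers. Each projected year-t
# cash flow is divided by (1 + discount_rate)**t, and the per-share value is
# (sum of discounted flows - total_debt + cash_and_ST_investments) / shares_outstanding.
def _dcf_example(cash_flow=100.0, growth=0.10, rate=0.07, years=5):
    discounted = []
    for year in range(1, years + 1):
        cash_flow *= (1 + growth)                           # grow the flow
        discounted.append(cash_flow / (1 + rate) ** year)   # discount back to today
    return sum(discounted)                                  # about 544 for the defaults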
| 4,418
| 0
| 143
|
901b7a71198943a53f223f18bbc124edf656a124
| 2,580
|
py
|
Python
|
src/100_simple_aggregation.py
|
j20232/kaggle_earthquake
|
47fac5f2e8d2ad4fab82426a0b6af18b71e4b57b
|
[
"MIT"
] | null | null | null |
src/100_simple_aggregation.py
|
j20232/kaggle_earthquake
|
47fac5f2e8d2ad4fab82426a0b6af18b71e4b57b
|
[
"MIT"
] | null | null | null |
src/100_simple_aggregation.py
|
j20232/kaggle_earthquake
|
47fac5f2e8d2ad4fab82426a0b6af18b71e4b57b
|
[
"MIT"
] | null | null | null |
"""Extract simple aggregation features
Reference: https://www.kaggle.com/gpreda/lanl-earthquake-eda-and-prediction
"""
import sys
import numpy as np
import pandas as pd
from pathlib import Path
from tqdm import tqdm
import competition as cc
from common import stop_watch
TRAIN_CSV_DIRECTORY_PATH = cc.INPUT_PATH / sys.argv[1]
TRAIN_CSV_LIST = list(TRAIN_CSV_DIRECTORY_PATH.glob('**/*.csv'))
if __name__ == "__main__":
train_csv_path = cc.FEATURE_PATH / "{}".format(sys.argv[1])
train_csv_l = [str(item) for item in TRAIN_CSV_LIST]
extract_features(train_csv_l, train_csv_path)
test_csv_path = cc.FEATURE_PATH / "test"
test_csv_l = [str(item) for item in cc.TEST_CSV_LIST]
extract_features(test_csv_l, test_csv_path)
| 38.507463
| 91
| 0.622481
|
"""Extract simple aggregation features
Reference: https://www.kaggle.com/gpreda/lanl-earthquake-eda-and-prediction
"""
import sys
import numpy as np
import pandas as pd
from pathlib import Path
from tqdm import tqdm
import competition as cc
from common import stop_watch
TRAIN_CSV_DIRECTORY_PATH = cc.INPUT_PATH / sys.argv[1]
TRAIN_CSV_LIST = list(TRAIN_CSV_DIRECTORY_PATH.glob('**/*.csv'))
@stop_watch
def extract_features(csv_list, feature_dir_path):
df = pd.DataFrame()
Path.mkdir(feature_dir_path, exist_ok=True, parents=True)
for index, each_csv in enumerate(tqdm(sorted(csv_list))):
seg = pd.read_csv(each_csv, dtype=cc.DTYPES)
seg_id = each_csv.split("/")[-1].split(".")[0]
df.loc[index, "seg_id"] = seg_id
xc = pd.Series(seg['acoustic_data'].values)
# basic aggregation
df.loc[index, "mean"] = xc.mean()
df.loc[index, "std"] = xc.std()
df.loc[index, "max"] = xc.max()
df.loc[index, "min"] = xc.min()
df.loc[index, 'sum'] = xc.sum()
df.loc[index, 'mad'] = xc.mad()
df.loc[index, 'kurtosis'] = xc.kurtosis()
df.loc[index, 'skew'] = xc.skew()
df.loc[index, 'median'] = xc.median()
df.loc[index, 'mean_change_rate'] = np.mean(np.nonzero((np.diff(xc) / xc[:-1]))[0])
# abs aggregation
df.loc[index, 'abs_mean'] = np.abs(xc).mean()
df.loc[index, 'abs_std'] = np.abs(xc).std()
df.loc[index, 'abs_max'] = np.abs(xc).max()
df.loc[index, 'abs_min'] = np.abs(xc).min()
df.loc[index, 'abs_sum'] = np.abs(xc).sum()
df.loc[index, 'abs_mad'] = np.abs(xc).mad()
df.loc[index, 'abs_kurtosis'] = np.abs(xc).kurtosis()
df.loc[index, 'abs_skew'] = np.abs(xc).skew()
df.loc[index, 'abs_median'] = np.abs(xc).median()
df.loc[index, 'mean_change_abs'] = np.mean(np.diff(xc))
df.loc[index, 'max_to_min'] = xc.max() / np.abs(xc.min())
df.loc[index, 'max_to_min_diff'] = xc.max() - np.abs(xc.min())
df.loc[index, 'count_big'] = len(xc[np.abs(xc) > 500])
print("Aggregation output is belows:")
print(df.head(3))
df.to_csv(feature_dir_path / "{}.csv".format(cc.PREF), index=False)
if __name__ == "__main__":
train_csv_path = cc.FEATURE_PATH / "{}".format(sys.argv[1])
train_csv_l = [str(item) for item in TRAIN_CSV_LIST]
extract_features(train_csv_l, train_csv_path)
test_csv_path = cc.FEATURE_PATH / "test"
test_csv_l = [str(item) for item in cc.TEST_CSV_LIST]
extract_features(test_csv_l, test_csv_path)
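# Illustrative sketch (not part of the original script): the kind of per-segment aggregates
# that extract_features() writes, computed here on a tiny made-up acoustic series.
def _aggregation_example():
    xc = pd.Series([2.0, -1.0, 4.0, 0.0, -3.0])   # hypothetical acoustic_data segment
    return {
        'mean': xc.mean(),                          # 0.4
        'abs_max': np.abs(xc).max(),                # 4.0
        'max_to_min': xc.max() / np.abs(xc.min()),  # 4 / 3
    }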
| 1,795
| 0
| 22
|
0b3eba4af37debbbb40bec37c6e9b379c1156729
| 8,817
|
py
|
Python
|
segment.py
|
neelsj/syndata-generation
|
df73cc9a146c34870c3d80acce0ca04b314ec1b0
|
[
"MIT"
] | null | null | null |
segment.py
|
neelsj/syndata-generation
|
df73cc9a146c34870c3d80acce0ca04b314ec1b0
|
[
"MIT"
] | null | null | null |
segment.py
|
neelsj/syndata-generation
|
df73cc9a146c34870c3d80acce0ca04b314ec1b0
|
[
"MIT"
] | null | null | null |
import os
from datetime import datetime
import json
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
import numpy as np
from skimage import measure
from shapely.geometry import Polygon, MultiPolygon
from PIL import Image
import cv2
#model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_resnet50', pretrained=True)
model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_resnet101', pretrained=True)
# model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_mobilenet_v3_large', pretrained=True)
model.eval()
from torchvision import transforms
COCO_INFO = {
"description": "",
"url": "",
"version": "1",
"year": 2022,
"contributor": "MSR CV Group",
"date_created": datetime.now().strftime("%m/%d/%Y")
}
COCO_LICENSES = [{
"url": "",
"id": 0,
"name": "License"
}]
if __name__ == "__main__":
data_dir = "E:/Research/Images/FineGrained/StanfordCars/train_bing/"
| 31.830325
| 132
| 0.565612
|
import os
from datetime import datetime
import json
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
import numpy as np
from skimage import measure
from shapely.geometry import Polygon, MultiPolygon
from PIL import Image
import cv2
#model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_resnet50', pretrained=True)
model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_resnet101', pretrained=True)
# model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_mobilenet_v3_large', pretrained=True)
model.eval()
from torchvision import transforms
COCO_INFO = {
"description": "",
"url": "",
"version": "1",
"year": 2022,
"contributor": "MSR CV Group",
"date_created": datetime.now().strftime("%m/%d/%Y")
}
COCO_LICENSES = [{
"url": "",
"id": 0,
"name": "License"
}]
def create_mask(input_image):
input_image = input_image.convert("RGB")
preprocess = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
input_tensor = preprocess(input_image)
input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
# move the input and model to GPU for speed if available
if torch.cuda.is_available():
input_batch = input_batch.to('cuda')
model.to('cuda')
with torch.no_grad():
output = model(input_batch)['out'][0]
output_predictions = output.argmax(0)
    # collapse the 21-class prediction into a binary foreground mask (any non-background class)
mask = np.uint8(255*(output_predictions.cpu().numpy() > 0))
#mask = output_predictions.byte().cpu().numpy()
return mask
def create_sub_mask_annotation(sub_mask, image_id, category_id, annotation_id, is_crowd, bbox=None):
# Find contours (boundary lines) around each sub-mask
# Note: there could be multiple contours if the object
# is partially occluded. (E.g. an elephant behind a tree)
#contours = measure.find_contours(sub_mask, 0.5, positive_orientation='low')
padded_binary_mask = np.pad(sub_mask, pad_width=1, mode='constant', constant_values=0)
contours = measure.find_contours(padded_binary_mask, 0.5, positive_orientation='low')
segmentations = []
polygons = []
for contour in contours:
# Flip from (row, col) representation to (x, y)
# and subtract the padding pixel
for i in range(len(contour)):
row, col = contour[i]
contour[i] = (col - 1, row - 1)
# Make a polygon and simplify it
poly = Polygon(contour)
poly = poly.simplify(1.0, preserve_topology=False)
polygons.append(poly)
segmentation = np.array(poly.exterior.coords).ravel().tolist()
segmentations.append(segmentation)
# Combine the polygons to calculate the bounding box and area
multi_poly = MultiPolygon(polygons)
x, y, max_x, max_y = multi_poly.bounds
width = max_x - x
height = max_y - y
bbox = bbox if (bbox) else (x, y, width, height)
area = multi_poly.area
annotation = {
'segmentation': segmentations,
'iscrowd': is_crowd,
'image_id': image_id,
'category_id': category_id,
'id': annotation_id,
'bbox': bbox,
'area': area
}
return annotation
def generate_masks(data_dir, background=False):
dirs = os.listdir(data_dir)
    # create a color palette, selecting a color for each class
palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
colors = torch.as_tensor([i for i in range(21)])[:, None] * palette
colors = (colors % 255).numpy().astype("uint8")
prcThresh = 3
images = []
annotations = []
image_id = 1
category_id = 1
annotation_id = 1
categories = []
for dir in tqdm(dirs):
files_dir = os.path.join(data_dir, dir)
if (not os.path.isdir(files_dir)):
continue
files = os.listdir(files_dir)
files = [file for file in files if "_mask" not in file]
category = {"supercategory": "object", "id": category_id, "name": dir}
categories.append(category)
for file in tqdm(files):
filename = os.path.join(data_dir, dir, file)
#print(filename)
image = Image.open(filename)
new_img={}
new_img["license"] = 0
new_img["file_name"] = os.path.join(dir, file)
new_img["width"] = int(image.size[0])
new_img["height"] = int(image.size[1])
new_img["id"] = image_id
images.append(new_img)
mask = create_mask(image)
if (background):
maskname = os.path.splitext(filename)[0] + "_mask.jpg"
maskObj = np.uint8(255*(mask==0))
Image.fromarray(maskObj).save(maskname)
#plt.imshow(np.array(image)[:,:,0]*mask)
#plt.show()
else:
nb_components, output, boxes, centroids = cv2.connectedComponentsWithStats(mask, connectivity=8)
box_sizes = [box[4] for box in boxes[1:]]
for id in range(1, nb_components):
box = [int(b) for b in boxes[id][0:4]]
sub_mask = np.reshape(output==id, mask.shape).astype(np.double)
#plt.imshow(sub_mask)
#plt.show()
prc = 100*box_sizes[id-1]/(mask.shape[0]*mask.shape[1])
if (prc >= prcThresh):
try:
annotation = create_sub_mask_annotation(sub_mask, image_id, category_id, annotation_id, False, bbox=box)
annotations.append(annotation)
annotation_id += 1
except Exception as e:
print(e)
pass
#print(nb_components)
#print(output)
#print(stats)
#print(centroids)
# save mask for dominant big object
if (box_sizes):
max_ind = np.argmax(box_sizes)
#print(max_ind)
prc = 100*box_sizes[max_ind]/(mask.shape[0]*mask.shape[1])
#print(prc)
if (prc >= prcThresh):
maskname = os.path.splitext(filename)[0] + "_mask.jpg"
#print(maskname)
maskObj = np.uint8(255*np.reshape(1-(output==max_ind+1), mask.shape))
#maskObjN = 255-maskObj
#edgeSum = np.sum(maskObjN[:,0]) + np.sum(maskObjN[:,-1]) + np.sum(maskObjN[0,:]) + np.sum(maskObjN[-1,:])
#if (edgeSum == 0):
Image.fromarray(maskObj).save(maskname)
##mask.putpalette(colors)
#plt.subplot(121)
#plt.imshow(image)
#plt.subplot(122)
#plt.imshow(maskObj)
#plt.show()
image_id += 1
#if (image_id > 3):
# break
category_id += 1
#if (category_id > 3):
# break
print("saving annotations to coco as json ")
### create COCO JSON annotations
coco = {}
coco["info"] = COCO_INFO
coco["licenses"] = COCO_LICENSES
coco["images"] = images
coco["categories"] = categories
coco["annotations"] = annotations
    # TODO: specify coco file location
output_file_path = os.path.join(data_dir,"../", "coco_instances.json")
with open(output_file_path, 'w+') as json_file:
json_file.write(json.dumps(coco))
print(">> complete. find coco json here: ", output_file_path)
print("last annotation id: ", annotation_id)
print("last image_id: ", image_id)
#from pycocotools.coco import COCO
## Initialize the COCO api for instance annotations
#coco = COCO(output_file_path)
## Load the categories in a variable
#imgIds = coco.getImgIds()
#print("Number of images:", len(imgIds))
## load and display a random image
#for i in range(len(imgIds)):
# img = coco.loadImgs(imgIds[i])[0]
# I = Image.open(data_dir + "/" + img['file_name'])
# plt.clf()
# plt.imshow(I)
# plt.axis('off')
# annIds = coco.getAnnIds(imgIds=img['id'])
# anns = coco.loadAnns(annIds)
# coco.showAnns(anns, True)
# plt.waitforbuttonpress()
if __name__ == "__main__":
data_dir = "E:/Research/Images/FineGrained/StanfordCars/train_bing/"
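# Illustrative sketch (not part of the original file): what create_sub_mask_annotation()
# returns for a toy binary mask. The square object below is made up for the example; in the
# script the sub-masks come from the connected components of the DeepLabV3 prediction.
def _coco_annotation_example():
    sub_mask = np.zeros((16, 16), dtype=np.double)
    sub_mask[4:10, 4:10] = 1.0   # a single square "object"
    ann = create_sub_mask_annotation(sub_mask, image_id=1, category_id=1,
                                     annotation_id=1, is_crowd=0)
    # ann['segmentation'] holds the simplified polygon outline,
    # ann['bbox'] the (x, y, width, height) box and ann['area'] the polygon area.
    return ann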
| 7,808
| 0
| 69
|
0326330a12bafbdb605fe605d3e7680654a1a51a
| 802
|
py
|
Python
|
tests/unit/common/query/test_expression_query_results_reader.py
|
ambrosejcarr/matrix-service
|
f61252d79941fa962240e27062682c9676f07e95
|
[
"MIT"
] | 11
|
2018-10-26T20:47:55.000Z
|
2022-02-02T10:32:42.000Z
|
tests/unit/common/query/test_expression_query_results_reader.py
|
ambrosejcarr/matrix-service
|
f61252d79941fa962240e27062682c9676f07e95
|
[
"MIT"
] | 379
|
2018-06-04T22:44:33.000Z
|
2020-06-03T00:20:08.000Z
|
tests/unit/common/query/test_expression_query_results_reader.py
|
ambrosejcarr/matrix-service
|
f61252d79941fa962240e27062682c9676f07e95
|
[
"MIT"
] | 4
|
2018-11-22T01:00:27.000Z
|
2020-09-01T16:42:05.000Z
|
import mock
import unittest
from matrix.common.query.expression_query_results_reader import ExpressionQueryResultsReader
| 42.210526
| 94
| 0.786783
|
import mock
import unittest
from matrix.common.query.expression_query_results_reader import ExpressionQueryResultsReader
class TestExpressionQueryResultsReader(unittest.TestCase):
@mock.patch("matrix.common.query.query_results_reader.QueryResultsReader._parse_manifest")
def test_load_results(self, mock_parse_manifest):
reader = ExpressionQueryResultsReader("test_manifest_key")
with self.assertRaises(NotImplementedError):
reader.load_results()
@mock.patch("matrix.common.query.query_results_reader.QueryResultsReader._parse_manifest")
def test_load_slice(self, mock_parse_manifest):
reader = ExpressionQueryResultsReader("test_manifest_key")
results = reader.load_slice(0)
self.assertEqual(type(results).__name__, 'generator')
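# Illustrative addition (not in the original file): the standard unittest entry point, so the
# module can be run directly as well as through a test runner such as pytest.
if __name__ == "__main__":
    unittest.main()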
| 376
| 280
| 23
|
1abc147f5b65fc34db7ff312e43a5af4e6f6fb0a
| 21,660
|
py
|
Python
|
analysis/graveyard/study_definition.py
|
opensafely/antibody-and-antiviral-deployment
|
27cd171870fdd161468d1cabd1eaee76f1943593
|
[
"MIT"
] | null | null | null |
analysis/graveyard/study_definition.py
|
opensafely/antibody-and-antiviral-deployment
|
27cd171870fdd161468d1cabd1eaee76f1943593
|
[
"MIT"
] | 1
|
2022-03-18T16:20:19.000Z
|
2022-03-18T16:20:19.000Z
|
analysis/graveyard/study_definition.py
|
opensafely/antibody-and-antiviral-deployment
|
27cd171870fdd161468d1cabd1eaee76f1943593
|
[
"MIT"
] | null | null | null |
################################################################################
#
# Description: This script provides the formal specification of the study data
# that will be extracted from the OpenSAFELY database.
#
# Output: output/data/input_*.csv.gz
#
# Author(s): M Green (edited by H Curtis)
# Date last updated: 03/02/2022
#
################################################################################
# IMPORT STATEMENTS ----
## Import code building blocks from cohort extractor package
from cohortextractor import (
StudyDefinition,
patients,
codelist_from_csv,
codelist,
filter_codes_by_category,
combine_codelists,
Measure
)
## Import codelists from codelist.py (which pulls them from the codelist folder)
from codelists import *
# DEFINE STUDY POPULATION ----
## Define study time variables
from datetime import date
campaign_start = "2021-12-16"
end_date = date.today().isoformat()
## Define study population and variables
study = StudyDefinition(
# PRELIMINARIES ----
## Configure the expectations framework
default_expectations = {
"date": {"earliest": "2021-11-01", "latest": "today"},
"rate": "uniform",
"incidence": 0.4,
},
## Define index date
index_date = campaign_start,
# POPULATION ----
population = patients.satisfying(
"""
(registered_eligible OR registered_treated)
AND
NOT has_died
AND
(sotrovimab_covid_therapeutics
OR molnupiravir_covid_therapeutics
OR casirivimab_covid_therapeutics
OR covid_test_positive
)
""",
has_died = patients.died_from_any_cause(
on_or_before = "index_date - 1 day",
returning = "binary_flag",
),
),
# TREATMENT - NEUTRALISING MONOCLONAL ANTIBODIES OR ANTIVIRALS ----
## Sotrovimab
sotrovimab_covid_therapeutics = patients.with_covid_therapeutics(
#with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = "Sotrovimab",
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "date",
date_format = "YYYY-MM-DD",
return_expectations = {
"date": {"earliest": "2021-12-20"},
"incidence": 0.4
},
),
### Molnupiravir
molnupiravir_covid_therapeutics = patients.with_covid_therapeutics(
#with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = "Molnupiravir",
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "date",
date_format = "YYYY-MM-DD",
return_expectations = {
"date": {"earliest": "2021-12-20"},
"incidence": 0.4
},
),
### Casirivimab and imdevimab
casirivimab_covid_therapeutics = patients.with_covid_therapeutics(
#with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = "Casirivimab and imdevimab",
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "date",
date_format = "YYYY-MM-DD",
return_expectations = {
"date": {"earliest": "2021-12-20"},
"incidence": 0.4
},
),
date_treated = patients.minimum_of(
"sotrovimab_covid_therapeutics",
"molnupiravir_covid_therapeutics",
"casirivimab_covid_therapeutics",
),
# ELIGIBILITY CRITERIA VARIABLES ----
## Inclusion criteria variables
### SARS-CoV-2 test
# Note patients are eligible for treatment if diagnosed <=5d ago
# in the latest 5 days there may be patients identified as eligible who have not yet been treated
covid_test_positive = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "positive",
returning = "binary_flag",
on_or_after = "index_date - 5 days",
find_first_match_in_period = True,
restrict_to_earliest_specimen_date = False,
return_expectations = {
"incidence": 0.2
},
),
covid_test_date = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "positive",
find_first_match_in_period = True,
restrict_to_earliest_specimen_date = False,
returning = "date",
date_format = "YYYY-MM-DD",
on_or_after = "index_date - 5 days",
return_expectations = {
"date": {"earliest": "2021-12-20", "latest": "index_date"},
"incidence": 0.9
},
),
covid_positive_test_type = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "positive",
returning = "case_category",
on_or_after = "index_date - 5 days",
restrict_to_earliest_specimen_date = True,
return_expectations = {
"category": {"ratios": {"LFT_Only": 0.4, "PCR_Only": 0.4, "LFT_WithPCR": 0.2}},
"incidence": 0.2,
},
),
covid_positive_previous_30_days = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "positive",
returning = "binary_flag",
between = ["covid_test_date - 31 days", "covid_test_date - 1 day"],
find_last_match_in_period = True,
restrict_to_earliest_specimen_date = False,
return_expectations = {
"incidence": 0.05
},
),
### Onset of symptoms of COVID-19
symptomatic_covid_test = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "any",
returning = "symptomatic",
on_or_after = "index_date - 5 days",
find_first_match_in_period = True,
restrict_to_earliest_specimen_date = False,
return_expectations={
"incidence": 0.1,
"category": {
"ratios": {
"": 0.2,
"N": 0.2,
"Y": 0.6,
}
},
},
),
covid_symptoms_snomed = patients.with_these_clinical_events(
covid_symptoms_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
on_or_after = "index_date - 5 days",
),
# CENSORING ----
registered_eligible = patients.registered_as_of("covid_test_date"),
registered_treated = patients.registered_as_of("date_treated"),
## Death of any cause
death_date = patients.died_from_any_cause(
returning = "date_of_death",
date_format = "YYYY-MM-DD",
on_or_after = "covid_test_date",
return_expectations = {
"date": {"earliest": "2021-12-20", "latest": "index_date"},
"incidence": 0.1
},
),
## De-registration
dereg_date = patients.date_deregistered_from_all_supported_practices(
on_or_after = "covid_test_date",
date_format = "YYYY-MM-DD",
return_expectations = {
"date": {"earliest": "2021-12-20", "latest": "index_date"},
"incidence": 0.1
},
),
### Blueteq ‘high risk’ cohort
high_risk_cohort_covid_therapeutics = patients.with_covid_therapeutics(
with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = ["Sotrovimab", "Molnupiravir","Casirivimab and imdevimab"],
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "risk_group",
date_format = "YYYY-MM-DD",
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"Down's syndrome": 0.1,
"Sickle cell disease": 0.1,
"solid cancer": 0.1,
"haematological diseases, stem cell transplant recipients": 0.1,
"renal disease": 0.1,
"liver disease": 0.1,
"immune-mediated inflammatory disorders (IMID)": 0.2,
"Primary immune deficiencies": 0.1,
"HIV/AIDS": 0.1,},},
},
),
    ### NHSD ‘high risk’ cohort (codelist to be defined if/when data available)
# high_risk_cohort_nhsd = patients.with_these_clinical_events(
# high_risk_cohort_nhsd_codes,
# between = [campaign_start, index_date],
# returning = "date",
# date_format = "YYYY-MM-DD",
# find_first_match_in_period = True,
# ),
## Exclusion criteria
### Pattern of clinical presentation indicates that there is recovery rather than risk of deterioration from infection
# (not currently possible to define/code)
### Require hospitalisation for COVID-19
## NB this data lags behind the therapeutics/testing data so may be missing
covid_hospital_admission_date = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = covid_icd10_codes,
on_or_after = "index_date - 5 days",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
return_expectations = {
"date": {"earliest": "index_date - 5 days", "latest": "index_date"},
"rate": "uniform",
"incidence": 0.05
},
),
### New supplemental oxygen requirement specifically for the management of COVID-19 symptoms
# (not currently possible to define/code)
### Children weighing less than 40kg
# (not currently possible to define/code)
### Children aged under 12 years
age = patients.age_as_of(
"index_date",
return_expectations = {
"rate": "universal",
"int": {"distribution": "population_ages"},
"incidence" : 0.9
},
),
### Known hypersensitivity reaction to the active substances or to any of the excipients of sotrovimab
# (not currently possible to define/code)
# HIGH RISK GROUPS ----
## Down's syndrome
downs_syndrome_nhsd_snomed = patients.with_these_clinical_events(
downs_syndrome_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
downs_syndrome_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = downs_syndrome_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
downs_syndrome_nhsd = patients.minimum_of("downs_syndrome_nhsd_snomed", "downs_syndrome_nhsd_icd10"),
## Sickle cell disease
sickle_cell_disease_nhsd_snomed = patients.with_these_clinical_events(
sickle_cell_disease_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
sickle_cell_disease_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = sickle_cell_disease_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
sickle_cell_disease_nhsd = patients.minimum_of("sickle_cell_disease_nhsd_snomed", "sickle_cell_disease_nhsd_icd10"),
## Solid cancer
cancer_opensafely_snomed = patients.with_these_clinical_events(
combine_codelists(
non_haematological_cancer_opensafely_snomed_codes,
lung_cancer_opensafely_snomed_codes,
chemotherapy_radiotherapy_opensafely_snomed_codes
),
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
## Haematological diseases
haematopoietic_stem_cell_transplant_nhsd_snomed = patients.with_these_clinical_events(
haematopoietic_stem_cell_transplant_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
haematopoietic_stem_cell_transplant_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = haematopoietic_stem_cell_transplant_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
haematopoietic_stem_cell_transplant_nhsd_opcs4 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_procedures = haematopoietic_stem_cell_transplant_nhsd_opcs4_codes,
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
return_expectations = {
"date": {"earliest": "2020-02-01"},
"rate": "exponential_increase",
"incidence": 0.01,
},
),
# haematological_malignancies_nhsd_snomed = patients.with_these_clinical_events(
# haematological_malignancies_nhsd_snomed_codes,
# returning = "date",
# date_format = "YYYY-MM-DD",
# find_first_match_in_period = True,
# #on_or_before = "end_date",
# ),
haematological_malignancies_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = haematological_malignancies_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
haematological_disease_nhsd = patients.minimum_of("haematopoietic_stem_cell_transplant_nhsd_snomed",
"haematopoietic_stem_cell_transplant_nhsd_icd10",
"haematopoietic_stem_cell_transplant_nhsd_opcs4",
#"haematological_malignancies_nhsd_snomed",
"haematological_malignancies_nhsd_icd10"),
## Renal disease
ckd_stage_5_nhsd_snomed = patients.with_these_clinical_events(
ckd_stage_5_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
ckd_stage_5_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = ckd_stage_5_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
ckd_stage_5_nhsd = patients.minimum_of("ckd_stage_5_nhsd_snomed", "ckd_stage_5_nhsd_icd10"),
## Liver disease
liver_disease_nhsd_snomed = patients.with_these_clinical_events(
ckd_stage_5_nhsd_snomed_codes,  # NB: the CKD stage 5 SNOMED codelist is reused here for the liver disease variable
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
liver_disease_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = ckd_stage_5_nhsd_icd10_codes,  # NB: the CKD stage 5 ICD-10 codelist is reused here for the liver disease variable
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
liver_disease_nhsd = patients.minimum_of("liver_disease_nhsd_snomed", "liver_disease_nhsd_icd10"),
## Immune-mediated inflammatory disorders (IMID)
imid_nhsd = patients.with_these_clinical_events(
codelist = combine_codelists(immunosuppresant_drugs_dmd_codes, immunosuppresant_drugs_snomed_codes,
oral_steroid_drugs_dmd_codes,
oral_steroid_drugs_snomed_codes),
returning = "date",
find_last_match_in_period = True,
date_format = "YYYY-MM-DD",
),
## Primary immune deficiencies
immunosupression_nhsd = patients.with_these_clinical_events(
immunosupression_nhsd_codes,
returning = "date",
find_last_match_in_period = True,
date_format = "YYYY-MM-DD",
),
## HIV/AIDS
hiv_aids_nhsd_snomed = patients.with_these_clinical_events(
hiv_aids_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
hiv_aids_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = hiv_aids_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
hiv_aids_nhsd = patients.minimum_of("hiv_aids_nhsd_snomed", "hiv_aids_nhsd_icd10"),
## Solid organ transplant
solid_organ_transplant_nhsd_snomed = patients.with_these_clinical_events(
solid_organ_transplant_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
solid_organ_transplant_nhsd_opcs4 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_procedures = solid_organ_transplant_nhsd_opcs4_codes,
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
return_expectations = {
"date": {"earliest": "2020-02-01"},
"rate": "exponential_increase",
"incidence": 0.01,
},
),
solid_organ_transplant_nhsd = patients.minimum_of("solid_organ_transplant_nhsd_snomed", "solid_organ_transplant_nhsd_opcs4"),
## Rare neurological conditions
### Multiple sclerosis
multiple_sclerosis_nhsd_snomed = patients.with_these_clinical_events(
multiple_sclerosis_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
multiple_sclerosis_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = multiple_sclerosis_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
multiple_sclerosis_nhsd = patients.minimum_of("multiple_sclerosis_nhsd_snomed", "multiple_sclerosis_nhsd_icd10"),
### Motor neurone disease
motor_neurone_disease_nhsd_snomed = patients.with_these_clinical_events(
motor_neurone_disease_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
motor_neurone_disease_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = motor_neurone_disease_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
motor_neurone_disease_nhsd = patients.minimum_of("motor_neurone_disease_nhsd_snomed", "motor_neurone_disease_nhsd_icd10"),
### Myasthenia gravis
myasthenia_gravis_nhsd_snomed = patients.with_these_clinical_events(
myasthenia_gravis_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
myasthenia_gravis_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = myasthenia_gravis_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
myasthenia_gravis_nhsd = patients.minimum_of("myasthenia_gravis_nhsd_snomed", "myasthenia_gravis_nhsd_icd10"),
### Huntington’s disease
huntingtons_disease_nhsd_snomed = patients.with_these_clinical_events(
huntingtons_disease_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
huntingtons_disease_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = huntingtons_disease_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
huntingtons_disease_nhsd = patients.minimum_of("huntingtons_disease_nhsd_snomed", "huntingtons_disease_nhsd_icd10"),
# CLINICAL/DEMOGRAPHIC COVARIATES ----
## Sex
sex = patients.sex(
return_expectations = {
"rate": "universal",
"category": {"ratios": {"M": 0.49, "F": 0.51}},
}
),
## Ethnicity
ethnicity_primis = patients.with_these_clinical_events(
ethnicity_primis_codes,
returning = "category",
find_last_match_in_period = True,
include_date_of_match = False,
return_expectations = {
"category": {"ratios": {"1": 0.2, "2": 0.2, "3": 0.2, "4": 0.2, "5": 0.2}},
"incidence": 0.75,
},
),
ethnicity_sus = patients.with_ethnicity_from_sus(
returning = "group_6",
use_most_frequent_code = True,
return_expectations = {
"category": {"ratios": {"1": 0.2, "2": 0.2, "3": 0.2, "4": 0.2, "5": 0.2}},
"incidence": 0.8,
},
),
## Index of multiple deprivation
imd = patients.categorised_as(
{"0": "DEFAULT",
"1": """index_of_multiple_deprivation >=1 AND index_of_multiple_deprivation < 32844*1/5""",
"2": """index_of_multiple_deprivation >= 32844*1/5 AND index_of_multiple_deprivation < 32844*2/5""",
"3": """index_of_multiple_deprivation >= 32844*2/5 AND index_of_multiple_deprivation < 32844*3/5""",
"4": """index_of_multiple_deprivation >= 32844*3/5 AND index_of_multiple_deprivation < 32844*4/5""",
"5": """index_of_multiple_deprivation >= 32844*4/5 """,
},
index_of_multiple_deprivation = patients.address_as_of(
"index_date",
returning = "index_of_multiple_deprivation",
round_to_nearest = 100,
),
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"0": 0.01,
"1": 0.20,
"2": 0.20,
"3": 0.20,
"4": 0.20,
"5": 0.19,
}},
},
),
## Region - NHS England 9 regions
region_nhs = patients.registered_practice_as_of(
"index_date",
returning = "nuts1_region_name",
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"North East": 0.1,
"North West": 0.1,
"Yorkshire and The Humber": 0.1,
"East Midlands": 0.1,
"West Midlands": 0.1,
"East": 0.1,
"London": 0.2,
"South West": 0.1,
"South East": 0.1,},},
},
),
region_covid_therapeutics = patients.with_covid_therapeutics(
#with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = ["Sotrovimab", "Molnupiravir", "Casirivimab and imdevimab"],
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "region",
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"North East": 0.1,
"North West": 0.1,
"Yorkshire and The Humber": 0.1,
"East Midlands": 0.1,
"West Midlands": 0.1,
"East": 0.1,
"London": 0.2,
"South West": 0.1,
"South East": 0.1,},},
},
),
## CMDUs/ICS
)
| 31.255411
| 128
| 0.673084
|
################################################################################
#
# Description: This script provides the formal specification of the study data
# that will be extracted from the OpenSAFELY database.
#
# Output: output/data/input_*.csv.gz
#
# Author(s): M Green (edited by H Curtis)
# Date last updated: 03/02/2022
#
################################################################################
# IMPORT STATEMENTS ----
## Import code building blocks from cohort extractor package
from cohortextractor import (
StudyDefinition,
patients,
codelist_from_csv,
codelist,
filter_codes_by_category,
combine_codelists,
Measure
)
## Import codelists from codelist.py (which pulls them from the codelist folder)
from codelists import *
# DEFINE STUDY POPULATION ----
## Define study time variables
from datetime import date
campaign_start = "2021-12-16"
end_date = date.today().isoformat()
## Define study population and variables
study = StudyDefinition(
# PRELIMINARIES ----
## Configure the expectations framework
default_expectations = {
"date": {"earliest": "2021-11-01", "latest": "today"},
"rate": "uniform",
"incidence": 0.4,
},
## Define index date
index_date = campaign_start,
# POPULATION ----
population = patients.satisfying(
"""
(registered_eligible OR registered_treated)
AND
NOT has_died
AND
(sotrovimab_covid_therapeutics
OR molnupiravir_covid_therapeutics
OR casirivimab_covid_therapeutics
OR covid_test_positive
)
""",
has_died = patients.died_from_any_cause(
on_or_before = "index_date - 1 day",
returning = "binary_flag",
),
),
# TREATMENT - NEUTRALISING MONOCLONAL ANTIBODIES OR ANTIVIRALS ----
## Sotrovimab
sotrovimab_covid_therapeutics = patients.with_covid_therapeutics(
#with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = "Sotrovimab",
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "date",
date_format = "YYYY-MM-DD",
return_expectations = {
"date": {"earliest": "2021-12-20"},
"incidence": 0.4
},
),
### Molnupiravir
molnupiravir_covid_therapeutics = patients.with_covid_therapeutics(
#with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = "Molnupiravir",
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "date",
date_format = "YYYY-MM-DD",
return_expectations = {
"date": {"earliest": "2021-12-20"},
"incidence": 0.4
},
),
### Casirivimab and imdevimab
casirivimab_covid_therapeutics = patients.with_covid_therapeutics(
#with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = "Casirivimab and imdevimab",
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "date",
date_format = "YYYY-MM-DD",
return_expectations = {
"date": {"earliest": "2021-12-20"},
"incidence": 0.4
},
),
date_treated = patients.minimum_of(
"sotrovimab_covid_therapeutics",
"molnupiravir_covid_therapeutics",
"casirivimab_covid_therapeutics",
),
# ELIGIBILITY CRITERIA VARIABLES ----
## Inclusion criteria variables
### SARS-CoV-2 test
# Note patients are eligible for treatment if diagnosed <=5d ago
# in the latest 5 days there may be patients identified as eligible who have not yet been treated
covid_test_positive = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "positive",
returning = "binary_flag",
on_or_after = "index_date - 5 days",
find_first_match_in_period = True,
restrict_to_earliest_specimen_date = False,
return_expectations = {
"incidence": 0.2
},
),
covid_test_date = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "positive",
find_first_match_in_period = True,
restrict_to_earliest_specimen_date = False,
returning = "date",
date_format = "YYYY-MM-DD",
on_or_after = "index_date - 5 days",
return_expectations = {
"date": {"earliest": "2021-12-20", "latest": "index_date"},
"incidence": 0.9
},
),
covid_positive_test_type = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "positive",
returning = "case_category",
on_or_after = "index_date - 5 days",
restrict_to_earliest_specimen_date = True,
return_expectations = {
"category": {"ratios": {"LFT_Only": 0.4, "PCR_Only": 0.4, "LFT_WithPCR": 0.2}},
"incidence": 0.2,
},
),
covid_positive_previous_30_days = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "positive",
returning = "binary_flag",
between = ["covid_test_date - 31 days", "covid_test_date - 1 day"],
find_last_match_in_period = True,
restrict_to_earliest_specimen_date = False,
return_expectations = {
"incidence": 0.05
},
),
### Onset of symptoms of COVID-19
symptomatic_covid_test = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "any",
returning = "symptomatic",
on_or_after = "index_date - 5 days",
find_first_match_in_period = True,
restrict_to_earliest_specimen_date = False,
return_expectations={
"incidence": 0.1,
"category": {
"ratios": {
"": 0.2,
"N": 0.2,
"Y": 0.6,
}
},
},
),
covid_symptoms_snomed = patients.with_these_clinical_events(
covid_symptoms_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
on_or_after = "index_date - 5 days",
),
# CENSORING ----
registered_eligible = patients.registered_as_of("covid_test_date"),
registered_treated = patients.registered_as_of("date_treated"),
## Death of any cause
death_date = patients.died_from_any_cause(
returning = "date_of_death",
date_format = "YYYY-MM-DD",
on_or_after = "covid_test_date",
return_expectations = {
"date": {"earliest": "2021-12-20", "latest": "index_date"},
"incidence": 0.1
},
),
## De-registration
dereg_date = patients.date_deregistered_from_all_supported_practices(
on_or_after = "covid_test_date",
date_format = "YYYY-MM-DD",
return_expectations = {
"date": {"earliest": "2021-12-20", "latest": "index_date"},
"incidence": 0.1
},
),
### Blueteq ‘high risk’ cohort
high_risk_cohort_covid_therapeutics = patients.with_covid_therapeutics(
with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = ["Sotrovimab", "Molnupiravir","Casirivimab and imdevimab"],
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "risk_group",
date_format = "YYYY-MM-DD",
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"Down's syndrome": 0.1,
"Sickle cell disease": 0.1,
"solid cancer": 0.1,
"haematological diseases, stem cell transplant recipients": 0.1,
"renal disease": 0.1,
"liver disease": 0.1,
"immune-mediated inflammatory disorders (IMID)": 0.2,
"Primary immune deficiencies": 0.1,
"HIV/AIDS": 0.1,},},
},
),
### NHSD ‘high risk’ cohort (codelist to be defined if/when data available)
# high_risk_cohort_nhsd = patients.with_these_clinical_events(
# high_risk_cohort_nhsd_codes,
# between = [campaign_start, index_date],
# returning = "date",
# date_format = "YYYY-MM-DD",
# find_first_match_in_period = True,
# ),
## Exclusion criteria
### Pattern of clinical presentation indicates that there is recovery rather than risk of deterioration from infection
# (not currently possible to define/code)
### Require hospitalisation for COVID-19
## NB this data lags behind the therapeutics/testing data so may be missing
covid_hospital_admission_date = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = covid_icd10_codes,
on_or_after = "index_date - 5 days",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
return_expectations = {
"date": {"earliest": "index_date - 5 days", "latest": "index_date"},
"rate": "uniform",
"incidence": 0.05
},
),
### New supplemental oxygen requirement specifically for the management of COVID-19 symptoms
# (not currently possible to define/code)
### Children weighing less than 40kg
# (not currently possible to define/code)
### Children aged under 12 years
age = patients.age_as_of(
"index_date",
return_expectations = {
"rate": "universal",
"int": {"distribution": "population_ages"},
"incidence" : 0.9
},
),
### Known hypersensitivity reaction to the active substances or to any of the excipients of sotrovimab
# (not currently possible to define/code)
# HIGH RISK GROUPS ----
## Down's syndrome
downs_syndrome_nhsd_snomed = patients.with_these_clinical_events(
downs_syndrome_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
downs_syndrome_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = downs_syndrome_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
downs_syndrome_nhsd = patients.minimum_of("downs_syndrome_nhsd_snomed", "downs_syndrome_nhsd_icd10"),
## Sickle cell disease
sickle_cell_disease_nhsd_snomed = patients.with_these_clinical_events(
sickle_cell_disease_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
sickle_cell_disease_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = sickle_cell_disease_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
sickle_cell_disease_nhsd = patients.minimum_of("sickle_cell_disease_nhsd_snomed", "sickle_cell_disease_nhsd_icd10"),
## Solid cancer
cancer_opensafely_snomed = patients.with_these_clinical_events(
combine_codelists(
non_haematological_cancer_opensafely_snomed_codes,
lung_cancer_opensafely_snomed_codes,
chemotherapy_radiotherapy_opensafely_snomed_codes
),
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
## Haematological diseases
haematopoietic_stem_cell_transplant_nhsd_snomed = patients.with_these_clinical_events(
haematopoietic_stem_cell_transplant_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
haematopoietic_stem_cell_transplant_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = haematopoietic_stem_cell_transplant_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
haematopoietic_stem_cell_transplant_nhsd_opcs4 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_procedures = haematopoietic_stem_cell_transplant_nhsd_opcs4_codes,
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
return_expectations = {
"date": {"earliest": "2020-02-01"},
"rate": "exponential_increase",
"incidence": 0.01,
},
),
# haematological_malignancies_nhsd_snomed = patients.with_these_clinical_events(
# haematological_malignancies_nhsd_snomed_codes,
# returning = "date",
# date_format = "YYYY-MM-DD",
# find_first_match_in_period = True,
# #on_or_before = "end_date",
# ),
haematological_malignancies_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = haematological_malignancies_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
haematological_disease_nhsd = patients.minimum_of("haematopoietic_stem_cell_transplant_nhsd_snomed",
"haematopoietic_stem_cell_transplant_nhsd_icd10",
"haematopoietic_stem_cell_transplant_nhsd_opcs4",
#"haematological_malignancies_nhsd_snomed",
"haematological_malignancies_nhsd_icd10"),
## Renal disease
ckd_stage_5_nhsd_snomed = patients.with_these_clinical_events(
ckd_stage_5_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
ckd_stage_5_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = ckd_stage_5_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
ckd_stage_5_nhsd = patients.minimum_of("ckd_stage_5_nhsd_snomed", "ckd_stage_5_nhsd_icd10"),
## Liver disease
liver_disease_nhsd_snomed = patients.with_these_clinical_events(
ckd_stage_5_nhsd_snomed_codes,  # NB: the CKD stage 5 SNOMED codelist is reused here for the liver disease variable
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
liver_disease_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = ckd_stage_5_nhsd_icd10_codes,  # NB: the CKD stage 5 ICD-10 codelist is reused here for the liver disease variable
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
liver_disease_nhsd = patients.minimum_of("liver_disease_nhsd_snomed", "liver_disease_nhsd_icd10"),
## Immune-mediated inflammatory disorders (IMID)
imid_nhsd = patients.with_these_clinical_events(
codelist = combine_codelists(immunosuppresant_drugs_dmd_codes, immunosuppresant_drugs_snomed_codes,
oral_steroid_drugs_dmd_codes,
oral_steroid_drugs_snomed_codes),
returning = "date",
find_last_match_in_period = True,
date_format = "YYYY-MM-DD",
),
## Primary immune deficiencies
immunosupression_nhsd = patients.with_these_clinical_events(
immunosupression_nhsd_codes,
returning = "date",
find_last_match_in_period = True,
date_format = "YYYY-MM-DD",
),
## HIV/AIDS
hiv_aids_nhsd_snomed = patients.with_these_clinical_events(
hiv_aids_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
hiv_aids_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = hiv_aids_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
hiv_aids_nhsd = patients.minimum_of("hiv_aids_nhsd_snomed", "hiv_aids_nhsd_icd10"),
## Solid organ transplant
solid_organ_transplant_nhsd_snomed = patients.with_these_clinical_events(
solid_organ_transplant_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
solid_organ_transplant_nhsd_opcs4 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_procedures = solid_organ_transplant_nhsd_opcs4_codes,
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
return_expectations = {
"date": {"earliest": "2020-02-01"},
"rate": "exponential_increase",
"incidence": 0.01,
},
),
solid_organ_transplant_nhsd = patients.minimum_of("solid_organ_transplant_nhsd_snomed", "solid_organ_transplant_nhsd_opcs4"),
## Rare neurological conditions
### Multiple sclerosis
multiple_sclerosis_nhsd_snomed = patients.with_these_clinical_events(
multiple_sclerosis_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
multiple_sclerosis_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = multiple_sclerosis_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
multiple_sclerosis_nhsd = patients.minimum_of("multiple_sclerosis_nhsd_snomed", "multiple_sclerosis_nhsd_icd10"),
### Motor neurone disease
motor_neurone_disease_nhsd_snomed = patients.with_these_clinical_events(
motor_neurone_disease_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
motor_neurone_disease_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = motor_neurone_disease_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
motor_neurone_disease_nhsd = patients.minimum_of("motor_neurone_disease_nhsd_snomed", "motor_neurone_disease_nhsd_icd10"),
### Myasthenia gravis
myasthenia_gravis_nhsd_snomed = patients.with_these_clinical_events(
myasthenia_gravis_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
myasthenia_gravis_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = myasthenia_gravis_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
myasthenia_gravis_nhsd = patients.minimum_of("myasthenia_gravis_nhsd_snomed", "myasthenia_gravis_nhsd_icd10"),
### Huntington’s disease
huntingtons_disease_nhsd_snomed = patients.with_these_clinical_events(
huntingtons_disease_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
huntingtons_disease_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = huntingtons_disease_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
huntingtons_disease_nhsd = patients.minimum_of("huntingtons_disease_nhsd_snomed", "huntingtons_disease_nhsd_icd10"),
# CLINICAL/DEMOGRAPHIC COVARIATES ----
## Sex
sex = patients.sex(
return_expectations = {
"rate": "universal",
"category": {"ratios": {"M": 0.49, "F": 0.51}},
}
),
## Ethnicity
ethnicity_primis = patients.with_these_clinical_events(
ethnicity_primis_codes,
returning = "category",
find_last_match_in_period = True,
include_date_of_match = False,
return_expectations = {
"category": {"ratios": {"1": 0.2, "2": 0.2, "3": 0.2, "4": 0.2, "5": 0.2}},
"incidence": 0.75,
},
),
ethnicity_sus = patients.with_ethnicity_from_sus(
returning = "group_6",
use_most_frequent_code = True,
return_expectations = {
"category": {"ratios": {"1": 0.2, "2": 0.2, "3": 0.2, "4": 0.2, "5": 0.2}},
"incidence": 0.8,
},
),
## Index of multiple deprivation
imd = patients.categorised_as(
{"0": "DEFAULT",
"1": """index_of_multiple_deprivation >=1 AND index_of_multiple_deprivation < 32844*1/5""",
"2": """index_of_multiple_deprivation >= 32844*1/5 AND index_of_multiple_deprivation < 32844*2/5""",
"3": """index_of_multiple_deprivation >= 32844*2/5 AND index_of_multiple_deprivation < 32844*3/5""",
"4": """index_of_multiple_deprivation >= 32844*3/5 AND index_of_multiple_deprivation < 32844*4/5""",
"5": """index_of_multiple_deprivation >= 32844*4/5 """,
},
index_of_multiple_deprivation = patients.address_as_of(
"index_date",
returning = "index_of_multiple_deprivation",
round_to_nearest = 100,
),
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"0": 0.01,
"1": 0.20,
"2": 0.20,
"3": 0.20,
"4": 0.20,
"5": 0.19,
}},
},
),
## Region - NHS England 9 regions
region_nhs = patients.registered_practice_as_of(
"index_date",
returning = "nuts1_region_name",
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"North East": 0.1,
"North West": 0.1,
"Yorkshire and The Humber": 0.1,
"East Midlands": 0.1,
"West Midlands": 0.1,
"East": 0.1,
"London": 0.2,
"South West": 0.1,
"South East": 0.1,},},
},
),
region_covid_therapeutics = patients.with_covid_therapeutics(
#with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = ["Sotrovimab", "Molnupiravir", "Casirivimab and imdevimab"],
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "region",
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"North East": 0.1,
"North West": 0.1,
"Yorkshire and The Humber": 0.1,
"East Midlands": 0.1,
"West Midlands": 0.1,
"East": 0.1,
"London": 0.2,
"South West": 0.1,
"South East": 0.1,},},
},
),
## CMDUs/ICS
)
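Each high-risk group in the study definition above is derived from several coding systems (SNOMED primary-care events, ICD-10 diagnoses, OPCS-4 procedures) and then collapsed with patients.minimum_of. Below is a minimal plain-Python sketch of what that combination computes for one patient — the earliest non-missing date across sources. It is illustrative only, not the cohort-extractor implementation, and the helper name earliest_date is made up.

from datetime import date
from typing import Optional

def earliest_date(*candidates: Optional[date]) -> Optional[date]:
    # earliest of the non-missing candidate dates, or None if every source is missing
    present = [d for d in candidates if d is not None]
    return min(present) if present else None

# e.g. a SNOMED-coded record on 2021-03-04 and no matching ICD-10 admission
assert earliest_date(date(2021, 3, 4), None) == date(2021, 3, 4)
# all sources missing -> the combined variable is missing too
assert earliest_date(None, None) is None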
| 0
| 0
| 0
|
9c633934769dee6380c21948f3259c49e26608fa
| 5,146
|
py
|
Python
|
records_mover/db/bigquery/unloader.py
|
cwegrzyn/records-mover
|
e3b71d6c09d99d0bcd6a956b9d09d20f8abe98d2
|
[
"Apache-2.0"
] | 36
|
2020-03-17T11:56:51.000Z
|
2022-01-19T16:03:32.000Z
|
records_mover/db/bigquery/unloader.py
|
cwegrzyn/records-mover
|
e3b71d6c09d99d0bcd6a956b9d09d20f8abe98d2
|
[
"Apache-2.0"
] | 60
|
2020-03-02T23:13:29.000Z
|
2021-05-19T15:05:42.000Z
|
records_mover/db/bigquery/unloader.py
|
cwegrzyn/records-mover
|
e3b71d6c09d99d0bcd6a956b9d09d20f8abe98d2
|
[
"Apache-2.0"
] | 4
|
2020-08-11T13:17:37.000Z
|
2021-11-05T21:11:52.000Z
|
import sqlalchemy
from contextlib import contextmanager
from typing import List, Iterator, Optional, Union, Tuple
import logging
from google.cloud.bigquery.dbapi.connection import Connection
from google.cloud.bigquery.client import Client
from google.cloud.bigquery.job import ExtractJobConfig
from records_mover.db.unloader import Unloader
from records_mover.records.records_format import BaseRecordsFormat, AvroRecordsFormat
from records_mover.url.base import BaseDirectoryUrl
from records_mover.url.resolver import UrlResolver
from records_mover.records.unload_plan import RecordsUnloadPlan
from records_mover.records.records_directory import RecordsDirectory
from records_mover.db.errors import NoTemporaryBucketConfiguration
logger = logging.getLogger(__name__)
| 45.539823
| 115
| 0.666148
|
import sqlalchemy
from contextlib import contextmanager
from typing import List, Iterator, Optional, Union, Tuple
import logging
from google.cloud.bigquery.dbapi.connection import Connection
from google.cloud.bigquery.client import Client
from google.cloud.bigquery.job import ExtractJobConfig
from records_mover.db.unloader import Unloader
from records_mover.records.records_format import BaseRecordsFormat, AvroRecordsFormat
from records_mover.url.base import BaseDirectoryUrl
from records_mover.url.resolver import UrlResolver
from records_mover.records.unload_plan import RecordsUnloadPlan
from records_mover.records.records_directory import RecordsDirectory
from records_mover.db.errors import NoTemporaryBucketConfiguration
logger = logging.getLogger(__name__)
class BigQueryUnloader(Unloader):
def __init__(self,
db: Union[sqlalchemy.engine.Connection, sqlalchemy.engine.Engine],
url_resolver: UrlResolver,
gcs_temp_base_loc: Optional[BaseDirectoryUrl])\
-> None:
self.db = db
self.url_resolver = url_resolver
self.gcs_temp_base_loc = gcs_temp_base_loc
super().__init__(db=db)
def can_unload_format(self, target_records_format: BaseRecordsFormat) -> bool:
if isinstance(target_records_format, AvroRecordsFormat):
return True
return False
def can_unload_to_scheme(self, scheme: str) -> bool:
if scheme == 'gs':
return True
# Otherwise we'll need a temporary bucket configured for
# BigQuery to unload into
return self.gcs_temp_base_loc is not None
def known_supported_records_formats_for_unload(self) -> List[BaseRecordsFormat]:
return [AvroRecordsFormat()]
@contextmanager
def temporary_unloadable_directory_loc(self) -> Iterator[BaseDirectoryUrl]:
if self.gcs_temp_base_loc is None:
raise NoTemporaryBucketConfiguration('Please provide a scratch GCS URL in your config '
'(e.g., set SCRATCH_GCS_URL to a gs:// URL)')
else:
with self.gcs_temp_base_loc.temporary_directory() as temp_loc:
yield temp_loc
def _parse_bigquery_schema_name(self, schema: str) -> Tuple[Optional[str], str]:
# https://github.com/mxmzdlv/pybigquery/blob/master/pybigquery/sqlalchemy_bigquery.py#L320
dataset = None
project = None
schema_split = schema.split('.')
if len(schema_split) == 1:
dataset, = schema_split
elif len(schema_split) == 2:
project, dataset = schema_split
else:
raise ValueError(f"Could not understand schema name {schema}")
return (project, dataset)
def _extract_job_config(self, unload_plan: RecordsUnloadPlan) -> ExtractJobConfig:
config = ExtractJobConfig()
if isinstance(unload_plan.records_format, AvroRecordsFormat):
config.destination_format = 'AVRO'
# https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-avro#logical_types
config.use_avro_logical_types = True
else:
raise NotImplementedError(f'Please add support for {unload_plan.records_format}')
return config
def unload(self,
schema: str,
table: str,
unload_plan: RecordsUnloadPlan,
directory: RecordsDirectory) -> Optional[int]:
if directory.scheme != 'gs':
with self.temporary_unloadable_directory_loc() as temp_gcs_loc:
temp_directory = RecordsDirectory(temp_gcs_loc)
out = self.unload(schema=schema,
table=table,
unload_plan=unload_plan,
directory=temp_directory)
temp_directory.copy_to(directory.loc)
return out
logger.info("Loading from records directory into BigQuery")
# https://googleapis.github.io/google-cloud-python/latest/bigquery/usage/tables.html#creating-a-table
connection: Connection =\
self.db.engine.raw_connection().connection
# https://google-cloud.readthedocs.io/en/latest/bigquery/generated/google.cloud.bigquery.client.Client.html
client: Client = connection._client
project_id, dataset_id = self._parse_bigquery_schema_name(schema)
job_config = self._extract_job_config(unload_plan)
records_format = unload_plan.records_format
filename = records_format.generate_filename('output')
destination_uri = directory.loc.file_in_this_directory(filename)
job = client.extract_table(f"{schema}.{table}",
destination_uri.url,
# Must match the destination dataset location.
job_config=job_config)
job.result() # Waits for the extract job to complete.
logger.info(f"Unloaded from {dataset_id}:{table} into {filename}")
directory.save_preliminary_manifest()
return None
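The _parse_bigquery_schema_name helper above splits a SQLAlchemy schema string into an optional project and a dataset, following the pybigquery convention it links to. Here is a standalone sketch of the same rule, separate from records-mover internals; the project and dataset names in the assertions are hypothetical.

from typing import Optional, Tuple

def parse_bigquery_schema_name(schema: str) -> Tuple[Optional[str], str]:
    # "dataset" -> (None, "dataset"); "project.dataset" -> ("project", "dataset"); more dots are rejected
    parts = schema.split('.')
    if len(parts) == 1:
        return (None, parts[0])
    if len(parts) == 2:
        return (parts[0], parts[1])
    raise ValueError(f"Could not understand schema name {schema}")

assert parse_bigquery_schema_name("analytics") == (None, "analytics")
assert parse_bigquery_schema_name("my-project.analytics") == ("my-project", "analytics")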
| 4,107
| 247
| 23
|
82eca7e21b92148d602ade08730e4aef0f573478
| 1,219
|
py
|
Python
|
depth_completion/config/resnet18_Baseline_config.py
|
tsunghan-mama/Depth-Completion
|
d73328d1d704470a6fd3859e2e1810bc311b1dc3
|
[
"MIT"
] | 67
|
2020-07-11T09:44:10.000Z
|
2022-03-30T07:38:46.000Z
|
depth_completion/config/resnet18_Baseline_config.py
|
tsunghan-mama/Depth-Completion
|
d73328d1d704470a6fd3859e2e1810bc311b1dc3
|
[
"MIT"
] | 8
|
2020-07-14T05:50:03.000Z
|
2022-01-19T09:07:46.000Z
|
depth_completion/config/resnet18_Baseline_config.py
|
patrickwu2/Depth-Completion
|
e9c52e2cb2dce558d6787e246bbc51c1670c16ca
|
[
"MIT"
] | 9
|
2019-10-12T01:09:51.000Z
|
2020-05-26T21:35:28.000Z
|
common_config = {
}
train_config = {
"dataset_name": "matterport",
"model_name": "ResNet18SkipConnection",
"in_channel": 9,
"device_ids": [0],
"seed": 7122,
"num_workers": 8,
"mode": "train",
"train_path": "/tmp2/tsunghan/new_matterport/v1",
"lr": 1e-4,
"batch_size": 8,
"loss_func": {('depth(L2)', 'depth_L2_loss', 1.)},
"load_model_path": None,
"param_only": False,
"validation": True,
"valid_path": "/tmp2/tsunghan/new_matterport/v1",
"epoches": 100,
"save_prefix": "",
}
test_config = {
"dataset_name": "matterport",
"model_name": "ResNet18SkipConnection",
"in_channel": 9,
"device_ids": [0, 1, 2, 3],
"seed": 7122,
"num_workers": 8,
"mode": "test",
"test_path": "/tmp2/tsunghan/new_matterport/v1",
"lr": 1e-4,
"batch_size": 1,
"loss_func": {('depth(L2)', 'depth_L2_loss', 1.), ('img_grad', 'img_grad_loss', 1e-3)},
"load_model_path": "/tmp2/tsunghan/twcc_data/twcc_experience_resnet/matterport_ResNet18SkipConnection_b10_lr0.0001_/epoch_13.pt",
"param_only": True,
"epoches": 100,
"save_prefix": "resnet",
"output":"/tmp2/tsunghan/experiment_result/mat_npy/r18sc_epo13",
}
| 27.088889
| 133
| 0.61854
|
common_config = {
}
train_config = {
"dataset_name": "matterport",
"model_name": "ResNet18SkipConnection",
"in_channel": 9,
"device_ids": [0],
"seed": 7122,
"num_workers": 8,
"mode": "train",
"train_path": "/tmp2/tsunghan/new_matterport/v1",
"lr": 1e-4,
"batch_size": 8,
"loss_func": {('depth(L2)', 'depth_L2_loss', 1.)},
"load_model_path": None,
"param_only": False,
"validation": True,
"valid_path": "/tmp2/tsunghan/new_matterport/v1",
"epoches": 100,
"save_prefix": "",
}
test_config = {
"dataset_name": "matterport",
"model_name": "ResNet18SkipConnection",
"in_channel": 9,
"device_ids": [0, 1, 2, 3],
"seed": 7122,
"num_workers": 8,
"mode": "test",
"test_path": "/tmp2/tsunghan/new_matterport/v1",
"lr": 1e-4,
"batch_size": 1,
"loss_func": {('depth(L2)', 'depth_L2_loss', 1.), ('img_grad', 'img_grad_loss', 1e-3)},
"load_model_path": "/tmp2/tsunghan/twcc_data/twcc_experience_resnet/matterport_ResNet18SkipConnection_b10_lr0.0001_/epoch_13.pt",
"param_only": True,
"epoches": 100,
"save_prefix": "resnet",
"output":"/tmp2/tsunghan/experiment_result/mat_npy/r18sc_epo13",
}
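A hypothetical consumption sketch for the dicts above (get_config and the merge order are assumptions, not part of the repository), assuming common_config, train_config and test_config are in scope:

def get_config(mode: str) -> dict:
    # overlay the mode-specific settings on the shared (currently empty) defaults
    cfg = dict(common_config)
    cfg.update(train_config if mode == "train" else test_config)
    return cfg

cfg = get_config("train")
print(cfg["model_name"], cfg["batch_size"])  # ResNet18SkipConnection 8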
| 0
| 0
| 0
|
38b4f6b2219146f62a43cb5525a1f50ceb4102df
| 660
|
py
|
Python
|
scheduler_task/study_apscheduler/examples/demo.py
|
2581676612/python
|
b309564a05838b23044bb8112fd4ef71307266b6
|
[
"MIT"
] | 112
|
2017-09-19T17:38:38.000Z
|
2020-05-27T18:00:27.000Z
|
scheduler_task/study_apscheduler/examples/demo.py
|
tomoncle/Python-notes
|
ce675486290c3d1c7c2e4890b57e3d0c8a1228cc
|
[
"MIT"
] | null | null | null |
scheduler_task/study_apscheduler/examples/demo.py
|
tomoncle/Python-notes
|
ce675486290c3d1c7c2e4890b57e3d0c8a1228cc
|
[
"MIT"
] | 56
|
2017-09-20T01:24:12.000Z
|
2020-04-16T06:19:31.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 17-8-13 11:33 AM
# @Author : Tom.Lee
# @CopyRight : 2016-2017 OpenBridge by yihecloud
# @File : demo.py
# @Product : PyCharm
# @Docs :
# @Source :
import os
from apscheduler.schedulers.blocking import BlockingScheduler
if __name__ == '__main__':
scheduler = BlockingScheduler()
scheduler.add_job('sys:stdout.write', 'interval', seconds=3, args=['tick ...\n'])
print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
try:
scheduler.start()
except (KeyboardInterrupt, SystemExit):
pass
| 26.4
| 85
| 0.587879
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 17-8-13 11:33 AM
# @Author : Tom.Lee
# @CopyRight : 2016-2017 OpenBridge by yihecloud
# @File : demo.py
# @Product : PyCharm
# @Docs :
# @Source :
import os
from apscheduler.schedulers.blocking import BlockingScheduler
if __name__ == '__main__':
scheduler = BlockingScheduler()
scheduler.add_job('sys:stdout.write', 'interval', seconds=3, args=['tick ...\n'])
print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
try:
scheduler.start()
except (KeyboardInterrupt, SystemExit):
pass
| 0
| 0
| 0
|
0fc246feb45369af60c1a8007ad889850bd24825
| 4,829
|
py
|
Python
|
clearblade/ClearBladeCore.py
|
sraman0302/ClearBlade-Python-SDK
|
bde192ef86969c8d1c592f7697ca104bc2362408
|
[
"Apache-2.0"
] | 2
|
2018-05-10T18:38:04.000Z
|
2020-12-19T08:14:21.000Z
|
clearblade/ClearBladeCore.py
|
sraman0302/ClearBlade-Python-SDK
|
bde192ef86969c8d1c592f7697ca104bc2362408
|
[
"Apache-2.0"
] | 6
|
2018-01-13T17:05:51.000Z
|
2021-09-01T18:25:41.000Z
|
clearblade/ClearBladeCore.py
|
sraman0302/ClearBlade-Python-SDK
|
bde192ef86969c8d1c592f7697ca104bc2362408
|
[
"Apache-2.0"
] | 4
|
2018-11-08T21:18:08.000Z
|
2021-05-10T01:07:14.000Z
|
from __future__ import absolute_import
import atexit
from . import Users
from . import Devices
from . import Collections
from . import Messaging
from . import Code
from .Developers import * # allows you to import Developer from ClearBladeCore
from . import cbLogs
#############
# USERS #
#############
###############
# DEVICES #
###############
############
# DATA #
############
############
# MQTT #
############
############
# CODE #
############
| 31.154839
| 168
| 0.600745
|
from __future__ import absolute_import
import atexit
from . import Users
from . import Devices
from . import Collections
from . import Messaging
from . import Code
from .Developers import * # allows you to import Developer from ClearBladeCore
from . import cbLogs
class System:
def __exitcode(self):
# forces all users to log out on system close.
# I did this to prevent possible token reuse
# after client code exits, even if they don't
# log their users out themselves.
while self.users:
self.users.pop(0).logout()
def __init__(self, systemKey, systemSecret, url="https://platform.clearblade.com", safe=True, sslVerify=True):
self.systemKey = systemKey
self.systemSecret = systemSecret
self.url = url
self.users = []
self.collections = []
self.messagingClients = []
self.devices = []
self.sslVerify = sslVerify
if not sslVerify:
cbLogs.warn("You have disabled SSL verification, this should only be done if your ClearBlade Platform instance is leveraging self signed SSL certificates.")
if safe:
atexit.register(self.__exitcode)
#############
# USERS #
#############
def User(self, email, password="", authToken=""):
user = Users.User(self, email, password=password, authToken=authToken)
if authToken == "":
user.authenticate()
return user
elif user.checkAuth():
return user
else:
cbLogs.error("Invalid User authToken")
exit(-1)
def AnonUser(self):
anon = Users.AnonUser(self)
anon.authenticate()
return anon
def registerUser(self, authenticatedUser, email, password):
n00b = Users.registerUser(self, authenticatedUser, email, password)
self.users.append(n00b)
return n00b
def ServiceUser(self, email, token):
user = Users.ServiceUser(self, email, token)
if user.checkAuth():
return user
else:
cbLogs.error("Service User ", email, "failed to Auth")
exit(-1)
###############
# DEVICES #
###############
def getDevices(self, authenticatedUser, query=None):
self.devices = Devices.getDevices(self, authenticatedUser, query)
return self.devices
def getDevice(self, authenticatedUser, name):
dev = Devices.getDevice(self, authenticatedUser, name)
return dev
def Device(self, name, key="", authToken=""):
dev = Devices.Device(system=self, name=name, key=key, authToken=authToken)
# check if dev in self.devices?
return dev
############
# DATA #
############
def Collection(self, authenticatedUser, collectionID="", collectionName=""):
if not collectionID and not collectionName:
cbLogs.error("beep")
exit(-1)
col = Collections.Collection(self, authenticatedUser, collectionID, collectionName)
self.collections.append(col)
return col
############
# MQTT #
############
def Messaging(self, user, port=1883, keepalive=30, url="", client_id="", use_tls=False):
msg = Messaging.Messaging(user, port, keepalive, url, client_id=client_id, use_tls=use_tls)
self.messagingClients.append(msg)
return msg
############
# CODE #
############
def Service(self, name):
return Code.Service(self, name)
class Query:
def __init__(self):
self.sorting = [] # only used in fetches. also, not implemented yet. TODO
self.filters = []
def Or(self, query):
# NOTE: you can't add filters after
# you Or two queries together.
# This function has to be the last step.
q = Query()
for filter in self.filters:
q.filters.append(filter)
for filter in query.filters:
q.filters.append(filter)
return q
def __addFilter(self, column, value, operator):
if len(self.filters) == 0:
self.filters.append([])
self.filters[0].append({operator: [{column: value}]})
def equalTo(self, column, value):
self.__addFilter(column, value, "EQ")
def greaterThan(self, column, value):
self.__addFilter(column, value, "GT")
def lessThan(self, column, value):
self.__addFilter(column, value, "LT")
def greaterThanEqualTo(self, column, value):
self.__addFilter(column, value, "GTE")
def lessThanEqualTo(self, column, value):
self.__addFilter(column, value, "LTE")
def notEqualTo(self, column, value):
self.__addFilter(column, value, "NEQ")
def matches(self, column, value):
self.__addFilter(column, value, "RE")
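A minimal usage sketch of the Query class above, based only on the methods it defines; the column names and values are hypothetical. As the comment in Or() notes, queries are combined only after all filters have been added.

from clearblade.ClearBladeCore import Query  # assuming the module above is importable

q1 = Query()
q1.equalTo("state", "active")
q1.greaterThan("temperature", 70)

q2 = Query()
q2.lessThanEqualTo("battery", 15)

combined = q1.Or(q2)  # no further filter calls on `combined` after this
print(combined.filters)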
| 3,669
| -17
| 638
|
78df92a0ac52515a71841949cff2f4cccb3a01f0
| 698
|
py
|
Python
|
GoogleCodeJam2017/Round0/TidyNumbers/TidyNumbers.py
|
Jspsun/CompetitiveCoding
|
a815bbcdab1fb30bd83730a7abd3505bff8bfb78
|
[
"MIT"
] | null | null | null |
GoogleCodeJam2017/Round0/TidyNumbers/TidyNumbers.py
|
Jspsun/CompetitiveCoding
|
a815bbcdab1fb30bd83730a7abd3505bff8bfb78
|
[
"MIT"
] | null | null | null |
GoogleCodeJam2017/Round0/TidyNumbers/TidyNumbers.py
|
Jspsun/CompetitiveCoding
|
a815bbcdab1fb30bd83730a7abd3505bff8bfb78
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
__main__()
| 21.151515
| 71
| 0.465616
|
def __main__():
f = open("in.txt", 'r')
o = open("out.txt", 'w')
noOfCases = int(f.readline())
for testNo in range(noOfCases):
counter = 0
data = f.readline()
output = solver(data[:-1])
output = int(output)
o.write("Case #" + str(testNo + 1) + ": " + str(output) + "\n")
def solver(n):
n = list(n)
dex = inOrder(n)
while dex != -1:
n[dex] = str(int(n[dex]) - 1)
n = n[:dex + 1] + ['9'] * (len(n) - dex - 1)
dex = inOrder(n)
return ''.join(n)
def inOrder(n):
for i in range(len(n) - 1):
if n[i] > n[i + 1]:
return i
return -1
if __name__ == '__main__':
__main__()
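A few worked checks for the solver above (assuming it is imported or pasted alongside): the first out-of-order digit is decremented and every digit to its right becomes 9, so "132" becomes "129" and "10" becomes "09", printed as 9 once cast to int.

assert solver("132") == "129"
assert int(solver("10")) == 9    # leading zero: "09" -> 9 after int()
assert solver("1234") == "1234"  # already tidy, unchanged
print("solver checks passed")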
| 585
| 0
| 68
|
9d9072a0352d441e7a4e2e3e0c976746c5e8f9af
| 986
|
py
|
Python
|
project_dashboard/projects/crud.py
|
KruizerChick/project-dashboard
|
aa1d3fa713e49049ac7184dbe44a1f915ff56906
|
[
"MIT"
] | null | null | null |
project_dashboard/projects/crud.py
|
KruizerChick/project-dashboard
|
aa1d3fa713e49049ac7184dbe44a1f915ff56906
|
[
"MIT"
] | null | null | null |
project_dashboard/projects/crud.py
|
KruizerChick/project-dashboard
|
aa1d3fa713e49049ac7184dbe44a1f915ff56906
|
[
"MIT"
] | null | null | null |
""" CRUD class for Projects app """
from crudbuilder.abstract import BaseCrudBuilder
from .models.project import Project
from .models.stakeholder import Stakeholder
class ProjectCrud(BaseCrudBuilder):
""" CRUD class for Project model """
model = Project
search_fields = ["id", "name", "description"]
tables2_fields = ("name", "description", 'is_closed')
tables2_css_class = "table table-bordered table-condensed"
login_required = True
permission_required = True
# tables2_pagination = 20 # default is 10
modelform_excludes = ['created']
# permissions = {}
# custom_templates = {}
class StakeholderCrud(BaseCrudBuilder):
""" CRUD class for Stakeholder model """
model = Stakeholder
search_fields = ["full_name", ]
tables2_fields = ("full_name", "organization")
tables2_css_class = "table table-bordered table-condensed"
login_required = True
permission_required = True
modelform_excludes = ['created']
| 29
| 62
| 0.703854
|
""" CRUD class for Projects app """
from crudbuilder.abstract import BaseCrudBuilder
from .models.project import Project
from .models.stakeholder import Stakeholder
class ProjectCrud(BaseCrudBuilder):
""" CRUD class for Project model """
model = Project
search_fields = ["id", "name", "description"]
tables2_fields = ("name", "description", 'is_closed')
tables2_css_class = "table table-bordered table-condensed"
login_required = True
permission_required = True
# tables2_pagination = 20 # default is 10
modelform_excludes = ['created']
# permissions = {}
# custom_templates = {}
class StakeholderCrud(BaseCrudBuilder):
""" CRUD class for Stakeholder model """
model = Stakeholder
search_fields = ["full_name", ]
tables2_fields = ("full_name", "organization")
tables2_css_class = "table table-bordered table-condensed"
login_required = True
permission_required = True
modelform_excludes = ['created']
| 0
| 0
| 0
|
db476ed9048fe8a87e8164fd5dd10cfe61c7b0bf
| 486
|
py
|
Python
|
L1Trigger/L1TMuonOverlap/python/fakeOmtfFwVersion_cff.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 2
|
2020-10-26T18:40:32.000Z
|
2021-04-10T16:33:25.000Z
|
L1Trigger/L1TMuonOverlap/python/fakeOmtfFwVersion_cff.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 30
|
2015-11-04T11:42:27.000Z
|
2021-12-01T07:56:34.000Z
|
L1Trigger/L1TMuonOverlap/python/fakeOmtfFwVersion_cff.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 8
|
2016-03-25T07:17:43.000Z
|
2021-07-08T17:11:21.000Z
|
import FWCore.ParameterSet.Config as cms
omtfFwVersionSource = cms.ESSource(
"EmptyESSource",
recordName = cms.string('L1TMuonOverlapFwVersionRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
###OMTF FW ESProducer.
omtfFwVersion = cms.ESProducer(
"L1TMuonOverlapFwVersionESProducer",
algoVersion = cms.uint32(0x110),
layersVersion = cms.uint32(6),
patternsVersion = cms.uint32(3),
synthDate = cms.string("2001-01-01 00:00")
)
| 25.578947
| 58
| 0.716049
|
import FWCore.ParameterSet.Config as cms
omtfFwVersionSource = cms.ESSource(
"EmptyESSource",
recordName = cms.string('L1TMuonOverlapFwVersionRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
###OMTF FW ESProducer.
omtfFwVersion = cms.ESProducer(
"L1TMuonOverlapFwVersionESProducer",
algoVersion = cms.uint32(0x110),
layersVersion = cms.uint32(6),
patternsVersion = cms.uint32(3),
synthDate = cms.string("2001-01-01 00:00")
)
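A minimal sketch of how a _cff fragment like this is typically pulled into a cmsRun configuration; the process name is arbitrary and the dotted module path is inferred from the file location above.

import FWCore.ParameterSet.Config as cms

process = cms.Process("L1TMuonTest")
# makes omtfFwVersionSource and omtfFwVersion available to the process
process.load("L1Trigger.L1TMuonOverlap.fakeOmtfFwVersion_cff")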
| 0
| 0
| 0
|
bccbd46e4500f876a02aadf6e0c1065d389cdf38
| 4,603
|
py
|
Python
|
planning/planning/page/check_in_out/check_in_out.py
|
nishta/planning
|
5be1574111b9b94ec75c74960ace4314985b0014
|
[
"MIT"
] | null | null | null |
planning/planning/page/check_in_out/check_in_out.py
|
nishta/planning
|
5be1574111b9b94ec75c74960ace4314985b0014
|
[
"MIT"
] | null | null | null |
planning/planning/page/check_in_out/check_in_out.py
|
nishta/planning
|
5be1574111b9b94ec75c74960ace4314985b0014
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import frappe
from frappe.utils import getdate, validate_email_add, today
import datetime
from planning.planning.myfunction import mail_format_pms,actual_date_update,close_task_update
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
| 39.681034
| 291
| 0.74169
|
from __future__ import unicode_literals
import frappe
from frappe.utils import getdate, validate_email_add, today
import datetime
from planning.planning.myfunction import mail_format_pms,actual_date_update,close_task_update
@frappe.whitelist()
def checking_checkout(task=None,check_status=None,name=None):
cur_date_time=frappe.utils.data.now ()
user_name=frappe.session.user
if(task):
if(check_status=="0"):
doctype="NNTask";
#select parent,members,employee_name,parenttype from `tabNNAssign` where parenttype=%s and employee_name=%s",(doctype,user_name)
count=frappe.db.sql("select task from `tabNNTask Check In Out` where status=1 and emp_name=%s",user_name);
if(count):
task=count[0][0]
frappe.msgprint("Please Checkout <b>"+ task+"</b> Task")
return "Not Valid"
else:
frappe.get_doc({
"doctype":"NNTask Check In Out",
"task":task,
"check_in":cur_date_time,
"status":1,
"emp_name":user_name
}).insert(ignore_permissions=True)
actual_date_update(task)
else:
hourly_rate=frappe.db.sql("""select hourly_rate from tabEmployee where employee_name=%s""",(user_name))
if(hourly_rate):
hourly_cost=hourly_rate[0][0]
else:
hourly_cost=0;
checkin_time=frappe.db.sql("""select check_in from `tabNNTask Check In Out` where name=%s""",name)
if(checkin_time):
checked_intime=checkin_time[0][0];
else:
checked_intime=0
time_diff_in_seconds=frappe.utils.data.time_diff_in_seconds(cur_date_time,checked_intime);
#frappe.msgprint(time_diff_in_seconds);
cost_per_second=float(hourly_cost)/float(3600);
rate=(time_diff_in_seconds)*(cost_per_second)
#frappe.msgprint(str(rate),raise_exception=1)
frappe.db.sql("""update `tabNNTask Check In Out` set check_out=%s,status=2,hourly_cost=%s,rate=%s where name=%s""",(cur_date_time,hourly_rate,rate,name))
else:
return "not"
@frappe.whitelist()
def getTask(doctype):
data=[]
user_name=frappe.session.user
select_task=frappe.db.sql("select name,parent,members,employee_name,parenttype from `tabNNAssign` where close_status=0 and parenttype=%s and employee_name=%s",(doctype,user_name))
if(select_task):
i=1;
values="";
for select_task_list in select_task:
sno=i;
assign_name=select_task_list[0];
task_name=select_task_list[1];
employee_id=select_task_list[2];
employee_name=select_task_list[3];
select_task_list=frappe.db.sql("""select task_list.project as project ,task_list.milestone as milestone,task_list.tasklist as task_list_name,task.duration as duration from `tabNNTasklist` task_list ,`tabNNTask` task where task.name=%s and task_list.tasklist=task.tasklist""",(task_name))
if(select_task_list):
project_name=select_task_list[0][0];
milestone=select_task_list[0][1];
task_list_name=select_task_list[0][2];
duration=select_task_list[0][3];
else:
project_name="";
milestone="";
status="Status";
close="Status";
status_che=1
checkin_status=frappe.db.sql("""select * from `tabNNTask Check In Out` where status=%s and task=%s and emp_name=%s order by creation desc""",(status_che,task_name,user_name))
if(checkin_status):
check_status=1;
check_status_name=checkin_status[0][0]
else:
check_status=0;
check_status_name="";
# worked-time calculation:
total_seconds=0;
working_hours=frappe.db.sql("""select check_in,check_out from `tabNNTask Check In Out` where status=2 and task=%s and emp_name=%s order by creation desc""",(task_name,user_name))
for working_hours_list in working_hours:
checkin_times=working_hours_list[0];
checkout_times=working_hours_list[1];
seconds=frappe.utils.data.time_diff_in_seconds(checkout_times,checkin_times);
#frappe.msgprint(seconds);
total_seconds=int(seconds)+int(total_seconds);
#frappe.msgprint(total_seconds);
worked_time=str(datetime.timedelta(seconds=total_seconds))
rows=[project_name]+[milestone]+[task_list_name]+[task_name]+[employee_name]+[check_status]+[check_status_name]+[duration]+[worked_time]+[assign_name]
data.append(rows)
i=i+1;
return data
@frappe.whitelist()
def close_task(assign_name=None,):
frappe.db.sql("""Update `tabNNAssign` set close_status=1 where name=%s""",(assign_name))
task=frappe.db.sql("""select parent from tabNNAssign where name=%s""",(assign_name))
mode=1;
task_name=task
if task:
doctype="NNTask";
count=frappe.db.sql("""select *from tabNNAssign where close_status=0 and parent=%s and parenttype=%s""",(task_name,doctype))
if not count:
close_task_update(task)
mail_format_pms(task_name,mode)
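A worked example of the rate arithmetic in checking_checkout() above, with made-up numbers: the hourly cost is converted to a per-second cost and multiplied by the seconds between check-in and check-out.

hourly_cost = 3600.0
time_diff_in_seconds = 90 * 60            # 90 minutes checked in
cost_per_second = hourly_cost / 3600.0    # 1.0 per second for this made-up rate
rate = time_diff_in_seconds * cost_per_second
assert rate == 5400.0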
| 4,243
| 0
| 66
|
bbacbcdb8d4041cc214fabfb3adceb83044c7b88
| 1,674
|
py
|
Python
|
action.py
|
yeyeto2788/mudpi-core
|
dc477eb3ccbe3317d11a8555d245dadbdb34c257
|
[
"BSD-4-Clause"
] | null | null | null |
action.py
|
yeyeto2788/mudpi-core
|
dc477eb3ccbe3317d11a8555d245dadbdb34c257
|
[
"BSD-4-Clause"
] | 1
|
2021-03-15T14:32:34.000Z
|
2021-03-15T14:32:34.000Z
|
action.py
|
yeyeto2788/mudpi-core
|
dc477eb3ccbe3317d11a8555d245dadbdb34c257
|
[
"BSD-4-Clause"
] | null | null | null |
import json
import subprocess
import sys
import redis
| 30.436364
| 82
| 0.537037
|
import json
import subprocess
import sys
import redis
class Action():
def __init__(self, config):
self.config = config
self.name = config.get("name", "Action")
self.type = config.get("type", "event")
self.key = config.get("key", None).replace(" ",
"_").lower() if config.get(
"key") is not None else self.name.replace(" ", "_").lower()
# Actions will be either objects to publish for events
# or a command string to execute
self.action = config.get("action")
try:
self.r = config["redis"] if config[
"redis"] is not None else redis.Redis(
host='127.0.0.1', port=6379)
except KeyError:
self.r = redis.Redis(host='127.0.0.1', port=6379)
return
def init_action(self):
if self.type == 'event':
self.topic = self.config.get("topic", "mudpi")
elif self.type == 'command':
self.shell = self.config.get("shell", False)
def trigger(self, value=None):
if self.type == 'event':
self.emit_event()
elif self.type == 'command':
self.run_command(value)
return
def emit_event(self):
self.r.publish(self.topic, json.dumps(self.action))
return
def run_command(self, value=None):
if value is None:
completed_process = subprocess.run([self.action], shell=self.shell)
else:
completed_process = subprocess.run(
[self.action, json.dumps(value)], shell=self.shell)
return
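Two hypothetical config dicts for the Action class above, inferred only from the config.get(...) calls it makes (name, type, key, action, topic, shell, redis); the values are illustrative.

# an "event" action publishes its `action` object as JSON to a redis topic
event_action_config = {
    "name": "Pump On",
    "type": "event",
    "topic": "mudpi/relays/pump",  # falls back to "mudpi" if omitted
    "action": {"state": 1},
}

# a "command" action runs its `action` string through subprocess
command_action_config = {
    "name": "Log Uptime",
    "type": "command",
    "shell": True,
    "action": "uptime",
}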
| 1,464
| -6
| 158
|
b88cc6b6407fec4332c3df0cdd6f4c0dc8c904b3
| 4,290
|
py
|
Python
|
packages/girder/plugins/oauth/girder_oauth/providers/google.py
|
ShenQianwithC/HistomicsTK
|
4ad7e72a7ebdabbdfc879254fad04ce7ca47e320
|
[
"Apache-2.0"
] | 1
|
2019-11-14T18:13:26.000Z
|
2019-11-14T18:13:26.000Z
|
packages/girder/plugins/oauth/girder_oauth/providers/google.py
|
ShenQianwithC/HistomicsTK
|
4ad7e72a7ebdabbdfc879254fad04ce7ca47e320
|
[
"Apache-2.0"
] | 3
|
2018-11-15T19:52:40.000Z
|
2022-02-14T21:56:22.000Z
|
packages/girder/plugins/oauth/girder_oauth/providers/google.py
|
ShenQianwithC/HistomicsTK
|
4ad7e72a7ebdabbdfc879254fad04ce7ca47e320
|
[
"Apache-2.0"
] | 3
|
2018-05-21T19:45:19.000Z
|
2019-04-08T19:53:07.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from six.moves import urllib
from girder.api.rest import getApiUrl
from girder.exceptions import RestException
from girder.models.setting import Setting
from .base import ProviderBase
from .. import constants
| 35.75
| 79
| 0.571329
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from six.moves import urllib
from girder.api.rest import getApiUrl
from girder.exceptions import RestException
from girder.models.setting import Setting
from .base import ProviderBase
from .. import constants
class Google(ProviderBase):
_AUTH_URL = 'https://accounts.google.com/o/oauth2/auth'
_AUTH_SCOPES = ['profile', 'email']
_TOKEN_URL = 'https://accounts.google.com/o/oauth2/token'
_API_USER_URL = 'https://www.googleapis.com/plus/v1/people/me'
_API_USER_FIELDS = ('id', 'emails', 'name')
def getClientIdSetting(self):
return Setting().get(constants.PluginSettings.GOOGLE_CLIENT_ID)
def getClientSecretSetting(self):
return Setting().get(constants.PluginSettings.GOOGLE_CLIENT_SECRET)
@classmethod
def getUrl(cls, state):
clientId = Setting().get(constants.PluginSettings.GOOGLE_CLIENT_ID)
if clientId is None:
raise Exception('No Google client ID setting is present.')
callbackUrl = '/'.join((getApiUrl(), 'oauth', 'google', 'callback'))
query = urllib.parse.urlencode({
'response_type': 'code',
'access_type': 'online',
'client_id': clientId,
'redirect_uri': callbackUrl,
'state': state,
'scope': ' '.join(cls._AUTH_SCOPES)
})
return '%s?%s' % (cls._AUTH_URL, query)
def getToken(self, code):
params = {
'grant_type': 'authorization_code',
'code': code,
'client_id': self.clientId,
'client_secret': self.clientSecret,
'redirect_uri': self.redirectUri
}
resp = self._getJson(method='POST', url=self._TOKEN_URL,
data=params)
return resp
def getUser(self, token):
headers = {
'Authorization': ' '.join((
token['token_type'], token['access_token']))
}
# For privacy and efficiency, fetch only the specific needed fields
# https://developers.google.com/+/web/api/rest/#partial-response
query = urllib.parse.urlencode({
'fields': ','.join(self._API_USER_FIELDS)
})
resp = self._getJson(method='GET',
url='%s?%s' % (self._API_USER_URL, query),
headers=headers)
# Get user's OAuth2 ID
oauthId = resp.get('id')
if not oauthId:
raise RestException(
'Google Plus did not return a user ID.', code=502)
# Get user's email address
# Prefer email address with 'account' type
emails = [
email.get('value')
for email in resp.get('emails', [])
if email.get('type') == 'account'
]
if not emails:
# If an 'account' email can't be found, consider them all
emails = [
email.get('value')
for email in resp.get('emails', [])
]
if emails:
# Even if there are multiple emails, just use the first one
email = emails[0]
else:
raise RestException(
'This Google Plus user has no available email address.',
code=502)
# Get user's name
firstName = resp.get('name', {}).get('givenName', '')
lastName = resp.get('name', {}).get('familyName', '')
user = self._createOrReuseUser(oauthId, email, firstName, lastName)
return user
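# Sketch of the consent URL that getUrl() assembles above; the client id,
# callback URL and state value below are made-up placeholders.
from six.moves import urllib

query = urllib.parse.urlencode({
    'response_type': 'code',
    'access_type': 'online',
    'client_id': 'example-client-id.apps.googleusercontent.com',
    'redirect_uri': 'https://girder.example.com/api/v1/oauth/google/callback',
    'state': 'csrf-token-123',
    'scope': 'profile email',
})
print('%s?%s' % ('https://accounts.google.com/o/oauth2/auth', query))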
| 2,832
| 435
| 23
|
f00f0283a00861b00d8ace96a341aa1af6392dc8
| 177
|
py
|
Python
|
todoapp/todos/urls.py
|
dhavall13/REST-API-TodoCRUD
|
5d7179d12c4436e38658d9a7483497c8db99f4be
|
[
"MIT"
] | null | null | null |
todoapp/todos/urls.py
|
dhavall13/REST-API-TodoCRUD
|
5d7179d12c4436e38658d9a7483497c8db99f4be
|
[
"MIT"
] | null | null | null |
todoapp/todos/urls.py
|
dhavall13/REST-API-TodoCRUD
|
5d7179d12c4436e38658d9a7483497c8db99f4be
|
[
"MIT"
] | null | null | null |
from rest_framework import routers
from .api import TodoViewSet
router = routers.DefaultRouter()
router.register('api/todos', TodoViewSet, 'todos')
urlpatterns = router.urls
| 19.666667
| 50
| 0.79096
|
from rest_framework import routers
from .api import TodoViewSet
router = routers.DefaultRouter()
router.register('api/todos', TodoViewSet, 'todos')
urlpatterns = router.urls
| 0
| 0
| 0
|
91373743141e577dcbdc22838e0c93cfc222a5cc
| 241
|
py
|
Python
|
Application/get_whitelist.py
|
soheyldaliraan/instagram_sub_bot_remover
|
8ccf7134c79b8a9c9c09413321f526dd388c5609
|
[
"MIT"
] | 27
|
2019-02-10T09:04:36.000Z
|
2022-03-07T21:44:26.000Z
|
Application/get_whitelist.py
|
soheyldaliraan/instagram_sub_bot_remover
|
8ccf7134c79b8a9c9c09413321f526dd388c5609
|
[
"MIT"
] | 1
|
2022-03-01T02:45:18.000Z
|
2022-03-01T02:45:18.000Z
|
Application/get_whitelist.py
|
soheyldaliraan/instagram_sub_bot_remover
|
8ccf7134c79b8a9c9c09413321f526dd388c5609
|
[
"MIT"
] | 5
|
2019-12-27T07:43:33.000Z
|
2022-02-15T19:51:37.000Z
|
import os
import pandas as pd
import configuration
| 20.083333
| 61
| 0.717842
|
import os
import pandas as pd
import configuration
def get_whitelist():
if os.path.exists(configuration.whitelist_path):
whitelist = pd.read_csv(configuration.whitelist_path)
return list(whitelist['pk'])
return []
| 165
| 0
| 23
|
8e57bc0091c782bab46c7958d378a4ddf117035a
| 378
|
py
|
Python
|
test.py
|
xiaoweiChen/OpenVINO_Model_Convert_Website
|
ce8b0d225d1e0228aace772e3017ad3154543688
|
[
"Apache-2.0"
] | 1
|
2019-11-12T07:11:39.000Z
|
2019-11-12T07:11:39.000Z
|
test.py
|
xiaoweiChen/OpenVINO_Model_Convert_Website
|
ce8b0d225d1e0228aace772e3017ad3154543688
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
xiaoweiChen/OpenVINO_Model_Convert_Website
|
ce8b0d225d1e0228aace772e3017ad3154543688
|
[
"Apache-2.0"
] | null | null | null |
import sys
from converter import processPreTrainModels
if __name__ == '__main__':
if len(sys.argv) < 4:
print("usage: {} proto caffemodel output_dir".format(sys.argv[0]))
exit(0)
proto = sys.argv[1]
model = sys.argv[2]
output = sys.argv[3]
file_path = processPreTrainModels(
proto,
model,
output)
print("file_path is", file_path)
| 19.894737
| 70
| 0.648148
|
import sys
from converter import processPreTrainModels
if __name__ == '__main__':
if len(sys.argv) < 4:
print("usage: {} proto caffemodel output_dir".format(sys.argv[0]))
exit(0)
proto = sys.argv[1]
model = sys.argv[2]
output = sys.argv[3]
file_path = processPreTrainModels(
proto,
model,
output)
print("file_path is", file_path)
| 0
| 0
| 0
|
25a83d4dda33b6f0fdf3262666cb597207aa5a6e
| 4,990
|
py
|
Python
|
package/tests/test_common/test_vm_details_provider.py
|
DYeag/AWS-Shell
|
b5318e72373b1a948ac6aced1c0bb4566d5ae46f
|
[
"0BSD"
] | 3
|
2016-08-22T07:14:56.000Z
|
2018-03-16T07:31:44.000Z
|
package/tests/test_common/test_vm_details_provider.py
|
DYeag/AWS-Shell
|
b5318e72373b1a948ac6aced1c0bb4566d5ae46f
|
[
"0BSD"
] | 470
|
2016-03-24T13:38:08.000Z
|
2022-02-05T01:14:05.000Z
|
package/tests/test_common/test_vm_details_provider.py
|
DYeag/AWS-Shell
|
b5318e72373b1a948ac6aced1c0bb4566d5ae46f
|
[
"0BSD"
] | 9
|
2016-06-20T11:41:54.000Z
|
2020-11-21T00:42:45.000Z
|
from unittest import TestCase
from mock import Mock
from cloudshell.cp.aws.domain.common.vm_details_provider import VmDetailsProvider
| 40.901639
| 110
| 0.681964
|
from unittest import TestCase
from mock import Mock
from cloudshell.cp.aws.domain.common.vm_details_provider import VmDetailsProvider
class TestVmDetailsProvider(TestCase):
def setUp(self):
self.vm_details_provider = VmDetailsProvider()
def test_prepare_vm_details(self):
instance = Mock()
instance.image_id = 'image_id'
instance.instance_type = 'instance_type'
instance.platform = 'instance_platform'
instance.network_interfaces = []
instance.volumes.all = lambda: []
instance.iam_instance_profile = {"Arn": "arn:aws:iam::admin_role"}
vm_instance_data = self.vm_details_provider.create(instance).vmInstanceData
self.assertTrue(self._get_value(vm_instance_data, 'AMI ID') == instance.image_id)
self.assertTrue(self._get_value(vm_instance_data, 'instance type') == instance.instance_type)
self.assertTrue(self._get_value(vm_instance_data, 'platform') == instance.platform)
self.assertTrue(self._get_value(vm_instance_data, 'IAM Role') == instance.iam_instance_profile['Arn'])
def test_prepare_network_interface_objects_with_elastic_ip(self):
# elastic_ip
network_interface = Mock()
network_interface.association_attribute = {'IpOwnerId': '9929230',
'PublicIp': 'public_ip'}
network_interface.network_interface_id = 'interface_id'
network_interface.mac_address = 'mac_address'
network_interface.subnet_id = 'subnet_id'
network_interface.attachment = {'DeviceIndex': 0}
network_interface.private_ip_address = 'private_ip'
instance = Mock()
instance.network_interfaces = [
network_interface
]
network_interface_objects = self.vm_details_provider._get_vm_network_data(instance)
nio = network_interface_objects[0]
self.assertTrue(nio.interfaceId == 'interface_id')
self.assertTrue(nio.networkId == 'subnet_id')
self.assertTrue(nio.isPrimary == True)
nio_data = nio.networkData
self.assertTrue(self._get_value(nio_data, 'MAC Address') == 'mac_address')
self.assertTrue(self._get_value(nio_data, 'Elastic IP') == True)
self.assertTrue(self._get_value(nio_data, 'IP') == 'private_ip')
self.assertTrue(self._get_value(nio_data, 'Public IP') == 'public_ip')
def test_prepare_network_interface_objects_with_public_ip(self):
network_interface = Mock()
network_interface.association_attribute = dict()
network_interface.network_interface_id = 'interface_id'
network_interface.mac_address = 'mac_address'
network_interface.subnet_id = 'subnet_id'
network_interface.attachment = {'DeviceIndex': 0}
network_interface.private_ip_address = 'private_ip'
instance = Mock()
instance.public_ip_address = 'public_ip'
instance.network_interfaces = [
network_interface
]
network_interface_objects = self.vm_details_provider._get_vm_network_data(instance)
nio = network_interface_objects[0]
self.assertTrue(nio.interfaceId == 'interface_id')
self.assertTrue(nio.networkId == 'subnet_id')
self.assertTrue(nio.isPrimary == True)
nio_data = nio.networkData
self.assertTrue(self._get_value(nio_data, 'MAC Address') == 'mac_address')
self.assertTrue(self._get_value(nio_data, 'Elastic IP') == False)
self.assertTrue(self._get_value(nio_data, 'IP') == 'private_ip')
self.assertTrue(self._get_value(nio_data, 'Public IP') == '')
def test_prepare_network_interface_objects_without_public_ip(self):
network_interface = Mock()
network_interface.association_attribute = dict()
network_interface.network_interface_id = 'interface_id'
network_interface.mac_address = 'mac_address'
network_interface.subnet_id = 'subnet_id'
network_interface.attachment = {'DeviceIndex': 1}
network_interface.private_ip_address = 'private_ip'
instance = Mock()
instance.network_interfaces = [
network_interface
]
network_interface_objects = self.vm_details_provider._get_vm_network_data(instance)
nio = network_interface_objects[0]
self.assertTrue(nio.interfaceId == 'interface_id')
self.assertTrue(nio.networkId == 'subnet_id')
self.assertTrue(nio.isPrimary == False)
nio_data = nio.networkData
self.assertTrue(self._get_value(nio_data, 'MAC Address') == 'mac_address')
self.assertTrue(self._get_value(nio_data, 'Elastic IP') == False)
self.assertTrue(self._get_value(nio_data, 'IP') == 'private_ip')
self.assertTrue(self._get_value(nio_data, 'Public IP') == "")
def _get_value(self, data, key):
for item in data:
if item.key == key:
return item.value
return None
| 4,652
| 17
| 185
|
e837781e421b78fc059079fdefb0bdc32efc4414
| 3,229
|
py
|
Python
|
scripts/eval.py
|
zsinsense/demosaicnet
|
bbe8151cab86dbe46b76806cf9ec353994b389ff
|
[
"MIT"
] | null | null | null |
scripts/eval.py
|
zsinsense/demosaicnet
|
bbe8151cab86dbe46b76806cf9ec353994b389ff
|
[
"MIT"
] | null | null | null |
scripts/eval.py
|
zsinsense/demosaicnet
|
bbe8151cab86dbe46b76806cf9ec353994b389ff
|
[
"MIT"
] | null | null | null |
#!/bin/env python
"""Evaluate a demosaicking model."""
import argparse
import os
import time
import torch as th
from torch.utils.data import DataLoader
import numpy as np
import ttools
from ttools.modules.image_operators import crop_like
import demosaicnet
LOG = ttools.get_logger(__name__)
def main(args):
"""Entrypoint to the training."""
# Load model parameters from checkpoint, if any
# meta = ttools.Checkpointer.load_meta(args.checkpoint_dir)
# if meta is None:
# LOG.warning("No checkpoint found at %s, aborting.", args.checkpoint_dir)
# return
meta = {
'mode': 'bayer',
'depth': 15,
'width': 64
}
data = demosaicnet.Dataset(args.data, download=False,
mode=meta["mode"],
subset=demosaicnet.TEST_SUBSET)
dataloader = DataLoader(
data, batch_size=1, num_workers=4, pin_memory=True, shuffle=False)
if meta["mode"] == demosaicnet.BAYER_MODE:
model = demosaicnet.BayerDemosaick(depth=meta["depth"],
width=meta["width"],
pretrained=True,
pad=False)
elif meta["mode"] == demosaicnet.XTRANS_MODE:
model = demosaicnet.XTransDemosaick(depth=meta["depth"],
width=meta["width"],
pretrained=True,
pad=False)
# checkpointer = ttools.Checkpointer(args.checkpoint_dir, model, meta=meta)
# checkpointer.load_latest() # Resume from checkpoint, if any.
state_dict = th.load(args.checkpoint_dir)
model.load_state_dict(state_dict)
# No need for gradients
for p in model.parameters():
p.requires_grad = False
mse_fn = th.nn.MSELoss()
psnr_fn = PSNR()
device = "cpu"
if th.cuda.is_available():
device = "cuda"
LOG.info("Using CUDA")
count = 0
mse = 0.0
psnr = 0.0
for idx, batch in enumerate(dataloader):
mosaic = batch[0].to(device)
target = batch[1].to(device)
output = model(mosaic)
target = crop_like(target, output)
output = th.clamp(output, 0, 1)
psnr_ = psnr_fn(output, target).item()
mse_ = mse_fn(output, target).item()
psnr += psnr_
mse += mse_
count += 1
LOG.info("Image %04d, PSNR = %.1f dB, MSE = %.5f", idx, psnr_, mse_)
mse /= count
psnr /= count
LOG.info("-----------------------------------")
LOG.info("Average, PSNR = %.1f dB, MSE = %.5f", psnr, mse)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("data", help="root directory for the demosaicnet dataset.")
parser.add_argument("checkpoint_dir", help="directory with the model checkpoints.")
args = parser.parse_args()
ttools.set_logger(False)
main(args)
| 29.354545
| 87
| 0.569836
|
#!/bin/env python
"""Evaluate a demosaicking model."""
import argparse
import os
import time
import torch as th
from torch.utils.data import DataLoader
import numpy as np
import ttools
from ttools.modules.image_operators import crop_like
import demosaicnet
LOG = ttools.get_logger(__name__)
class PSNR(th.nn.Module):
def __init__(self):
super(PSNR, self).__init__()
self.mse = th.nn.MSELoss()
def forward(self, out, ref):
mse = self.mse(out, ref)
return -10*th.log10(mse+1e-12)
def main(args):
"""Entrypoint to the training."""
# Load model parameters from checkpoint, if any
# meta = ttools.Checkpointer.load_meta(args.checkpoint_dir)
# if meta is None:
# LOG.warning("No checkpoint found at %s, aborting.", args.checkpoint_dir)
# return
meta = {
'mode': 'bayer',
'depth': 15,
'width': 64
}
data = demosaicnet.Dataset(args.data, download=False,
mode=meta["mode"],
subset=demosaicnet.TEST_SUBSET)
dataloader = DataLoader(
data, batch_size=1, num_workers=4, pin_memory=True, shuffle=False)
if meta["mode"] == demosaicnet.BAYER_MODE:
model = demosaicnet.BayerDemosaick(depth=meta["depth"],
width=meta["width"],
pretrained=True,
pad=False)
elif meta["mode"] == demosaicnet.XTRANS_MODE:
model = demosaicnet.XTransDemosaick(depth=meta["depth"],
width=meta["width"],
pretrained=True,
pad=False)
# checkpointer = ttools.Checkpointer(args.checkpoint_dir, model, meta=meta)
# checkpointer.load_latest() # Resume from checkpoint, if any.
state_dict = th.load(args.checkpoint_dir)
model.load_state_dict(state_dict)
# No need for gradients
for p in model.parameters():
p.requires_grad = False
mse_fn = th.nn.MSELoss()
psnr_fn = PSNR()
device = "cpu"
if th.cuda.is_available():
device = "cuda"
LOG.info("Using CUDA")
count = 0
mse = 0.0
psnr = 0.0
for idx, batch in enumerate(dataloader):
mosaic = batch[0].to(device)
target = batch[1].to(device)
output = model(mosaic)
target = crop_like(target, output)
output = th.clamp(output, 0, 1)
psnr_ = psnr_fn(output, target).item()
mse_ = mse_fn(output, target).item()
psnr += psnr_
mse += mse_
count += 1
LOG.info("Image %04d, PSNR = %.1f dB, MSE = %.5f", idx, psnr_, mse_)
mse /= count
psnr /= count
LOG.info("-----------------------------------")
LOG.info("Average, PSNR = %.1f dB, MSE = %.5f", psnr, mse)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("data", help="root directory for the demosaicnet dataset.")
parser.add_argument("checkpoint_dir", help="directory with the model checkpoints.")
args = parser.parse_args()
ttools.set_logger(False)
main(args)
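# Quick sanity check of the PSNR module defined above (-10 * log10(MSE)):
# with made-up tensors whose per-pixel error is 0.1, the MSE is 0.01 and the
# expected score is roughly 20 dB.
import torch as th

out = th.zeros(1, 3, 8, 8)
ref = th.full((1, 3, 8, 8), 0.1)
print(PSNR()(out, ref).item())   # ~20.0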
| 149
| 4
| 75
|
ace7c9af9eb249c27faf798e56fca31751c8a6ad
| 1,030
|
py
|
Python
|
lrp_toolbox/training_test.py
|
KushDen/deepimportance_code_release
|
5d16f1f95568dc402be6dfed4ad993ec0dbaa356
|
[
"MIT"
] | 18
|
2020-07-11T01:58:02.000Z
|
2021-09-17T07:08:34.000Z
|
lrp_toolbox/training_test.py
|
KushDen/deepimportance_code_release
|
5d16f1f95568dc402be6dfed4ad993ec0dbaa356
|
[
"MIT"
] | 13
|
2021-01-13T14:41:26.000Z
|
2021-12-29T02:15:10.000Z
|
lrp_toolbox/training_test.py
|
KushDen/deepimportance_code_release
|
5d16f1f95568dc402be6dfed4ad993ec0dbaa356
|
[
"MIT"
] | 8
|
2020-02-19T21:30:30.000Z
|
2022-03-11T01:34:33.000Z
|
'''
@author: Sebastian Lapuschkin
@maintainer: Sebastian Lapuschkin
@contact: sebastian.lapuschkin@hhi.fraunhofer.de, wojciech.samek@hhi.fraunhofer.de
@date: 30.09.2015
@version: 1.0
@copyright: Copyright (c) 2015-2017, Sebastian Lapuschkin, Alexander Binder, Gregoire Montavon, Klaus-Robert Mueller, Wojciech Samek
@license : BSD-2-Clause
'''
import modules
import model_io
import numpy as np ; na = np.newaxis
D,N = 2,200000
#this is the XOR problem.
X = np.random.rand(N,D) #we want [NxD] data
X = (X > 0.5)*1.0
Y = X[:,0] == X[:,1]
Y = (np.vstack((Y, np.invert(Y)))*1.0).T # and [NxC] labels
X += np.random.randn(N,D)*0.1 # add some noise to the data.
#build a network
nn = modules.Sequential([modules.Linear(2,3), modules.Tanh(),modules.Linear(3,15), modules.Tanh(), modules.Linear(15,15), modules.Tanh(), modules.Linear(15,3), modules.Tanh() ,modules.Linear(3,2), modules.SoftMax()])
#train the network.
nn.train(X,Y,Xval=X,Yval=Y, batchsize = 5)
#save the network
model_io.write(nn, '../xor_net_small_1000.txt')
| 28.611111
| 216
| 0.703883
|
'''
@author: Sebastian Lapuschkin
@maintainer: Sebastian Lapuschkin
@contact: sebastian.lapuschkin@hhi.fraunhofer.de, wojciech.samek@hhi.fraunhofer.de
@date: 30.09.2015
@version: 1.0
@copyright: Copyright (c) 2015-2017, Sebastian Lapuschkin, Alexander Binder, Gregoire Montavon, Klaus-Robert Mueller, Wojciech Samek
@license : BSD-2-Clause
'''
import modules
import model_io
import numpy as np ; na = np.newaxis
D,N = 2,200000
#this is the XOR problem.
X = np.random.rand(N,D) #we want [NxD] data
X = (X > 0.5)*1.0
Y = X[:,0] == X[:,1]
Y = (np.vstack((Y, np.invert(Y)))*1.0).T # and [NxC] labels
X += np.random.randn(N,D)*0.1 # add some noise to the data.
#build a network
nn = modules.Sequential([modules.Linear(2,3), modules.Tanh(),modules.Linear(3,15), modules.Tanh(), modules.Linear(15,15), modules.Tanh(), modules.Linear(15,3), modules.Tanh() ,modules.Linear(3,2), modules.SoftMax()])
#train the network.
nn.train(X,Y,Xval=X,Yval=Y, batchsize = 5)
#save the network
model_io.write(nn, '../xor_net_small_1000.txt')
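# Follow-up sketch: reading the saved network back and scoring a few noisy XOR
# points. 'model_io.read' is assumed here to be the counterpart of the write
# call above, and the test inputs are made up.
Xtest = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]]) + np.random.randn(4, 2) * 0.1
nn2 = model_io.read('../xor_net_small_1000.txt')
print(nn2.forward(Xtest))   # one row of two class scores per test point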
| 0
| 0
| 0
|
c16cdfe67a57a720e41f4d1f6a82111d663200a5
| 149
|
py
|
Python
|
tests/iac_integration/cdk/testdata/cdk_v2/python/app.py
|
zhuhaow/aws-sam-cli
|
59d82ec6848b5a0cdd544d8ada838d4d34052971
|
[
"Apache-2.0"
] | 2,959
|
2018-05-08T21:48:56.000Z
|
2020-08-24T14:35:39.000Z
|
tests/iac_integration/cdk/testdata/cdk_v2/python/app.py
|
zhuhaow/aws-sam-cli
|
59d82ec6848b5a0cdd544d8ada838d4d34052971
|
[
"Apache-2.0"
] | 1,469
|
2018-05-08T22:44:28.000Z
|
2020-08-24T20:19:24.000Z
|
tests/iac_integration/cdk/testdata/cdk_v2/python/app.py
|
zhuhaow/aws-sam-cli
|
59d82ec6848b5a0cdd544d8ada838d4d34052971
|
[
"Apache-2.0"
] | 642
|
2018-05-08T22:09:19.000Z
|
2020-08-17T09:04:37.000Z
|
#!/usr/bin/env python3
from aws_cdk import App
from python.python_stack import PythonStack
app = App()
PythonStack(app, "TestStack")
app.synth()
| 13.545455
| 43
| 0.751678
|
#!/usr/bin/env python3
from aws_cdk import App
from python.python_stack import PythonStack
app = App()
PythonStack(app, "TestStack")
app.synth()
| 0
| 0
| 0
|
294a2f7086d69271812482a18de2d6157e635b9d
| 3,551
|
py
|
Python
|
parsl/executors/base.py
|
Lnaden/parsl
|
f6ad3a272fa3d62e72ac3b7c402e25f079d4ab98
|
[
"Apache-2.0"
] | null | null | null |
parsl/executors/base.py
|
Lnaden/parsl
|
f6ad3a272fa3d62e72ac3b7c402e25f079d4ab98
|
[
"Apache-2.0"
] | null | null | null |
parsl/executors/base.py
|
Lnaden/parsl
|
f6ad3a272fa3d62e72ac3b7c402e25f079d4ab98
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABCMeta, abstractmethod, abstractproperty
class ParslExecutor(metaclass=ABCMeta):
"""Define the strict interface for all Executor classes.
This is a metaclass that only enforces concrete implementations of
functionality by the child classes.
In addition to the listed methods, a ParslExecutor instance must always
have a member field:
label: str - a human readable label for the executor, unique
with respect to other executors.
An executor may optionally expose:
storage_access: List[parsl.data_provider.staging.Staging] - a list of staging
providers that will be used for file staging. In the absence of this
attribute, or if this attribute is `None`, then a default value of
`parsl.data_provider.staging.default_staging` will be used by the
staging code.
Typechecker note: Ideally storage_access would be declared on executor
__init__ methods as List[Staging] - however, lists are by default
invariant, not co-variant, and it looks like @typeguard cannot be
persuaded otherwise. So if you're implementing an executor and want to
@typeguard the constructor, you'll have to use List[Any] here.
"""
@abstractmethod
def start(self, *args, **kwargs):
"""Start the executor.
Any spin-up operations (for example: starting thread pools) should be performed here.
"""
pass
@abstractmethod
def submit(self, *args, **kwargs):
"""Submit.
We haven't yet decided on what the args to this can be,
whether it should just be func, args, kwargs or be the partially evaluated
fn
"""
pass
@abstractmethod
def scale_out(self, *args, **kwargs):
"""Scale out method.
We should have the scale out method simply take resource object
which will have the scaling methods, scale_out itself should be a coroutine, since
scaling tasks can be slow.
"""
pass
@abstractmethod
def scale_in(self, blocks):
"""Scale in method.
Cause the executor to reduce the number of blocks by count.
We should have the scale in method simply take resource object
which will have the scaling methods, scale_in itself should be a coroutine, since
scaling tasks can be slow.
"""
pass
@abstractmethod
def shutdown(self, *args, **kwargs):
"""Shutdown the executor.
This includes all attached resources such as workers and controllers.
"""
pass
@abstractproperty
def scaling_enabled(self):
"""Specify if scaling is enabled.
The callers of ParslExecutors need to differentiate between Executors
and Executors wrapped in a resource provider
"""
pass
@property
def run_dir(self):
"""Path to the run directory.
"""
return self._run_dir
@run_dir.setter
@property
def hub_address(self):
"""Address to the Hub for monitoring.
"""
return self._hub_address
@hub_address.setter
@property
def hub_port(self):
"""Port to the Hub for monitoring.
"""
return self._hub_port
@hub_port.setter
| 30.350427
| 93
| 0.639538
|
from abc import ABCMeta, abstractmethod, abstractproperty
class ParslExecutor(metaclass=ABCMeta):
"""Define the strict interface for all Executor classes.
This is a metaclass that only enforces concrete implementations of
functionality by the child classes.
In addition to the listed methods, a ParslExecutor instance must always
have a member field:
label: str - a human readable label for the executor, unique
with respect to other executors.
An executor may optionally expose:
storage_access: List[parsl.data_provider.staging.Staging] - a list of staging
providers that will be used for file staging. In the absence of this
attribute, or if this attribute is `None`, then a default value of
`parsl.data_provider.staging.default_staging` will be used by the
staging code.
Typechecker note: Ideally storage_access would be declared on executor
__init__ methods as List[Staging] - however, lists are by default
invariant, not co-variant, and it looks like @typeguard cannot be
persuaded otherwise. So if you're implementing an executor and want to
@typeguard the constructor, you'll have to use List[Any] here.
"""
@abstractmethod
def start(self, *args, **kwargs):
"""Start the executor.
Any spin-up operations (for example: starting thread pools) should be performed here.
"""
pass
@abstractmethod
def submit(self, *args, **kwargs):
"""Submit.
We haven't yet decided on what the args to this can be,
whether it should just be func, args, kwargs or be the partially evaluated
fn
"""
pass
@abstractmethod
def scale_out(self, *args, **kwargs):
"""Scale out method.
We should have the scale out method simply take resource object
which will have the scaling methods, scale_out itself should be a coroutine, since
scaling tasks can be slow.
"""
pass
@abstractmethod
def scale_in(self, blocks):
"""Scale in method.
Cause the executor to reduce the number of blocks by count.
We should have the scale in method simply take resource object
which will have the scaling methods, scale_in itself should be a coroutine, since
scaling tasks can be slow.
"""
pass
@abstractmethod
def shutdown(self, *args, **kwargs):
"""Shutdown the executor.
This includes all attached resources such as workers and controllers.
"""
pass
@abstractproperty
def scaling_enabled(self):
"""Specify if scaling is enabled.
The callers of ParslExecutors need to differentiate between Executors
and Executors wrapped in a resource provider
"""
pass
@property
def run_dir(self):
"""Path to the run directory.
"""
return self._run_dir
@run_dir.setter
def run_dir(self, value):
self._run_dir = value
@property
def hub_address(self):
"""Address to the Hub for monitoring.
"""
return self._hub_address
@hub_address.setter
def hub_address(self, value):
self._hub_address = value
@property
def hub_port(self):
"""Port to the Hub for monitoring.
"""
return self._hub_port
@hub_port.setter
def hub_port(self, value):
self._hub_port = value
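# Hypothetical minimal subclass satisfying the abstract interface documented
# above; it is only a shape illustration, not a real Parsl executor.
from concurrent.futures import ThreadPoolExecutor


class ToyExecutor(ParslExecutor):
    label = "toy"

    def start(self):
        self._pool = ThreadPoolExecutor(max_workers=2)

    def submit(self, fn, *args, **kwargs):
        return self._pool.submit(fn, *args, **kwargs)

    def scale_out(self, blocks=1):
        pass  # fixed-size pool: nothing to add

    def scale_in(self, blocks):
        pass  # nothing to remove

    def shutdown(self):
        self._pool.shutdown()

    @property
    def scaling_enabled(self):
        return False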
| 112
| 0
| 78
|
9eeb1c341a09b93233cbe624f89cddfd33fcd2f2
| 940
|
py
|
Python
|
part4c.py
|
ddlatumalea/signal_analysis
|
9e62e553f56e4c60c7e0963187e01c262d8d820e
|
[
"MIT"
] | null | null | null |
part4c.py
|
ddlatumalea/signal_analysis
|
9e62e553f56e4c60c7e0963187e01c262d8d820e
|
[
"MIT"
] | null | null | null |
part4c.py
|
ddlatumalea/signal_analysis
|
9e62e553f56e4c60c7e0963187e01c262d8d820e
|
[
"MIT"
] | 1
|
2022-03-03T13:31:23.000Z
|
2022-03-03T13:31:23.000Z
|
import numpy as np


def fourier_transform(yi):
"""a, b = fourier_transform(yi).
Real-valued Fourier transform that determines the
coefficients of the Fourier series for a given
signal y. The coefficients of the cosine terms are
returned in the array a; those of the sine terms
in the array b. Frequencies start at zero and do
not exceed the Nyquist frequency.
    yi = {y1,y2,...,yn}
"""
xi = np.arange(yi.size)
length = yi.size // 2 + 1
a, b = np.empty(length), np.empty(length)
# Compute zero and Nyquist frequency cases
a[0] = np.mean(yi)
a[-1] = yi @ np.cos(np.pi * xi) / yi.size
b[0] = 0.0
b[-1] = 0.0
# Compute ordinary cases (overwrite Nyquist if odd length)
for index in range(1, length + yi.size % 2 - 1):
arg = 2.0 * np.pi * xi * index / yi.size
a[index] = 2.0 / yi.size * yi @ np.cos(arg)
b[index] = 2.0 / yi.size * yi @ np.sin(arg)
return a, b
| 39.166667
| 62
| 0.601064
|
import numpy as np


def fourier_transform(yi):
"""a, b = fourier_transform(yi).
Real-valued Fourier transform that determines the
coefficients of the Fourier series for a given
signal y. The coefficients of the cosine terms are
returned in the array a; those of the sine terms
in the array b. Frequencies start at zero and do
not exceed the Nyquist frequency.
yi = {y1,y2,...,xn}
"""
xi = np.arange(yi.size)
length = yi.size // 2 + 1
a, b = np.empty(length), np.empty(length)
# Compute zero and Nyquist frequency cases
a[0] = np.mean(yi)
a[-1] = yi @ np.cos(np.pi * xi) / yi.size
b[0] = 0.0
b[-1] = 0.0
# Compute ordinary cases (overwrite Nyquist if odd length)
for index in range(1, length + yi.size % 2 - 1):
arg = 2.0 * np.pi * xi * index / yi.size
a[index] = 2.0 / yi.size * yi @ np.cos(arg)
b[index] = 2.0 / yi.size * yi @ np.sin(arg)
return a, b
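# Example use of fourier_transform (numpy imported as np, as above): a pure
# cosine with 4 cycles over 32 samples puts all cosine energy in a[4] and
# leaves the sine coefficients near zero.
n = 32
x = np.arange(n)
y = np.cos(2.0 * np.pi * 4 * x / n)
a, b = fourier_transform(y)
print(round(float(a[4]), 6), round(float(b[4]), 6))   # ~1.0 ~0.0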
| 0
| 0
| 0
|
686add8ace25e333d96d69d7abbb938d46abc531
| 1,453
|
py
|
Python
|
distance-betweeen-obj/main.py
|
CrispenGari/opencv-python
|
cfa862fbf3b8b2c8899b76cee2774d6fb72ba00e
|
[
"MIT"
] | 1
|
2021-11-08T07:37:05.000Z
|
2021-11-08T07:37:05.000Z
|
distance-betweeen-obj/main.py
|
CrispenGari/opencv-python
|
cfa862fbf3b8b2c8899b76cee2774d6fb72ba00e
|
[
"MIT"
] | null | null | null |
distance-betweeen-obj/main.py
|
CrispenGari/opencv-python
|
cfa862fbf3b8b2c8899b76cee2774d6fb72ba00e
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
from math import pow, sqrt
points = []
letters = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
image = np.zeros((512, 512, 3), np.uint8)
while True:
cv2.putText(image, f'TO CLEAR THE POINTS PRESS (c)', (20, 20), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
cv2.imshow("DISTANCE BETWEEN TWO POINTS", image)
cv2.setMouseCallback("DISTANCE BETWEEN TWO POINTS", mouseEvent, None)
key = cv2.waitKey(1)
if key & 0xFF == 27:
cv2.destroyAllWindows()
break
elif key & 0xFF == ord('c'):
image = np.zeros((512, 512, 3), np.uint8)
points = []
# cm = pixels / 96 * 2.54
| 37.25641
| 126
| 0.604267
|
import cv2
import numpy as np
from math import pow, sqrt
points = []
letters = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
image = np.zeros((512, 512, 3), np.uint8)
def mouseEvent(event, x, y, params, flags):
if event == cv2.EVENT_LBUTTONDOWN:
cv2.circle(image, (x, y), 5, (0, 0, 255), -1)
cv2.putText(image, letters[len(points) if len(points) < 26 else 0], (x, y), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
points.append((x, y))
if len(points) > 1:
last_two_points = points[-2:]
d, midpoint = findDistance(last_two_points)
cv2.putText(image, f'{round(d)} (px)', midpoint, cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
cv2.line(image, tuple(last_two_points[0]), tuple(last_two_points[1]),(0, 255, 0), 2)
return
def findDistance(points):
x1, y1 = points[0]
x2, y2 = points[1]
d = sqrt(pow((x1 - x2), 2) + pow((y1 - y2), 2))
midpoint = tuple(([(x1 + x2)//2, (y1 + y2)//2]))
return d, midpoint
while True:
cv2.putText(image, f'TO CLEAR THE POINTS PRESS (c)', (20, 20), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
cv2.imshow("DISTANCE BETWEEN TWO POINTS", image)
cv2.setMouseCallback("DISTANCE BETWEEN TWO POINTS", mouseEvent, None)
key = cv2.waitKey(1)
if key & 0xFF == 27:
cv2.destroyAllWindows()
break
elif key & 0xFF == ord('c'):
image = np.zeros((512, 512, 3), np.uint8)
points = []
# cm = pixels / 96 * 2.54
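# Small helper built on the comment above: convert a pixel distance to
# centimetres assuming a 96-DPI display (the DPI figure is an assumption).
def pixels_to_cm(pixels, dpi=96):
    return pixels / dpi * 2.54

d, midpoint = findDistance([(0, 0), (96, 0)])
print(pixels_to_cm(d))   # 2.54 cm for a 96-pixel segment at 96 DPI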
| 772
| 0
| 45
|
8e8c991f6293082c8cec862c8abc181e7ff19a46
| 1,948
|
py
|
Python
|
Learning/python_data_analysis8.py
|
VictoriaGuXY/MCO-Menu-Checker-Online
|
706e2e1bf7395cc344f382ea2ac53d964d459f86
|
[
"MIT"
] | null | null | null |
Learning/python_data_analysis8.py
|
VictoriaGuXY/MCO-Menu-Checker-Online
|
706e2e1bf7395cc344f382ea2ac53d964d459f86
|
[
"MIT"
] | null | null | null |
Learning/python_data_analysis8.py
|
VictoriaGuXY/MCO-Menu-Checker-Online
|
706e2e1bf7395cc344f382ea2ac53d964d459f86
|
[
"MIT"
] | null | null | null |
import json
import pandas as pd
import numpy as np
from pandas import DataFrame
"""
output
"""
# Note: some output is shortened to save space.
# This file introduces methods to group data.
# Data from https://github.com/mwaskom/seaborn-data
df = pd.read_csv('E:\\tips.csv')
"""
total_bill tip sex smoker day time size
0 16.99 1.01 Female No Sun Dinner 2
1 10.34 1.66 Male No Sun Dinner 3
2 21.01 3.50 Male No Sun Dinner 3
3 23.68 3.31 Male No Sun Dinner 2
4 24.59 3.61 Female No Sun Dinner 4
5 25.29 4.71 Male No Sun Dinner 4
.. ... ... ... ... ... ... ...
240 27.18 2.00 Female Yes Sat Dinner 2
241 22.67 2.00 Male Yes Sat Dinner 2
242 17.82 1.75 Male No Sat Dinner 2
243 18.78 3.00 Female No Thur Dinner 2
[244 rows x 7 columns]
"""
# ------------------------------------------------------------------------------
# if we want to form group based on 'day' column
group = df.groupby('day')
# print out the first value (first line) in each group
print (group.first())
"""
total_bill tip sex smoker time size
day
Fri 28.97 3.00 Male Yes Dinner 2
Sat 20.65 3.35 Male No Dinner 3
Sun 16.99 1.01 Female No Dinner 2
Thur 27.20 4.00 Male No Lunch 4
"""
# print out the last value (last line) in each group
print (group.last())
"""
total_bill tip sex smoker time size
day
Fri 10.09 2.00 Female Yes Lunch 2
Sat 17.82 1.75 Male No Dinner 2
Sun 15.69 1.50 Male Yes Dinner 2
Thur 18.78 3.00 Female No Dinner 2
"""
| 32.466667
| 80
| 0.479466
|
import json
import pandas as pd
import numpy as np
from pandas import DataFrame
"""
output
"""
# Note: some output is shortened to save space.
# This file introduces methods to group data.
# Data from https://github.com/mwaskom/seaborn-data
df = pd.read_csv('E:\\tips.csv')
"""
total_bill tip sex smoker day time size
0 16.99 1.01 Female No Sun Dinner 2
1 10.34 1.66 Male No Sun Dinner 3
2 21.01 3.50 Male No Sun Dinner 3
3 23.68 3.31 Male No Sun Dinner 2
4 24.59 3.61 Female No Sun Dinner 4
5 25.29 4.71 Male No Sun Dinner 4
.. ... ... ... ... ... ... ...
240 27.18 2.00 Female Yes Sat Dinner 2
241 22.67 2.00 Male Yes Sat Dinner 2
242 17.82 1.75 Male No Sat Dinner 2
243 18.78 3.00 Female No Thur Dinner 2
[244 rows x 7 columns]
"""
# ------------------------------------------------------------------------------
# if we want to form group based on 'day' column
group = df.groupby('day')
# print out the first value (first line) in each group
print (group.first())
"""
total_bill tip sex smoker time size
day
Fri 28.97 3.00 Male Yes Dinner 2
Sat 20.65 3.35 Male No Dinner 3
Sun 16.99 1.01 Female No Dinner 2
Thur 27.20 4.00 Male No Lunch 4
"""
# print out the last value (last line) in each group
print (group.last())
"""
total_bill tip sex smoker time size
day
Fri 10.09 2.00 Female Yes Lunch 2
Sat 17.82 1.75 Male No Dinner 2
Sun 15.69 1.50 Male Yes Dinner 2
Thur 18.78 3.00 Female No Dinner 2
"""
| 0
| 0
| 0
|
948080e247360f7be9e2aa7cdc3fd4bb0c67bdac
| 438
|
py
|
Python
|
functions/reportIssue.py
|
chiluf/visvis.dev
|
373846ea25044b7ca50f44c63dab4248e14deacd
|
[
"BSD-3-Clause"
] | null | null | null |
functions/reportIssue.py
|
chiluf/visvis.dev
|
373846ea25044b7ca50f44c63dab4248e14deacd
|
[
"BSD-3-Clause"
] | null | null | null |
functions/reportIssue.py
|
chiluf/visvis.dev
|
373846ea25044b7ca50f44c63dab4248e14deacd
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
def reportIssue():
""" help()
Open a webbrowser with the visvis website at the issue list.
"""
import webbrowser
webbrowser.open("http://code.google.com/p/visvis/issues/list")
if __name__ == '__main__':
reportIssue()
| 23.052632
| 66
| 0.639269
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
def reportIssue():
""" help()
Open a webbrowser with the visvis website at the issue list.
"""
import webbrowser
webbrowser.open("http://code.google.com/p/visvis/issues/list")
if __name__ == '__main__':
reportIssue()
| 0
| 0
| 0
|
405b1e05e30665caf1b56d799edb993551a9f5b1
| 217
|
py
|
Python
|
thirdfile.py
|
1frenchfrog1/testgithub
|
7191e44d75ba50438d9c2fe8f0fcf9fcf3a2a991
|
[
"MIT"
] | null | null | null |
thirdfile.py
|
1frenchfrog1/testgithub
|
7191e44d75ba50438d9c2fe8f0fcf9fcf3a2a991
|
[
"MIT"
] | null | null | null |
thirdfile.py
|
1frenchfrog1/testgithub
|
7191e44d75ba50438d9c2fe8f0fcf9fcf3a2a991
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
def printme3( str ):
"This prints a passed string into this function"
print(str)
return
def printme3too( str ):
"This prints a passed string into this function"
print(str)
return
| 18.083333
| 51
| 0.686636
|
#!/usr/bin/python
def printme3( str ):
"This prints a passed string into this function"
print(str)
return
def printme3too( str ):
"This prints a passed string into this function"
print(str)
return
| 0
| 0
| 0
|
52c36ddcbbbc1ea0125baf76215d709418864b64
| 642
|
py
|
Python
|
lec7.py
|
uni-student234/ISAT252
|
4c0942919c432456fe26900c23f076161b4cc266
|
[
"MIT"
] | null | null | null |
lec7.py
|
uni-student234/ISAT252
|
4c0942919c432456fe26900c23f076161b4cc266
|
[
"MIT"
] | null | null | null |
lec7.py
|
uni-student234/ISAT252
|
4c0942919c432456fe26900c23f076161b4cc266
|
[
"MIT"
] | null | null | null |
"""
Week 2, day 7, lec 7
"""
# i = 5
# while i >= 0:
# i = i - 1
# if i == 3:
# # break #breaks the smallest loop
# # continue #skips the current iteration and moves on
# # pass #does nothing, but is a placeholder if you need something for syntax
# print(i)
# for word in 'hello world'.split():
# print(word)
# for str_item in word:
# if str_item == '1':
# break
# print(str_item)
# try:
# print(1/0)
# except ZeroDivisionError:
# print('error')
i = 5
while i >= 0:
try:
print(1/(i-3))
except:
pass
i = i - 1
| 20.0625
| 90
| 0.489097
|
"""
Week 2, day 7, lec 7
"""
# i = 5
# while i >= 0:
# i = i - 1
# if i == 3:
# # break #breaks the smallest loop
# # continue #skips the current iteration and moves on
# # pass #does nothing, but is a placeholder if you need something for syntax
# print(i)
# for word in 'hello world'.split():
# print(word)
# for str_item in word:
# if str_item == '1':
# break
# print(str_item)
# try:
# print(1/0)
# except ZeroDivisionError:
# print('error')
i = 5
while i >= 0:
try:
print(1/(i-3))
except:
pass
i = i - 1
| 0
| 0
| 0
|
6446ebc359e3c3467ceb30fabeaa007c3100a7f7
| 11,447
|
py
|
Python
|
scripts/survivor_analysis/utils/annotate.py
|
a-paxton/oss-community-health
|
93ff4d266b5390b53d8ed59f71616de68bcfdda7
|
[
"MIT"
] | null | null | null |
scripts/survivor_analysis/utils/annotate.py
|
a-paxton/oss-community-health
|
93ff4d266b5390b53d8ed59f71616de68bcfdda7
|
[
"MIT"
] | 1
|
2022-03-22T19:32:27.000Z
|
2022-03-23T12:43:08.000Z
|
scripts/survivor_analysis/utils/annotate.py
|
a-paxton/oss-community-health
|
93ff4d266b5390b53d8ed59f71616de68bcfdda7
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from collections import Counter
from datetime import datetime
from nltk.tokenize import RegexpTokenizer
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import re
def annotate_logs(comments, tickets):
"""
Annotates comments and tickets with additional information:
1. whether the body was updated (Boolean)
2. the number of PRs and issues opened by the comment author at the time
of the comment posting
3. comment order (comment dataframe only)
4. identify whether ticket is closed (Boolean; ticket dataframe only)
5. identify whether a comment is associated to an issue or a PR
Requires: pandas
Parameters
----------
comments : pd.DataFrame
tickets : pd.DataFrame
Returns
-------
The same dataframe, but with additional columns
Examples
--------
>> import pandas as pd
>> import utils
>> tickets = pd.read_csv("data/numpy/issues.tsv", sep="\t")
>> comments = pd.read_csv("data/numpy/comments.tsv", sep="\t")
>> comments, tickets = utils.annotate_logs(comments, tickets)
"""
# identify whether the body of comments or tickets were updated
comments["was_updated"] = comments["created_at"] != comments["updated_at"]
tickets["was_updated"] = tickets["created_at"] != tickets["updated_at"]
# comments df: add number of PRs created by author to date
num_PR_per_pers = [
sum((tickets["created_at"] < created_at) &
(tickets["type"] == "pull_request") &
(tickets["author_id"] == author_id))
for created_at, author_id
in zip(comments["created_at"], comments["author_id"])]
comments["num_PR_created"] = num_PR_per_pers
# issues df: add number of PRs created by author to date
num_PR_per_pers = [
sum((tickets["created_at"] < created_at) &
(tickets["type"] == "pull_request") &
(tickets["author_id"] == author_id))
for created_at, author_id
in zip(tickets["created_at"], tickets["author_id"])]
tickets["num_PR_created"] = num_PR_per_pers
# comments df: add number of issues created by author to date
num_issue_per_pers = [
sum((tickets["created_at"] < created_at) &
(tickets["type"] == "issue") &
(tickets["author_id"] == author_id))
for created_at, author_id
in zip(comments["created_at"], comments["author_id"])]
comments["num_issue_created"] = num_issue_per_pers
# tickets df: add number of issues created by author to date
num_issue_per_pers = [
sum((tickets["created_at"] < created_at) &
(tickets["type"] == "issue") &
(tickets["author_id"] == author_id))
for created_at, author_id
in zip(tickets["created_at"], tickets["author_id"])]
tickets["num_issue_created"] = num_issue_per_pers
# track the comment order
comments['comment_order'] = comments.sort_values(by=['created_at']) \
.groupby(by=['ticket_id']) \
.cumcount()
# identify whether the PR is closed
tickets['is_closed'] = pd.notnull(tickets['closed_at'])
mask = tickets["closed_at"].isnull()
tickets.loc[mask, "closed_at"] = pd.to_datetime(datetime.now())
open_duration = (
pd.to_datetime(tickets["closed_at"]) -
pd.to_datetime(tickets["created_at"]))
tickets["open_duration"] = open_duration.apply(
lambda x: x.total_seconds())
# Now we want to remove this estimate for anything created before 1970
m = [True if c.startswith("1970") else False
for c in tickets["created_at"]]
tickets.loc[m, "open_duration"] = np.nan
# For each comment, get the information on when the corresponding ticket
# has been opened when it is available (comments can also be added to
# commits)
tickets.set_index("ticket_id", inplace=True, drop=False)
    # We're using the reindex function to tackle the case where we don't have
    # the ticket associated with a particular comment.
comments["ticket_created_at"] = tickets.reindex(
comments["ticket_id"])["created_at"].values
comments["type"] = tickets.reindex(
comments["ticket_id"])["type"].values
# Reset the old index
tickets.set_index("id", inplace=True, drop=False)
# return the dataframes
return comments, tickets
def body_cleanup(comments, grateful_list, bot_list):
"""
Prepare comment or issue dataframe for text analysis:
1. Count number of times gratitude words appear in HTML comments
(i.e., auto-generated templates for PRs and issues provided
by projects)
2. Remove HTML comments
3. Remove quoted text
4. Strip newlines
5. Count and remove code blocks
6. Identify other users referenced in body
7. Flag whether the author was a bot
Requires: pandas , nltk , collections , re
Parameters
----------
comments : pd.DataFrame, ideally annotated with `annotate_logs()`;
can be run with either comments df or issues/tickets df
grateful_list : list or pd.Series of gratitude words to identify;
currently works only with grateful unigrams
bot_list : list or pd.Series of bot usernames to be ignored
Returns
-------
The same dataframe, but with cleaned body text and new columns
(code_blocks , referenced_users , bot_flag)
Examples
--------
>> import pandas as pd
>> import utils
>> comments = pd.read_csv("data/numpy/comments.tsv", sep="\t")
>> comments, tickets = utils.annotate.annotate_logs(comments, tickets)
>> comments = utils.annotate.body_cleanup(comments, bot_list_df)
"""
# replace all NaN with empty strings
comments['body'] = comments['body'].replace(np.nan, '', regex=True)
# count thanks in HTML comments
comments['html_comments'] = comments['body'].str.findall('(\<\!--.*?--\>)').apply(' '.join)
# tokenize and count words
tokenizer = RegexpTokenizer(r'\w+')
comments['html_tokenized'] = comments['html_comments'].apply(str.lower).apply(tokenizer.tokenize)
comments['html_word_count'] = comments['html_tokenized'].apply(lambda x: Counter(x))
# count words if they're in our grateful list
comments['automatic_grateful_count'] = (
comments['html_word_count'].apply(
lambda x: np.sum([v for k, v in x.items()
if k in grateful_list])))
# let us know which ones were used
comments['automatic_grateful_list'] = (
comments['html_word_count'].apply(
lambda x: [k for k in x if k in grateful_list]))
# remove the columns we don't need anymore
comments = comments.drop(columns=['html_tokenized',
'html_word_count'])
# remove the HTML comments from the body
comments['body'] = (comments['body'].str.replace(
"(<!--.*?-->)", " ",
regex=True,
flags=re.DOTALL))
# remove text quotes
comments['body'] = (comments['body'].replace(
"(^|\n|\r)+\>.*(?=\n|$)", " ",
regex=True))
# remove newlines
comments['body'] = (comments['body'].replace(
"[\n\r]+", " ", regex=True))
# count and then remove code blocks
comments['code_blocks'] = comments['body'].str.count("\`{3}")/2
comments['body'] = (comments['body'].replace(
"\`{3}.*\`{3}", " ", regex=True))
# identify other humans
comments['referenced_users'] = comments['body'].str.findall('@\w{1,}')
# identify bots
comments['bot_flag'] = comments['author_name'].isin(bot_list)
# return our dataframe
return comments
def add_sentiment(comments):
"""
Add sentiment analysis scores to comments dataframe:
* negative emotion
* positive emotion
* neutral emotion
* compound emotion
Requires: pandas , vaderSentiment
For more on vaderSentiment, see https://github.com/cjhutto/vaderSentiment
Parameters
----------
comments : pd.DataFrame
ideally after `annotate_logs()` and `body_cleanup()`;
can be run with either comments df or issues/tickets df
Returns
-------
The same dataframe but with new sentiment columns
Examples
--------
>> import pandas as pd
>> import utils
>> comments = pd.read_csv("data/numpy/comments.tsv", sep="\t")
>> comments, tickets = utils.annotate.annotate_logs(comments, tickets)
>> comments = utils.annotate.body_cleanup(comments, bot_list_df)
>> comments = utils.annotate.add_sentiment(comments)
"""
# initialize sentiment analyzer
analyser = SentimentIntensityAnalyzer()
# remove NaNs
comments['body'] = comments['body'].replace(np.nan, ' ', regex=True)
# run sentiment analyzer over each comment body
sentiment_df = (
comments['body']
.apply(analyser.polarity_scores)
.astype(str)
.str.strip('{}')
.str.split(', ', expand=True))
# split the emotion output dictionary into new columns
# (thanks to https://stackoverflow.com/a/13053267 for partial solution)
comments['negative_emotion'] = sentiment_df[0].str.split(
': ').str[-1].astype(float)
comments['neutral_emotion'] = sentiment_df[1].str.split(
': ').str[-1].astype(float)
comments['positive_emotion'] = sentiment_df[2].str.split(
': ').str[-1].astype(float)
comments['compound_emotion'] = sentiment_df[3].str.split(
': ').str[-1].astype(float)
# return our dataframe
return comments
def add_gratitude(comments, grateful_list):
"""
Track expressions of gratitude:
* overall counts
* specific words
Thanks to https://stackoverflow.com/a/47686394
Requires: pandas , nltk , collections
Parameters
----------
comments : pd.DataFrame
ideally after `annotate_logs()` and `body_cleanup()`;
can be run with either comments df or issues/tickets df
grateful_list : list or pd.Series of gratitude words to identify;
currently works only with grateful unigrams
Returns
-------
The same dataframe but with new gratitude columns
Examples
--------
>> import pandas as pd
>> import utils
>> comments = pd.read_csv("data/numpy/comments.tsv", sep="\t")
>> comments, tickets = utils.annotate.annotate_logs(comments, tickets)
>> comments = utils.annotate.body_cleanup(comments, bot_list_df)
>> comments = utils.annotate.add_gratitude(comments)
"""
# tokenize and count words
tokenizer = RegexpTokenizer(r'\w+')
comments['tokenized'] = comments['body'].apply(
str.lower).apply(tokenizer.tokenize)
comments['word_count'] = comments['tokenized'].apply(lambda x: Counter(x))
# count words if they're in our grateful list
comments['grateful_count'] = (
comments['word_count'].apply(
lambda x: np.sum([v for k, v in x.items()
if k in grateful_list])))
# let us know which ones were used
comments['grateful_list'] = (
comments['word_count'].apply(
lambda x: [k for k in x if k in grateful_list]))
# remove the columns we don't need anymore
comments = comments.drop(columns=['tokenized', 'word_count'])
# spit back our dataframe now
return comments
| 34.478916
| 101
| 0.638857
|
import pandas as pd
import numpy as np
from collections import Counter
from datetime import datetime
from nltk.tokenize import RegexpTokenizer
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import re
def annotate_logs(comments, tickets):
"""
Annotates comments and tickets with additional information:
1. whether the body was updated (Boolean)
2. the number of PRs and issues opened by the comment author at the time
of the comment posting
3. comment order (comment dataframe only)
4. identify whether ticket is closed (Boolean; ticket dataframe only)
5. identify whether a comment is associated to an issue or a PR
Requires: pandas
Parameters
----------
comments : pd.DataFrame
tickets : pd.DataFrame
Returns
-------
The same dataframe, but with additional columns
Examples
--------
>> import pandas as pd
>> import utils
>> tickets = pd.read_csv("data/numpy/issues.tsv", sep="\t")
>> comments = pd.read_csv("data/numpy/comments.tsv", sep="\t")
>> comments, tickets = utils.annotate_logs(comments, tickets)
"""
# identify whether the body of comments or tickets were updated
comments["was_updated"] = comments["created_at"] != comments["updated_at"]
tickets["was_updated"] = tickets["created_at"] != tickets["updated_at"]
# comments df: add number of PRs created by author to date
num_PR_per_pers = [
sum((tickets["created_at"] < created_at) &
(tickets["type"] == "pull_request") &
(tickets["author_id"] == author_id))
for created_at, author_id
in zip(comments["created_at"], comments["author_id"])]
comments["num_PR_created"] = num_PR_per_pers
# issues df: add number of PRs created by author to date
num_PR_per_pers = [
sum((tickets["created_at"] < created_at) &
(tickets["type"] == "pull_request") &
(tickets["author_id"] == author_id))
for created_at, author_id
in zip(tickets["created_at"], tickets["author_id"])]
tickets["num_PR_created"] = num_PR_per_pers
# comments df: add number of issues created by author to date
num_issue_per_pers = [
sum((tickets["created_at"] < created_at) &
(tickets["type"] == "issue") &
(tickets["author_id"] == author_id))
for created_at, author_id
in zip(comments["created_at"], comments["author_id"])]
comments["num_issue_created"] = num_issue_per_pers
# tickets df: add number of issues created by author to date
num_issue_per_pers = [
sum((tickets["created_at"] < created_at) &
(tickets["type"] == "issue") &
(tickets["author_id"] == author_id))
for created_at, author_id
in zip(tickets["created_at"], tickets["author_id"])]
tickets["num_issue_created"] = num_issue_per_pers
# track the comment order
comments['comment_order'] = comments.sort_values(by=['created_at']) \
.groupby(by=['ticket_id']) \
.cumcount()
# identify whether the PR is closed
tickets['is_closed'] = pd.notnull(tickets['closed_at'])
mask = tickets["closed_at"].isnull()
tickets.loc[mask, "closed_at"] = pd.to_datetime(datetime.now())
open_duration = (
pd.to_datetime(tickets["closed_at"]) -
pd.to_datetime(tickets["created_at"]))
tickets["open_duration"] = open_duration.apply(
lambda x: x.total_seconds())
# Now we want to remove this estimate for anything created before 1970
m = [True if c.startswith("1970") else False
for c in tickets["created_at"]]
tickets.loc[m, "open_duration"] = np.nan
# For each comment, get the information on when the corresponding ticket
# has been opened when it is available (comments can also be added to
# commits)
tickets.set_index("ticket_id", inplace=True, drop=False)
    # We're using the reindex function to tackle the case where we don't have
    # the ticket associated with a particular comment.
comments["ticket_created_at"] = tickets.reindex(
comments["ticket_id"])["created_at"].values
comments["type"] = tickets.reindex(
comments["ticket_id"])["type"].values
# Reset the old index
tickets.set_index("id", inplace=True, drop=False)
# return the dataframes
return comments, tickets
def body_cleanup(comments, grateful_list, bot_list):
"""
Prepare comment or issue dataframe for text analysis:
1. Count number of times gratitude words appear in HTML comments
(i.e., auto-generated templates for PRs and issues provided
by projects)
2. Remove HTML comments
3. Remove quoted text
4. Strip newlines
5. Count and remove code blocks
6. Identify other users referenced in body
7. Flag whether the author was a bot
Requires: pandas , nltk , collections , re
Parameters
----------
comments : pd.DataFrame, ideally annotated with `annotate_logs()`;
can be run with either comments df or issues/tickets df
grateful_list : list or pd.Series of gratitude words to identify;
currently works only with grateful unigrams
bot_list : list or pd.Series of bot usernames to be ignored
Returns
-------
The same dataframe, but with cleaned body text and new columns
(code_blocks , referenced_users , bot_flag)
Examples
--------
>> import pandas as pd
>> import utils
>> comments = pd.read_csv("data/numpy/comments.tsv", sep="\t")
>> comments, tickets = utils.annotate.annotate_logs(comments, tickets)
>> comments = utils.annotate.body_cleanup(comments, bot_list_df)
"""
# replace all NaN with empty strings
comments['body'] = comments['body'].replace(np.nan, '', regex=True)
# count thanks in HTML comments
comments['html_comments'] = comments['body'].str.findall('(\<\!--.*?--\>)').apply(' '.join)
# tokenize and count words
tokenizer = RegexpTokenizer(r'\w+')
comments['html_tokenized'] = comments['html_comments'].apply(str.lower).apply(tokenizer.tokenize)
comments['html_word_count'] = comments['html_tokenized'].apply(lambda x: Counter(x))
# count words if they're in our grateful list
comments['automatic_grateful_count'] = (
comments['html_word_count'].apply(
lambda x: np.sum([v for k, v in x.items()
if k in grateful_list])))
# let us know which ones were used
comments['automatic_grateful_list'] = (
comments['html_word_count'].apply(
lambda x: [k for k in x if k in grateful_list]))
# remove the columns we don't need anymore
comments = comments.drop(columns=['html_tokenized',
'html_word_count'])
# remove the HTML comments from the body
comments['body'] = (comments['body'].str.replace(
"(<!--.*?-->)", " ",
regex=True,
flags=re.DOTALL))
# remove text quotes
comments['body'] = (comments['body'].replace(
"(^|\n|\r)+\>.*(?=\n|$)", " ",
regex=True))
# remove newlines
comments['body'] = (comments['body'].replace(
"[\n\r]+", " ", regex=True))
# count and then remove code blocks
comments['code_blocks'] = comments['body'].str.count("\`{3}")/2
comments['body'] = (comments['body'].replace(
"\`{3}.*\`{3}", " ", regex=True))
# identify other humans
comments['referenced_users'] = comments['body'].str.findall('@\w{1,}')
# identify bots
comments['bot_flag'] = comments['author_name'].isin(bot_list)
# return our dataframe
return comments
def add_sentiment(comments):
"""
Add sentiment analysis scores to comments dataframe:
* negative emotion
* positive emotion
* neutral emotion
* compound emotion
Requires: pandas , vaderSentiment
For more on vaderSentiment, see https://github.com/cjhutto/vaderSentiment
Parameters
----------
comments : pd.DataFrame
ideally after `annotate_logs()` and `body_cleanup()`;
can be run with either comments df or issues/tickets df
Returns
-------
The same dataframe but with new sentiment columns
Examples
--------
>> import pandas as pd
>> import utils
>> comments = pd.read_csv("data/numpy/comments.tsv", sep="\t")
>> comments, tickets = utils.annotate.annotate_logs(comments, tickets)
>> comments = utils.annotate.body_cleanup(comments, grateful_list, bot_list_df)
>> comments = utils.annotate.add_sentiment(comments)
"""
# initialize sentiment analyzer
analyser = SentimentIntensityAnalyzer()
# remove NaNs
comments['body'] = comments['body'].replace(np.nan, ' ', regex=True)
# run sentiment analyzer over each comment body
sentiment_df = (
comments['body']
.apply(analyser.polarity_scores)
.astype(str)
.str.strip('{}')
.str.split(', ', expand=True))
# split the emotion output dictionary into new columns
# (thanks to https://stackoverflow.com/a/13053267 for partial solution)
comments['negative_emotion'] = sentiment_df[0].str.split(
': ').str[-1].astype(float)
comments['neutral_emotion'] = sentiment_df[1].str.split(
': ').str[-1].astype(float)
comments['positive_emotion'] = sentiment_df[2].str.split(
': ').str[-1].astype(float)
comments['compound_emotion'] = sentiment_df[3].str.split(
': ').str[-1].astype(float)
# return our dataframe
return comments
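# A minimal sketch of what the VADER analyzer returns for a single string; the
# sample sentence is hypothetical. polarity_scores() yields a dict with 'neg',
# 'neu', 'pos', and 'compound' keys, which the code above spreads into
# separate dataframe columns.
def _sketch_vader_scores():
    from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
    analyser = SentimentIntensityAnalyzer()
    scores = analyser.polarity_scores("Thanks, this fix works great!")
    return scores  # e.g. scores['compound'] > 0 for a clearly positive comment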
def add_gratitude(comments, grateful_list):
"""
Track expressions of gratitude:
* overall counts
* specific words
Thanks to https://stackoverflow.com/a/47686394
Requires: pandas, numpy, nltk, collections
Parameters
----------
comments : pd.DataFrame
ideally after `annotate_logs()` and `body_cleanup()`;
can be run with either comments df or issues/tickets df
grateful_list : list or pd.Series of gratitude words to identify;
currently works only with grateful unigrams
Returns
-------
The same dataframe but with new gratitude columns
Examples
--------
>> import pandas as pd
>> import utils
>> comments = pd.read_csv("data/numpy/comments.tsv", sep="\t")
>> comments, tickets = utils.annotate.annotate_logs(comments, tickets)
>> comments = utils.annotate.body_cleanup(comments, grateful_list, bot_list_df)
>> comments = utils.annotate.add_gratitude(comments, grateful_list)
"""
# tokenize and count words
tokenizer = RegexpTokenizer(r'\w+')
comments['tokenized'] = comments['body'].apply(
str.lower).apply(tokenizer.tokenize)
comments['word_count'] = comments['tokenized'].apply(lambda x: Counter(x))
# count words if they're in our grateful list
comments['grateful_count'] = (
comments['word_count'].apply(
lambda x: np.sum([v for k, v in x.items()
if k in grateful_list])))
# let us know which ones were used
comments['grateful_list'] = (
comments['word_count'].apply(
lambda x: [k for k in x if k in grateful_list]))
# remove the columns we don't need anymore
comments = comments.drop(columns=['tokenized', 'word_count'])
# spit back our dataframe now
return comments
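# A small sketch of the tokenize-and-count step above, applied to one
# hypothetical body string; grateful_list here is an assumed toy lexicon.
def _sketch_gratitude_count():
    from collections import Counter
    from nltk.tokenize import RegexpTokenizer
    grateful_list = ["thanks", "thank", "grateful"]
    tokens = RegexpTokenizer(r"\w+").tokenize("thanks a lot, thanks!")
    counts = Counter(tokens)  # Counter({'thanks': 2, 'a': 1, 'lot': 1})
    grateful_count = sum(v for k, v in counts.items() if k in grateful_list)  # 2
    grateful_words = [k for k in counts if k in grateful_list]                # ['thanks']
    return grateful_count, grateful_words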
| 0
| 0
| 0
|
46a90fe428c07ac7366934d1e4ee7724a8b4f434
| 352
|
py
|
Python
|
packages/Python/lldbsuite/test/python_api/sbtype_typeclass/TestSBTypeTypeClass.py
|
nathawes/swift-lldb
|
3cbf7470e0f9191ec1fc1c69ce8048c1dc64ec77
|
[
"Apache-2.0"
] | 427
|
2018-05-29T14:21:02.000Z
|
2022-03-16T03:17:54.000Z
|
packages/Python/lldbsuite/test/python_api/sbtype_typeclass/TestSBTypeTypeClass.py
|
DalavanCloud/lldb
|
e913eaf2468290fb94c767d474d611b41a84dd69
|
[
"Apache-2.0"
] | 25
|
2018-07-23T08:34:15.000Z
|
2021-11-05T07:13:36.000Z
|
packages/Python/lldbsuite/test/python_api/sbtype_typeclass/TestSBTypeTypeClass.py
|
DalavanCloud/lldb
|
e913eaf2468290fb94c767d474d611b41a84dd69
|
[
"Apache-2.0"
] | 52
|
2018-07-19T19:57:32.000Z
|
2022-03-11T16:05:38.000Z
|
from lldbsuite.test import decorators
from lldbsuite.test import lldbinline
lldbinline.MakeInlineTest(
__file__, globals(), [
decorators.skipIfFreeBSD, decorators.skipIfLinux,
decorators.skipIfWindows,
decorators.expectedFailureAll(
oslist=['macosx'], archs=['i386'],
bugnumber='rdar://28656677')])
| 32
| 57
| 0.6875
|
from lldbsuite.test import decorators
from lldbsuite.test import lldbinline
lldbinline.MakeInlineTest(
__file__, globals(), [
decorators.skipIfFreeBSD, decorators.skipIfLinux,
decorators.skipIfWindows,
decorators.expectedFailureAll(
oslist=['macosx'], archs=['i386'],
bugnumber='rdar://28656677')])
| 0
| 0
| 0
|
af4dceb229fa3c43802c126ad350cbf15950b67e
| 1,585
|
bzl
|
Python
|
js/extensions.bzl
|
stoiky/rules_js
|
e61b61b98c2f5c733bf804f78db9f55b1fb2d599
|
[
"Apache-2.0"
] | null | null | null |
js/extensions.bzl
|
stoiky/rules_js
|
e61b61b98c2f5c733bf804f78db9f55b1fb2d599
|
[
"Apache-2.0"
] | null | null | null |
js/extensions.bzl
|
stoiky/rules_js
|
e61b61b98c2f5c733bf804f78db9f55b1fb2d599
|
[
"Apache-2.0"
] | null | null | null |
"""Adapt repository rules in npm_import.bzl to be called from MODULE.bazel
See https://bazel.build/docs/bzlmod#extension-definition
"""
load("//js/private:pnpm_utils.bzl", "pnpm_utils")
load("//js/private:translate_pnpm_lock.bzl", translate_pnpm_lock_lib = "translate_pnpm_lock")
load("//js:npm_import.bzl", "npm_import", "translate_pnpm_lock")
load("//js/private:transitive_closure.bzl", "translate_to_transitive_closure")
npm = module_extension(
implementation = _extension_impl,
tag_classes = {
"translate_pnpm_lock": tag_class(attrs = dict({"name": attr.string()}, **translate_pnpm_lock_lib.attrs)),
# todo: support individual packages as well
# "package": tag_class(attrs = dict({"name": attr.string()}, **_npm_import.attrs)),
},
)
| 42.837838
| 113
| 0.637855
|
"""Adapt repository rules in npm_import.bzl to be called from MODULE.bazel
See https://bazel.build/docs/bzlmod#extension-definition
"""
load("//js/private:pnpm_utils.bzl", "pnpm_utils")
load("//js/private:translate_pnpm_lock.bzl", translate_pnpm_lock_lib = "translate_pnpm_lock")
load("//js:npm_import.bzl", "npm_import", "translate_pnpm_lock")
load("//js/private:transitive_closure.bzl", "translate_to_transitive_closure")
def _extension_impl(module_ctx):
for mod in module_ctx.modules:
for attr in mod.tags.translate_pnpm_lock:
lockfile = pnpm_utils.parse_pnpm_lock(module_ctx.read(attr.pnpm_lock))
trans = translate_to_transitive_closure(lockfile, attr.prod, attr.dev, attr.no_optional)
imports = translate_pnpm_lock_lib.gen_npm_imports(trans, attr)
for i in imports:
# fixme: pass the rest of the kwargs from i
npm_import(
name = i.name,
package = i.package,
version = i.pnpm_version,
link_packages = i.link_packages,
)
translate_pnpm_lock(
name = "npm",
pnpm_lock = attr.pnpm_lock,
)
npm = module_extension(
implementation = _extension_impl,
tag_classes = {
"translate_pnpm_lock": tag_class(attrs = dict({"name": attr.string()}, **translate_pnpm_lock_lib.attrs)),
# todo: support individual packages as well
# "package": tag_class(attrs = dict({"name": attr.string()}, **_npm_import.attrs)),
},
)
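# A hypothetical MODULE.bazel usage sketch for the extension above; the
# "@aspect_rules_js" module name and the lock file label are assumptions.
# Because _extension_impl hard-codes translate_pnpm_lock(name = "npm"), the
# extension currently exposes a single repository named "npm":
#
#   npm = use_extension("@aspect_rules_js//js:extensions.bzl", "npm")
#   npm.translate_pnpm_lock(
#       name = "npm",
#       pnpm_lock = "//:pnpm-lock.yaml",
#   )
#   use_repo(npm, "npm")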
| 787
| 0
| 23
|
c7b09eb689ac8f721c4645e55ec33f8b5d1f82bf
| 32,780
|
py
|
Python
|
paasta_tools/tron_tools.py
|
zhaoyanh1202/paasta
|
b0c148786f44476fe351fe410f0b81f0c941f3b6
|
[
"Apache-2.0"
] | null | null | null |
paasta_tools/tron_tools.py
|
zhaoyanh1202/paasta
|
b0c148786f44476fe351fe410f0b81f0c941f3b6
|
[
"Apache-2.0"
] | null | null | null |
paasta_tools/tron_tools.py
|
zhaoyanh1202/paasta
|
b0c148786f44476fe351fe410f0b81f0c941f3b6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015-2018 Yelp Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import difflib
import glob
import hashlib
import json
import logging
import os
import pkgutil
import re
import subprocess
import traceback
from string import Formatter
from typing import List
from typing import Tuple
import yaml
from service_configuration_lib import read_extra_service_information
from service_configuration_lib import read_yaml_file
from service_configuration_lib.spark_config import generate_clusterman_metrics_entries
from service_configuration_lib.spark_config import get_aws_credentials
from service_configuration_lib.spark_config import get_resources_requested
from service_configuration_lib.spark_config import get_spark_conf
from service_configuration_lib.spark_config import K8S_AUTH_FOLDER
from service_configuration_lib.spark_config import stringify_spark_env
from paasta_tools.mesos_tools import mesos_services_running_here
try:
from yaml.cyaml import CSafeDumper as Dumper
except ImportError: # pragma: no cover (no libyaml-dev / pypy)
Dumper = yaml.SafeDumper # type: ignore
from paasta_tools.clusterman import get_clusterman_metrics
from paasta_tools.tron.client import TronClient
from paasta_tools.tron import tron_command_context
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import DockerParameter
from paasta_tools.utils import DockerVolume
from paasta_tools.utils import InstanceConfig
from paasta_tools.utils import InvalidInstanceConfig
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import SystemPaastaConfig
from paasta_tools.utils import load_v2_deployments_json
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import NoDeploymentsAvailable
from paasta_tools.utils import time_cache
from paasta_tools.utils import filter_templates_from_config
from paasta_tools.spark_tools import get_webui_url
from paasta_tools.spark_tools import inject_spark_conf_str
from paasta_tools import monitoring_tools
from paasta_tools.monitoring_tools import list_teams
from typing import Optional
from typing import Dict
from typing import Any
log = logging.getLogger(__name__)
logging.getLogger("tron").setLevel(logging.WARNING)
MASTER_NAMESPACE = "MASTER"
SPACER = "."
VALID_MONITORING_KEYS = set(
json.loads(
pkgutil.get_data("paasta_tools.cli", "schemas/tron_schema.json").decode()
)["definitions"]["job"]["properties"]["monitoring"]["properties"].keys()
)
MESOS_EXECUTOR_NAMES = ("paasta", "spark")
DEFAULT_AWS_REGION = "us-west-2"
clusterman_metrics, _ = get_clusterman_metrics()
class TronConfig(dict):
"""System-level configuration for Tron."""
def get_cluster_name(self):
""":returns The name of the Tron cluster"""
try:
return self["cluster_name"]
except KeyError:
raise TronNotConfigured(
"Could not find name of Tron cluster in system Tron config"
)
def get_url(self):
""":returns The URL for the Tron master's API"""
try:
return self["url"]
except KeyError:
raise TronNotConfigured(
"Could not find URL of Tron master in system Tron config"
)
def decompose_instance(instance):
"""Get (job_name, action_name) from an instance."""
decomposed = instance.split(SPACER)
if len(decomposed) != 2:
raise InvalidInstanceConfig("Invalid instance name: %s" % instance)
return (decomposed[0], decomposed[1])
def decompose_executor_id(executor_id) -> Tuple[str, str, int, str]:
"""(service, job, run_number, action)"""
service, job, str_run_number, action, _ = executor_id.split(SPACER)
return (service, job, int(str_run_number), action)
def parse_time_variables(command: str, parse_time: datetime.datetime = None) -> str:
"""Parses an input string and uses the Tron-style dateparsing
to replace time variables. Currently supports only the date/time
variables listed in the tron documentation:
http://tron.readthedocs.io/en/latest/command_context.html#built-in-cc
:param command: the input string to be parsed
:param parse_time: Reference Datetime object to parse the date and time strings, defaults to now.
:returns: A string with the date and time variables replaced
"""
if parse_time is None:
parse_time = datetime.datetime.now()
# We build up a tron context object that has the right
# methods to parse tron-style time syntax
job_context = tron_command_context.JobRunContext(
tron_command_context.CommandContext()
)
# The tron context object needs the run_time attribute set so it knows
# how to interpret the date strings
job_context.job_run.run_time = parse_time
return StringFormatter(job_context).format(command)
class TronJobConfig:
"""Represents a job in Tron, consisting of action(s) and job-level configuration values."""
def format_tron_action_dict(action_config):
"""Generate a dict of tronfig for an action, from the TronActionConfig.
:param job_config: TronActionConfig
"""
executor = action_config.get_executor()
result = {
"command": action_config.get_cmd(),
"executor": executor,
"requires": action_config.get_requires(),
"node": action_config.get_node(),
"retries": action_config.get_retries(),
"retries_delay": action_config.get_retries_delay(),
"expected_runtime": action_config.get_expected_runtime(),
"trigger_downstreams": action_config.get_trigger_downstreams(),
"triggered_by": action_config.get_triggered_by(),
"on_upstream_rerun": action_config.get_on_upstream_rerun(),
"trigger_timeout": action_config.get_trigger_timeout(),
}
if executor in MESOS_EXECUTOR_NAMES:
result["executor"] = "mesos"
result["cpus"] = action_config.get_cpus()
result["mem"] = action_config.get_mem()
result["disk"] = action_config.get_disk()
result["env"] = action_config.get_env()
result["extra_volumes"] = format_volumes(action_config.get_extra_volumes())
result["docker_parameters"] = [
{"key": param["key"], "value": param["value"]}
for param in action_config.format_docker_parameters()
]
constraint_labels = ["attribute", "operator", "value"]
result["constraints"] = [
dict(zip(constraint_labels, constraint))
for constraint in action_config.get_calculated_constraints()
]
result["docker_image"] = action_config.get_docker_url()
# Only pass non-None values, so Tron will use defaults for others
return {key: val for key, val in result.items() if val is not None}
def format_tron_job_dict(job_config):
"""Generate a dict of tronfig for a job, from the TronJobConfig.
:param job_config: TronJobConfig
"""
action_dict = {
action_config.get_action_name(): format_tron_action_dict(action_config)
for action_config in job_config.get_actions()
}
result = {
"node": job_config.get_node(),
"schedule": job_config.get_schedule(),
"actions": action_dict,
"monitoring": job_config.get_monitoring(),
"queueing": job_config.get_queueing(),
"run_limit": job_config.get_run_limit(),
"all_nodes": job_config.get_all_nodes(),
"enabled": job_config.get_enabled(),
"allow_overlap": job_config.get_allow_overlap(),
"max_runtime": job_config.get_max_runtime(),
"time_zone": job_config.get_time_zone(),
"expected_runtime": job_config.get_expected_runtime(),
}
cleanup_config = job_config.get_cleanup_action()
if cleanup_config:
cleanup_action = format_tron_action_dict(cleanup_config)
result["cleanup_action"] = cleanup_action
# Only pass non-None values, so Tron will use defaults for others
return {key: val for key, val in result.items() if val is not None}
@time_cache(ttl=5)
def load_tron_service_config_no_cache(
service,
cluster,
load_deployments=True,
soa_dir=DEFAULT_SOA_DIR,
for_validation=False,
):
"""Load all configured jobs for a service, and any additional config values."""
config = read_extra_service_information(
service_name=service, extra_info=f"tron-{cluster}", soa_dir=soa_dir
)
jobs = filter_templates_from_config(config)
job_configs = [
TronJobConfig(
name=name,
service=service,
cluster=cluster,
config_dict=job,
load_deployments=load_deployments,
soa_dir=soa_dir,
for_validation=for_validation,
)
for name, job in jobs.items()
]
return job_configs
def create_complete_config(service, cluster, soa_dir=DEFAULT_SOA_DIR):
"""Generate a namespace configuration file for Tron, for a service."""
job_configs = load_tron_service_config(
service=service, cluster=cluster, load_deployments=True, soa_dir=soa_dir
)
preproccessed_config = {}
preproccessed_config["jobs"] = {
job_config.get_name(): format_tron_job_dict(job_config)
for job_config in job_configs
}
return yaml.dump(preproccessed_config, Dumper=Dumper, default_flow_style=False)
def list_tron_clusters(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> List[str]:
"""Returns the Tron clusters a service is configured to deploy to."""
search_re = r"/tron-([0-9a-z-_]*)\.yaml$"
service_dir = os.path.join(soa_dir, service)
clusters = []
for filename in glob.glob(f"{service_dir}/*.yaml"):
cluster_re_match = re.search(search_re, filename)
if cluster_re_match is not None:
clusters.append(cluster_re_match.group(1))
return clusters
def parse_service_instance_from_executor_id(task_id: str) -> Tuple[str, str]:
"""Parses tron mesos task ids, like schematizer.traffic_generator.28414.turnstyle.46da87d7-6092-4ed4-b926-ffa7b21c7785"""
try:
service, job, job_run, action, uuid = task_id.split(".")
except Exception as e:
log.warning(
f"Couldn't parse the mesos task id into a valid tron job: {task_id}: {e}"
)
service, job, action = "unknown_service", "unknown_job", "unknown_action"
return service, f"{job}.{action}"
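# A quick sketch using the docstring's sample id: the five dot-separated
# fields split into service, job, run number, action, and uuid, and the
# reported instance is "job.action".
def _sketch_parse_executor_id():
    task_id = "schematizer.traffic_generator.28414.turnstyle.46da87d7-6092-4ed4-b926-ffa7b21c7785"
    service, job, job_run, action, uuid = task_id.split(".")
    return service, f"{job}.{action}"  # ('schematizer', 'traffic_generator.turnstyle')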
| 35.864333
| 125
| 0.652013
|
# Copyright 2015-2018 Yelp Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import difflib
import glob
import hashlib
import json
import logging
import os
import pkgutil
import re
import subprocess
import traceback
from string import Formatter
from typing import List
from typing import Tuple
import yaml
from service_configuration_lib import read_extra_service_information
from service_configuration_lib import read_yaml_file
from service_configuration_lib.spark_config import generate_clusterman_metrics_entries
from service_configuration_lib.spark_config import get_aws_credentials
from service_configuration_lib.spark_config import get_resources_requested
from service_configuration_lib.spark_config import get_spark_conf
from service_configuration_lib.spark_config import K8S_AUTH_FOLDER
from service_configuration_lib.spark_config import stringify_spark_env
from paasta_tools.mesos_tools import mesos_services_running_here
try:
from yaml.cyaml import CSafeDumper as Dumper
except ImportError: # pragma: no cover (no libyaml-dev / pypy)
Dumper = yaml.SafeDumper # type: ignore
from paasta_tools.clusterman import get_clusterman_metrics
from paasta_tools.tron.client import TronClient
from paasta_tools.tron import tron_command_context
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import DockerParameter
from paasta_tools.utils import DockerVolume
from paasta_tools.utils import InstanceConfig
from paasta_tools.utils import InvalidInstanceConfig
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import SystemPaastaConfig
from paasta_tools.utils import load_v2_deployments_json
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import NoDeploymentsAvailable
from paasta_tools.utils import time_cache
from paasta_tools.utils import filter_templates_from_config
from paasta_tools.spark_tools import get_webui_url
from paasta_tools.spark_tools import inject_spark_conf_str
from paasta_tools import monitoring_tools
from paasta_tools.monitoring_tools import list_teams
from typing import Optional
from typing import Dict
from typing import Any
log = logging.getLogger(__name__)
logging.getLogger("tron").setLevel(logging.WARNING)
MASTER_NAMESPACE = "MASTER"
SPACER = "."
VALID_MONITORING_KEYS = set(
json.loads(
pkgutil.get_data("paasta_tools.cli", "schemas/tron_schema.json").decode()
)["definitions"]["job"]["properties"]["monitoring"]["properties"].keys()
)
MESOS_EXECUTOR_NAMES = ("paasta", "spark")
DEFAULT_AWS_REGION = "us-west-2"
clusterman_metrics, _ = get_clusterman_metrics()
class TronNotConfigured(Exception):
pass
class InvalidTronConfig(Exception):
pass
class TronConfig(dict):
"""System-level configuration for Tron."""
def __init__(self, config):
super().__init__(config)
def get_cluster_name(self):
""":returns The name of the Tron cluster"""
try:
return self["cluster_name"]
except KeyError:
raise TronNotConfigured(
"Could not find name of Tron cluster in system Tron config"
)
def get_url(self):
""":returns The URL for the Tron master's API"""
try:
return self["url"]
except KeyError:
raise TronNotConfigured(
"Could not find URL of Tron master in system Tron config"
)
def get_tronfig_folder(cluster, soa_dir):
return os.path.join(soa_dir, "tron", cluster)
def load_tron_config():
return TronConfig(load_system_paasta_config().get_tron_config())
def get_tron_client():
return TronClient(load_tron_config().get_url())
def compose_instance(job, action):
return f"{job}{SPACER}{action}"
def decompose_instance(instance):
"""Get (job_name, action_name) from an instance."""
decomposed = instance.split(SPACER)
if len(decomposed) != 2:
raise InvalidInstanceConfig("Invalid instance name: %s" % instance)
return (decomposed[0], decomposed[1])
def decompose_executor_id(executor_id) -> Tuple[str, str, int, str]:
"""(service, job, run_number, action)"""
service, job, str_run_number, action, _ = executor_id.split(SPACER)
return (service, job, int(str_run_number), action)
class StringFormatter(Formatter):
def __init__(self, context=None):
Formatter.__init__(self)
self.context = context
def get_value(self, key, args, kwds):
if isinstance(key, str):
try:
return kwds[key]
except KeyError:
return self.context[key]
else:
return Formatter.get_value(key, args, kwds)
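# A minimal sketch of the Formatter-with-context fallback above: format keys
# that are not passed as keyword arguments are looked up in the supplied
# context object (a plain dict in this hypothetical example).
def _sketch_string_formatter():
    context = {"runid": "2021-01-01.5"}
    return StringFormatter(context).format("echo {runid}")  # 'echo 2021-01-01.5'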
def parse_time_variables(command: str, parse_time: datetime.datetime = None) -> str:
"""Parses an input string and uses the Tron-style dateparsing
to replace time variables. Currently supports only the date/time
variables listed in the tron documentation:
http://tron.readthedocs.io/en/latest/command_context.html#built-in-cc
:param command: the input string to be parsed
:param parse_time: Reference Datetime object to parse the date and time strings, defaults to now.
:returns: A string with the date and time variables replaced
"""
if parse_time is None:
parse_time = datetime.datetime.now()
# We build up a tron context object that has the right
# methods to parse tron-style time syntax
job_context = tron_command_context.JobRunContext(
tron_command_context.CommandContext()
)
# The tron context object needs the run_time attribute set so it knows
# how to interpret the date strings
job_context.job_run.run_time = parse_time
return StringFormatter(job_context).format(command)
def pick_spark_ui_port(service, instance):
# We don't know what ports will be available on the agent that the driver
# will be scheduled on, so we just try to make them unique per service / instance.
hash_key = f"{service} {instance}".encode()
hash_number = int(hashlib.sha1(hash_key).hexdigest(), 16)
preferred_port = 33000 + (hash_number % 25000)
return preferred_port
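# A self-contained sketch of the stable port derivation above; the service and
# instance names are hypothetical, but the same pair always hashes to the same
# port in the 33000-57999 range.
def _sketch_pick_spark_ui_port():
    import hashlib
    hash_key = "my_service my_job.my_action".encode()
    hash_number = int(hashlib.sha1(hash_key).hexdigest(), 16)
    return 33000 + (hash_number % 25000)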
class TronActionConfig(InstanceConfig):
config_filename_prefix = "tron"
def __init__(
self,
service,
instance,
cluster,
config_dict,
branch_dict,
soa_dir=DEFAULT_SOA_DIR,
for_validation=False,
):
super().__init__(
cluster=cluster,
instance=instance,
service=service,
config_dict=config_dict,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
self.job, self.action = decompose_instance(instance)
# Indicate whether this config object is created for validation
self.for_validation = for_validation
def get_spark_config_dict(self):
spark_config_dict = getattr(self, "_spark_config_dict", None)
# cache the created dict so that we don't need to process it multiple
# times or end up with inconsistent results
if spark_config_dict is not None:
return spark_config_dict
if self.get_spark_cluster_manager() == "mesos":
mesos_leader = (
f"zk://{load_system_paasta_config().get_zk_hosts()}"
if not self.for_validation
else "N/A"
)
else:
mesos_leader = None
aws_creds = get_aws_credentials(
aws_credentials_yaml=self.config_dict.get("aws_credentials_yaml")
)
self._spark_config_dict = get_spark_conf(
cluster_manager=self.get_spark_cluster_manager(),
spark_app_base_name=f"tron_spark_{self.get_service()}_{self.get_instance()}",
user_spark_opts=self.config_dict.get("spark_args", {}),
paasta_cluster=self.get_spark_paasta_cluster(),
paasta_pool=self.get_spark_paasta_pool(),
paasta_service=self.get_service(),
paasta_instance=self.get_instance(),
docker_img=self.get_docker_url(),
aws_creds=aws_creds,
extra_volumes=self.get_volumes(load_system_paasta_config().get_volumes()),
# tron is using environment variable to load the required creds
with_secret=False,
mesos_leader=mesos_leader,
# load_system_paasta already load the default volumes
load_paasta_default_volumes=False,
)
return self._spark_config_dict
def get_job_name(self):
return self.job
def get_action_name(self):
return self.action
def get_deploy_group(self) -> Optional[str]:
return self.config_dict.get("deploy_group", None)
def get_docker_url(
self, system_paasta_config: Optional[SystemPaastaConfig] = None
) -> str:
# It's okay for tronfig to contain things that aren't deployed yet - it's normal for developers to
# push tronfig well before the job is scheduled to run, and either they'll deploy the service before
# or get notified when the job fails.
#
# This logic ensures that we can still pass validation and run setup_tron_namespace even if
# there's nothing in deployments.json yet.
return (
""
if not self.get_docker_image()
else super().get_docker_url(system_paasta_config=system_paasta_config)
)
def get_cmd(self):
command = self.config_dict.get("command")
if self.get_executor() == "spark":
# Spark expects to be able to write to MESOS_SANDBOX if it is set
# but the default value (/mnt/mesos/sandbox) doesn't get mounted in
# our Docker containers, so we unset it here. (Un-setting is fine,
# since Spark will just write to /tmp instead).
command = "unset MESOS_DIRECTORY MESOS_SANDBOX; " + inject_spark_conf_str(
command, stringify_spark_env(self.get_spark_config_dict())
)
return command
def get_spark_paasta_cluster(self):
return self.config_dict.get("spark_paasta_cluster", self.get_cluster())
def get_spark_paasta_pool(self):
return self.config_dict.get("spark_paasta_pool", "batch")
def get_spark_cluster_manager(self):
return self.config_dict.get("spark_cluster_manager", "mesos")
def get_env(self):
env = super().get_env()
if self.get_executor() == "spark":
spark_config_dict = self.get_spark_config_dict()
env["EXECUTOR_CLUSTER"] = self.get_spark_paasta_cluster()
env["EXECUTOR_POOL"] = self.get_spark_paasta_pool()
env["SPARK_OPTS"] = stringify_spark_env(spark_config_dict)
# The actual mesos secret will be decrypted and injected on mesos master when assigning
# tasks.
env["SPARK_MESOS_SECRET"] = "SHARED_SECRET(SPARK_MESOS_SECRET)"
if clusterman_metrics:
env["CLUSTERMAN_RESOURCES"] = json.dumps(
generate_clusterman_metrics_entries(
clusterman_metrics,
get_resources_requested(spark_config_dict),
spark_config_dict["spark.app.name"],
get_webui_url(spark_config_dict["spark.ui.port"]),
)
)
else:
env["CLUSTERMAN_RESOURCES"] = "{}"
if "AWS_ACCESS_KEY_ID" not in env or "AWS_SECRET_ACCESS_KEY" not in env:
try:
access_key, secret_key, session_token = get_aws_credentials(
service=self.get_service(),
aws_credentials_yaml=self.config_dict.get(
"aws_credentials_yaml"
),
)
env["AWS_ACCESS_KEY_ID"] = access_key
env["AWS_SECRET_ACCESS_KEY"] = secret_key
except Exception:
log.warning(
f"Cannot set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment "
f"variables for tron action {self.get_instance()} of service "
f"{self.get_service()} via credential file. Traceback:\n"
f"{traceback.format_exc()}"
)
if "AWS_DEFAULT_REGION" not in env:
env["AWS_DEFAULT_REGION"] = DEFAULT_AWS_REGION
return env
def get_extra_volumes(self):
extra_volumes = super().get_extra_volumes()
if (
self.get_executor() == "spark"
and self.get_spark_cluster_manager() == "kubernetes"
):
extra_volumes.append(
DockerVolume(
{
"hostPath": "/etc/pki/spark",
"containerPath": K8S_AUTH_FOLDER,
"mode": "RO",
}
)
)
return extra_volumes
def get_cpu_burst_add(self) -> float:
""" For Tron jobs, we don't let them burst by default, because they
don't represent "real-time" workloads, and should not impact
neighbors """
return self.config_dict.get("cpu_burst_add", 0)
def get_executor(self):
return self.config_dict.get("executor", "paasta")
def get_healthcheck_mode(self, _) -> None:
return None
def get_node(self):
return self.config_dict.get("node")
def get_retries(self):
return self.config_dict.get("retries")
def get_retries_delay(self):
return self.config_dict.get("retries_delay")
def get_requires(self):
return self.config_dict.get("requires")
def get_expected_runtime(self):
return self.config_dict.get("expected_runtime")
def get_triggered_by(self):
return self.config_dict.get("triggered_by", None)
def get_trigger_downstreams(self):
return self.config_dict.get("trigger_downstreams", None)
def get_on_upstream_rerun(self):
return self.config_dict.get("on_upstream_rerun", None)
def get_trigger_timeout(self):
return self.config_dict.get("trigger_timeout", None)
def get_calculated_constraints(self):
"""Combine all configured Mesos constraints."""
constraints = self.get_constraints()
if constraints is not None:
return constraints
else:
constraints = self.get_extra_constraints()
constraints.extend(
self.get_deploy_constraints(
blacklist=self.get_deploy_blacklist(),
whitelist=self.get_deploy_whitelist(),
# Don't have configs for the paasta cluster
system_deploy_blacklist=[],
system_deploy_whitelist=None,
)
)
constraints.extend(self.get_pool_constraints())
return constraints
def get_nerve_namespace(self) -> None:
return None
def validate(self):
error_msgs = []
error_msgs.extend(super().validate())
# Tron is a little special, because it can *not* have a deploy group
# But only if an action is running via ssh and not via paasta
if (
self.get_deploy_group() is None
and self.get_executor() in MESOS_EXECUTOR_NAMES
):
error_msgs.append(
f"{self.get_job_name()}.{self.get_action_name()} must have a deploy_group set"
)
return error_msgs
def format_docker_parameters(
self,
with_labels: bool = True,
system_paasta_config: Optional[SystemPaastaConfig] = None,
) -> List[DockerParameter]:
"""Formats extra flags for running docker. Will be added in the format
`["--%s=%s" % (e['key'], e['value']) for e in list]` to the `docker run` command
Note: values must be strings"""
parameters = super().format_docker_parameters(
with_labels=with_labels, system_paasta_config=system_paasta_config
)
if self.get_executor() == "spark":
parameters.append({"key": "net", "value": "host"})
return parameters
class TronJobConfig:
"""Represents a job in Tron, consisting of action(s) and job-level configuration values."""
def __init__(
self,
name: str,
config_dict: Dict[str, Any],
cluster: str,
service: Optional[str] = None,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
for_validation: bool = False,
) -> None:
self.name = name
self.config_dict = config_dict
self.cluster = cluster
self.service = service
self.load_deployments = load_deployments
self.soa_dir = soa_dir
# Indicate whether this config object is created for validation
self.for_validation = for_validation
def get_name(self):
return self.name
def get_node(self):
return self.config_dict.get("node", "paasta")
def get_schedule(self):
return self.config_dict.get("schedule")
def get_monitoring(self):
srv_monitoring = dict(
monitoring_tools.read_monitoring_config(self.service, soa_dir=self.soa_dir)
)
tron_monitoring = self.config_dict.get("monitoring", {})
srv_monitoring.update(tron_monitoring)
# filter out non-tron monitoring keys
srv_monitoring = {
k: v for k, v in srv_monitoring.items() if k in VALID_MONITORING_KEYS
}
return srv_monitoring
def get_queueing(self):
return self.config_dict.get("queueing")
def get_run_limit(self):
return self.config_dict.get("run_limit")
def get_all_nodes(self):
return self.config_dict.get("all_nodes")
def get_enabled(self):
return self.config_dict.get("enabled")
def get_allow_overlap(self):
return self.config_dict.get("allow_overlap")
def get_max_runtime(self):
return self.config_dict.get("max_runtime")
def get_time_zone(self):
return self.config_dict.get("time_zone")
def get_service(self) -> Optional[str]:
return self.service or self.config_dict.get("service")
def get_deploy_group(self) -> Optional[str]:
return self.config_dict.get("deploy_group", None)
def get_cluster(self):
return self.cluster
def get_expected_runtime(self):
return self.config_dict.get("expected_runtime")
def _get_action_config(self, action_name, action_dict):
action_service = action_dict.setdefault("service", self.get_service())
action_deploy_group = action_dict.setdefault(
"deploy_group", self.get_deploy_group()
)
if action_service and action_deploy_group and self.load_deployments:
try:
deployments_json = load_v2_deployments_json(
service=action_service, soa_dir=self.soa_dir
)
branch_dict = {
"docker_image": deployments_json.get_docker_image_for_deploy_group(
action_deploy_group
),
"git_sha": deployments_json.get_git_sha_for_deploy_group(
action_deploy_group
),
# TODO: add Tron instances when generating deployments json
"desired_state": "start",
"force_bounce": None,
}
except NoDeploymentsAvailable:
log.warning(
f'Docker image unavailable for {action_service}.{self.get_name()}.{action_dict.get("name")}'
" is it deployed yet?"
)
branch_dict = None
else:
branch_dict = None
action_dict["monitoring"] = self.get_monitoring()
return TronActionConfig(
service=action_service,
instance=compose_instance(self.get_name(), action_name),
cluster=self.get_cluster(),
config_dict=action_dict,
branch_dict=branch_dict,
soa_dir=self.soa_dir,
for_validation=self.for_validation,
)
def get_actions(self):
actions = self.config_dict.get("actions")
return [
self._get_action_config(name, action_dict)
for name, action_dict in actions.items()
]
def get_cleanup_action(self):
action_dict = self.config_dict.get("cleanup_action")
if not action_dict:
return None
# TODO: we should keep this trickery outside paasta repo
return self._get_action_config("cleanup", action_dict)
def check_monitoring(self) -> Tuple[bool, str]:
monitoring = self.get_monitoring()
valid_teams = list_teams()
if monitoring is not None:
team_name = monitoring.get("team", None)
if team_name is None:
return False, "Team name is required for monitoring"
elif team_name not in valid_teams:
suggest_teams = difflib.get_close_matches(
word=team_name, possibilities=valid_teams
)
return (
False,
f"Invalid team name: {team_name}. Do you mean one of these: {suggest_teams}",
)
return True, ""
def check_actions(self) -> Tuple[bool, List[str]]:
actions = self.get_actions()
cleanup_action = self.get_cleanup_action()
if cleanup_action:
actions.append(cleanup_action)
checks_passed = True
msgs: List[str] = []
for action in actions:
action_msgs = action.validate()
if action_msgs:
checks_passed = False
msgs.extend(action_msgs)
return checks_passed, msgs
def validate(self) -> List[str]:
_, error_msgs = self.check_actions()
checks = ["check_monitoring"]
for check in checks:
check_passed, check_msg = getattr(self, check)()
if not check_passed:
error_msgs.append(check_msg)
return error_msgs
def __eq__(self, other):
if isinstance(other, type(self)):
return self.config_dict == other.config_dict
return False
def format_volumes(paasta_volume_list):
return [
{
"container_path": v["containerPath"],
"host_path": v["hostPath"],
"mode": v["mode"],
}
for v in paasta_volume_list
]
def format_master_config(master_config, default_volumes, dockercfg_location):
mesos_options = master_config.get("mesos_options", {})
mesos_options.update(
{
"default_volumes": format_volumes(default_volumes),
"dockercfg_location": dockercfg_location,
}
)
master_config["mesos_options"] = mesos_options
return master_config
def format_tron_action_dict(action_config):
"""Generate a dict of tronfig for an action, from the TronActionConfig.
:param job_config: TronActionConfig
"""
executor = action_config.get_executor()
result = {
"command": action_config.get_cmd(),
"executor": executor,
"requires": action_config.get_requires(),
"node": action_config.get_node(),
"retries": action_config.get_retries(),
"retries_delay": action_config.get_retries_delay(),
"expected_runtime": action_config.get_expected_runtime(),
"trigger_downstreams": action_config.get_trigger_downstreams(),
"triggered_by": action_config.get_triggered_by(),
"on_upstream_rerun": action_config.get_on_upstream_rerun(),
"trigger_timeout": action_config.get_trigger_timeout(),
}
if executor in MESOS_EXECUTOR_NAMES:
result["executor"] = "mesos"
result["cpus"] = action_config.get_cpus()
result["mem"] = action_config.get_mem()
result["disk"] = action_config.get_disk()
result["env"] = action_config.get_env()
result["extra_volumes"] = format_volumes(action_config.get_extra_volumes())
result["docker_parameters"] = [
{"key": param["key"], "value": param["value"]}
for param in action_config.format_docker_parameters()
]
constraint_labels = ["attribute", "operator", "value"]
result["constraints"] = [
dict(zip(constraint_labels, constraint))
for constraint in action_config.get_calculated_constraints()
]
result["docker_image"] = action_config.get_docker_url()
# Only pass non-None values, so Tron will use defaults for others
return {key: val for key, val in result.items() if val is not None}
def format_tron_job_dict(job_config):
"""Generate a dict of tronfig for a job, from the TronJobConfig.
:param job_config: TronJobConfig
"""
action_dict = {
action_config.get_action_name(): format_tron_action_dict(action_config)
for action_config in job_config.get_actions()
}
result = {
"node": job_config.get_node(),
"schedule": job_config.get_schedule(),
"actions": action_dict,
"monitoring": job_config.get_monitoring(),
"queueing": job_config.get_queueing(),
"run_limit": job_config.get_run_limit(),
"all_nodes": job_config.get_all_nodes(),
"enabled": job_config.get_enabled(),
"allow_overlap": job_config.get_allow_overlap(),
"max_runtime": job_config.get_max_runtime(),
"time_zone": job_config.get_time_zone(),
"expected_runtime": job_config.get_expected_runtime(),
}
cleanup_config = job_config.get_cleanup_action()
if cleanup_config:
cleanup_action = format_tron_action_dict(cleanup_config)
result["cleanup_action"] = cleanup_action
# Only pass non-None values, so Tron will use defaults for others
return {key: val for key, val in result.items() if val is not None}
def load_tron_instance_config(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
) -> TronActionConfig:
jobs = load_tron_service_config(
service=service,
cluster=cluster,
load_deployments=load_deployments,
soa_dir=soa_dir,
)
requested_job, requested_action = instance.split(".")
for job in jobs:
if job.get_name() == requested_job:
for action in job.get_actions():
if action.get_action_name() == requested_action:
return action
raise NoConfigurationForServiceError(
f"No tron configuration found for {service} {instance}"
)
@time_cache(ttl=5)
def load_tron_service_config(
service,
cluster,
load_deployments=True,
soa_dir=DEFAULT_SOA_DIR,
for_validation=False,
):
return load_tron_service_config_no_cache(
service, cluster, load_deployments, soa_dir, for_validation,
)
def load_tron_service_config_no_cache(
service,
cluster,
load_deployments=True,
soa_dir=DEFAULT_SOA_DIR,
for_validation=False,
):
"""Load all configured jobs for a service, and any additional config values."""
config = read_extra_service_information(
service_name=service, extra_info=f"tron-{cluster}", soa_dir=soa_dir
)
jobs = filter_templates_from_config(config)
job_configs = [
TronJobConfig(
name=name,
service=service,
cluster=cluster,
config_dict=job,
load_deployments=load_deployments,
soa_dir=soa_dir,
for_validation=for_validation,
)
for name, job in jobs.items()
]
return job_configs
def create_complete_master_config(cluster, soa_dir=DEFAULT_SOA_DIR):
system_paasta_config = load_system_paasta_config()
tronfig_folder = get_tronfig_folder(soa_dir=soa_dir, cluster=cluster)
config = read_yaml_file(os.path.join(tronfig_folder, f"MASTER.yaml"))
master_config = format_master_config(
config,
system_paasta_config.get_volumes(),
system_paasta_config.get_dockercfg_location(),
)
return yaml.dump(master_config, Dumper=Dumper, default_flow_style=False)
def create_complete_config(service, cluster, soa_dir=DEFAULT_SOA_DIR):
"""Generate a namespace configuration file for Tron, for a service."""
job_configs = load_tron_service_config(
service=service, cluster=cluster, load_deployments=True, soa_dir=soa_dir
)
preproccessed_config = {}
preproccessed_config["jobs"] = {
job_config.get_name(): format_tron_job_dict(job_config)
for job_config in job_configs
}
return yaml.dump(preproccessed_config, Dumper=Dumper, default_flow_style=False)
def validate_complete_config(
service: str, cluster: str, soa_dir: str = DEFAULT_SOA_DIR
) -> List[str]:
job_configs = load_tron_service_config(
service=service,
cluster=cluster,
load_deployments=False,
soa_dir=soa_dir,
for_validation=True,
)
# PaaSTA-specific validation
for job_config in job_configs:
check_msgs = job_config.validate()
if check_msgs:
return check_msgs
master_config_path = os.path.join(
os.path.abspath(soa_dir), "tron", cluster, MASTER_NAMESPACE + ".yaml"
)
preproccessed_config = {}
# Use Tronfig on generated config from PaaSTA to validate the rest
preproccessed_config["jobs"] = {
job_config.get_name(): format_tron_job_dict(job_config)
for job_config in job_configs
}
complete_config = yaml.dump(preproccessed_config, Dumper=Dumper)
proc = subprocess.run(
["tronfig", "-", "-V", "-n", service, "-m", master_config_path],
input=complete_config,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
if proc.returncode != 0:
process_errors = proc.stderr.strip()
if process_errors: # Error running tronfig
print(proc.stderr)
return [proc.stdout.strip()]
return []
def get_tron_namespaces(cluster, soa_dir):
tron_config_file = f"tron-{cluster}.yaml"
config_dirs = [
_dir[0]
for _dir in os.walk(os.path.abspath(soa_dir))
if tron_config_file in _dir[2]
]
namespaces = [os.path.split(config_dir)[1] for config_dir in config_dirs]
return namespaces
def list_tron_clusters(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> List[str]:
"""Returns the Tron clusters a service is configured to deploy to."""
search_re = r"/tron-([0-9a-z-_]*)\.yaml$"
service_dir = os.path.join(soa_dir, service)
clusters = []
for filename in glob.glob(f"{service_dir}/*.yaml"):
cluster_re_match = re.search(search_re, filename)
if cluster_re_match is not None:
clusters.append(cluster_re_match.group(1))
return clusters
def get_tron_dashboard_for_cluster(cluster: str):
dashboards = load_system_paasta_config().get_dashboard_links()[cluster]
if "Tron" not in dashboards:
raise Exception(f"tron api endpoint is not defined for cluster {cluster}")
return dashboards["Tron"]
def tron_jobs_running_here() -> List[Tuple[str, str, int]]:
return mesos_services_running_here(
framework_filter=lambda fw: fw["name"].startswith("tron"),
parse_service_instance_from_executor_id=parse_service_instance_from_executor_id,
)
def parse_service_instance_from_executor_id(task_id: str) -> Tuple[str, str]:
"""Parses tron mesos task ids, like schematizer.traffic_generator.28414.turnstyle.46da87d7-6092-4ed4-b926-ffa7b21c7785"""
try:
service, job, job_run, action, uuid = task_id.split(".")
except Exception as e:
log.warning(
f"Couldn't parse the mesos task id into a valid tron job: {task_id}: {e}"
)
service, job, action = "unknown_service", "unknown_job", "unknown_action"
return service, f"{job}.{action}"
| 18,261
| 2,480
| 1,114
|
6f6564a4b79638714786a730792e5cd34d3f9e05
| 1,755
|
py
|
Python
|
invenio_records_presentation/workflows/presentation.py
|
CESNET/invenio-records-presentation
|
547a2652a97feb1c6cd50e1ea917c2b5decb9286
|
[
"MIT"
] | null | null | null |
invenio_records_presentation/workflows/presentation.py
|
CESNET/invenio-records-presentation
|
547a2652a97feb1c6cd50e1ea917c2b5decb9286
|
[
"MIT"
] | 4
|
2019-03-19T16:18:22.000Z
|
2021-06-28T12:33:14.000Z
|
invenio_records_presentation/workflows/presentation.py
|
CESNET/invenio-records-presentation
|
547a2652a97feb1c6cd50e1ea917c2b5decb9286
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CESNET.
#
# Invenio Records Presentation is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
""" Example Presentation workflow."""
from invenio_workflows import WorkflowEngine
from invenio_records_presentation.api import PresentationOutputFile
from invenio_records_presentation.workflows import presentation_workflow_factory
example = presentation_workflow_factory(task_list=[
print_extra_data,
create_example_file,
print_data,
transform_example_file,
output_example_file,
])
| 27
| 89
| 0.688889
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CESNET.
#
# Invenio Records Presentation is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
""" Example Presentation workflow."""
from invenio_workflows import WorkflowEngine
from invenio_records_presentation.api import PresentationOutputFile
from invenio_records_presentation.workflows import presentation_workflow_factory
def print_extra_data(obj, eng: WorkflowEngine):
print(obj.extra_data)
return obj
def print_data(obj, eng: WorkflowEngine):
print(obj.data)
return obj
def create_example_file(obj, eng: WorkflowEngine):
# creates an example input file and passes a path to it
input = obj.scratch.create_file(task_name='example_input')
with open(input, 'w') as tf:
tf.write("example file\n")
obj.data = input
return obj
def transform_example_file(obj, eng: WorkflowEngine):
input_data = ''
try:
with open(obj.data, 'r') as input:
input_data = input.read()
except OSError:
eng.abort() # Cannot read input data, abort workflow execution
output = obj.scratch.create_file(task_name='example_output')
with open(output, 'w') as tf:
tf.write(input_data.title())
obj.data = output
return obj
def output_example_file(obj, eng: WorkflowEngine):
obj.data = PresentationOutputFile(path=obj.data,
mimetype='text/plain',
filename='example.txt')
return obj
example = presentation_workflow_factory(task_list=[
print_extra_data,
create_example_file,
print_data,
transform_example_file,
output_example_file,
])
| 1,008
| 0
| 115
|
af18231ed684c46a269b36519eb707e9ab6b7d6a
| 34,191
|
py
|
Python
|
twit_analytics.py
|
nikb999/Twitter-analytics
|
35074503be495e62fad282b9c723756df87119a7
|
[
"MIT"
] | null | null | null |
twit_analytics.py
|
nikb999/Twitter-analytics
|
35074503be495e62fad282b9c723756df87119a7
|
[
"MIT"
] | null | null | null |
twit_analytics.py
|
nikb999/Twitter-analytics
|
35074503be495e62fad282b9c723756df87119a7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#add the path of the twitter egg
import sys
egg_path = '/home/users/web/........./cgi-bin/PyPkg/twitter-1.14.3-py2.7.egg'
sys.path.append(egg_path)
# Import the CGI, string, sys, and md5crypt modules
import json, urllib2, re, time, datetime, sys, cgi, os
import sqlite3
import MySQLdb as mdb
import string, random
from urlparse import urlparse
from twitter import *
from tempfile import TemporaryFile
from collections import *
from py_site_header import *
def lex_anal(incomingTweetList):
'''
routine to do lexical analysis
'''
#final_tweet_list --- date / sender full name / tweet
#read the tweets and create a list of sender-htag and sender-@
#incoming TweetList has two layer lists
sender_htag = []
sender_at = []
h_tags_all = []
at_items_all = []
ts_all = []
for lex2 in incomingTweetList:
for lex22 in lex2:
td = lex22[0] #this is the tweet date
try:
ts = text_sanitize(lex22[1]) #this is the tweet sender
except:
print 'something wrong with ',lex22[1]
ts = '---'
ts_all.append(ts)
h_tags = re.findall('[#]\w+',lex22[2]) #these are the h-tags
at_items = re.findall('[@]\w+',lex22[2]) #these are the other users
h_tags = [hti.lower() for hti in h_tags]
at_items = [ati.lower() for ati in at_items]
for h2 in h_tags:
sender_htag.append([td,ts.lower()+'-'+h2])
h_tags_all.append(h2)
for at2 in at_items:
sender_at.append([td,ts.lower()+'-'+at2])
at_items_all.append(at2)
#summarize the two new lists
#following lists don't have dates
sender_htag2 = [xx[1] for xx in sender_htag]
sender_at2 = [yy[1] for yy in sender_at]
#make a list of the tweet senders only
ts_all = list(set(ts_all))
#print ts_all
#get the top 10 htags
#py2.6 ht_col = collections.Counter(h_tags_all)
htag_data4heatmap = []
at_data4heatmap = []
#print '<ul>Top 10 Hashtags'
#py2.6 for h_item in ht_col.most_common(10):
for h_item in top_list(h_tags_all,10):
#print '<li>', h_item, '</li>'
#count the number of times each of the hashtags was referenced by each tweet sender
try:
for tsitem in ts_all:
try:
itemtocount = str(tsitem+'-'+h_item[1])
htag_data4heatmap.append([tsitem,h_item[1], sender_htag2.count(itemtocount)])
except:
print 'Problem here: ',h_item,tsitem
except:
print 'Problem here',h_item
print '</ul>'
#get the top 10 user references
#py2.6 at_col = collections.Counter(at_items_all)
#print '<ul>Top 10 Users'
#py2.6 for a_item in at_col.most_common(10):
for a_item in top_list(at_items_all,10):
#print '<li>', a_item, '</li>'
#count the number of times each of the hashtags was referenced by each tweet sender
try:
for tsitem in ts_all:
itemtocount = str(tsitem+'-'+a_item[1])
at_data4heatmap.append([tsitem,a_item[1], sender_at2.count(itemtocount)])
except:
print 'Problem here 2',a_item
print '</ul>'
#draw the table with the heatmap
tcols = len(ts_all) #number of tweet senders - rows
trows = len(htag_data4heatmap) / tcols #number of hashtags - cols
#print trows, tcols
if trows>0:
print '<br><br>'
print '<h3>Most Popular Hashtags</h3>'
heatmap_table(trows,tcols,htag_data4heatmap)
tcols = len(ts_all) #number of tweet senders - rows
trows = len(at_data4heatmap) / tcols #number of referenced users - cols
#print trows, tcols
if trows>0:
print '<br><br>'
print '<h3>Most Referenced Users</h3>'
heatmap_table(trows,tcols,at_data4heatmap)
# Define main function.
main()
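# A tiny, hypothetical illustration of the hashtag/mention extraction used in
# lex_anal above; expected results are shown in the comments.
def _sketch_tag_extraction(text='Loving #Python and #data with @friend'):
    h_tags = re.findall('[#]\w+', text) # ['#Python', '#data']
    at_items = re.findall('[@]\w+', text) # ['@friend']
    return [h.lower() for h in h_tags], [a.lower() for a in at_items]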
| 40.800716
| 197
| 0.534176
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#add the path of the twitter egg
import sys
egg_path = '/home/users/web/........./cgi-bin/PyPkg/twitter-1.14.3-py2.7.egg'
sys.path.append(egg_path)
# Import the CGI, string, sys, and md5crypt modules
import json, urllib2, re, time, datetime, sys, cgi, os
import sqlite3
import MySQLdb as mdb
import string, random
from urlparse import urlparse
from twitter import *
from tempfile import TemporaryFile
from collections import *
from py_site_header import *
def thisPYfile():
return 'twit_analytics.py'
def define_keys():
CONSUMER_KEY="......................"
CONSUMER_SECRET="...................."
ACCESS_TOKEN="..........................."
ACCESS_TOKEN_SECRET="...................................."
return CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET
def start_database_to_store_tweets():
dbhost="......................" # Host name
dbuser="......." # Mysql username
dbpswd="......." # Mysql password
dbname = '........' # MySql db
try:
conn = mdb.connect(host=dbhost,user=dbuser,passwd=dbpswd,db=dbname)
c = conn.cursor()
return c, True, conn
except mdb.Error, e:
return e, False
def site_header(st=''):
site_start()
print '</div>'
site_title(st)
def site_start():
print '''
Content-type:text/html\r\n\r\n
<html>
<div class="wrap" id="wrap_id">
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Financial Models</title>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
<script type="text/javascript" src="../js/js_functions.js"></script>
<link rel="stylesheet" href="http://www.w3schools.com/lib/w3.css">
<link rel="stylesheet" href="http://www.w3schools.com/lib/w3-theme-indigo.css">
<link href='http://code.ionicframework.com/ionicons/2.0.1/css/ionicons.min.css' rel='stylesheet' type='text/css'>
<link rel="stylesheet" href="http://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.4.0/css/font-awesome.min.css">
<style>
a:link { text-decoration: none; }
a:visited { text-decoration: none; }
a:hover { text-decoration: none; }
a:active { text-decoration: none; }
</style>
</head>
<body>
'''
def site_title(s_title):
print '''
<div id="site_title" class="w3-container w3-theme-d4 w3-center w3-padding-jumbo">
<p> </p>
<div class="w3-row w3-jumbo">
'''
print s_title
print '''
<br>
</div>
</div>
'''
def site_footer():
import datetime
curr_year = datetime.datetime.now().strftime("%Y")
print '<div class="w3-container w3-border-top" style="text-align:center">'
print '<p> © 2013-'+curr_year+' | '
print '<a>Contact Us</a> </p>'
print '<p><a href="./termsofuse.py">Terms of Use</a> |',
print '<a href="./home.py#aboutus">About Us</a> </p>'
print '</div>'
print '</form>'
print ' </body>'
print ' </div>' #for the div id = wrap
print ' </html>'
def html_start():
# Start the HLML Block
site_header('Twitter Analytics')
def html_end():
site_footer()
def top_list(in_l,topx):
#function to get the top xx items in a list
# Need this because v2.6 of python does not have Counter in collections
counter = {}
for i in in_l:
counter[i] = counter.get(i, 0) + 1
final_dict = sorted([ (freq,word) for word, freq in counter.items() ], reverse=True)[:topx]
return final_dict
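# A quick, hypothetical illustration of top_list: it returns (frequency, item)
# pairs sorted by descending frequency, which is why it can stand in for
# collections.Counter.most_common() on Python 2.6.
def _sketch_top_list():
    return top_list(['#py', '#py', '#data'], 2) # [(2, '#py'), (1, '#data')]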
def text_sanitize(in_text):
out_text = in_text.replace("'","")
out_text = out_text.replace("\""," ").replace("\\"," ").replace("="," ").replace("''",'\"').replace("' '",'\"')
return out_text
def generate_form():
html_start()
print '<div id="body_sty">'
print '<p>Explore the world of Twitter and discover information about twitter users, their friends and followers as well as lexical analysis of the tweets.</p>'
print '<TABLE style="display: block;" BORDER = 0>'
print "<FORM METHOD = post ACTION=\'"+thisPYfile()+"\'>"
print "<TR><TH align=\"left\">Screen Name:</TH><TD><INPUT type = text name=\"scn_name\"></TD><TR>"
print "</TABLE>"
print "<INPUT TYPE = hidden NAME = \"action\" VALUE = \"display\">"
print "<INPUT TYPE = submit VALUE = \"Enter\">"
print "</FORM>"
print '</div>'
html_end()
def user_public_info(find_id_for):
#html_start()
#this line gets the public info for the user
print '<h2>'+'\nUsers Public Info'+'</h2>'
do_rest_of_module = 0
try:
t = Twitter(auth=OAuth(define_keys()[2],define_keys()[3],define_keys()[0],define_keys()[1]))
response = t.users.lookup(screen_name=find_id_for)
do_rest_of_module = 1
except:
print '<p>', 'Error getting public data' ,'</p>'
if do_rest_of_module == 1:
print '<h3>'+'\nBasic Info for: ', find_id_for+'</h3>'
print '<p>', '\tKey Data' ,'</p>'
print '<ul>'
print '<li>ID:',response[0]['id'],'</li>'
print '<li>Screen Name:',response[0]['screen_name'],'</li>'
print '<li>Name:',response[0]['name'] ,'</li>'
print '<li>Location:',response[0]['location'] ,'</li>'
print '<li>Friends:',response[0]['friends_count'] ,'</li>'
print '<li>Followers:',response[0]['followers_count'] ,'</li>'
print '<li>Messages posted:',response[0]['statuses_count'] ,'</li>'
print '</ul>'
def get_last200_tweets(in_user):
#this method will get the last 200 tweets of the user
#rate limit is 180 requests per 15 min window
#print '<h2>'+'\nAnalysis of Past Tweets for',in_user,'</h2>'
do_rest_of_module = 0
try:
t = Twitter(auth=OAuth(define_keys()[2],define_keys()[3],define_keys()[0],define_keys()[1]))
response=t.statuses.user_timeline(screen_name=in_user,count=200)
#print '<p>', '\tResponses left:', response.headers['x-rate-limit-remaining'] ,'</p>'
#print '<p>Line 201. Response length: ',len(response),'</p>'
if len(response) > 0:
do_rest_of_module = 1
else:
print '<p>', 'No info found for: ',in_user ,'</p>'
except:
print '<p>', 'Error getting tweets info for: ',in_user ,'</p>'
if do_rest_of_module == 1:
base_twit_list = []
data_for_plots = []
x = response
#x = [element.lower() for element in response] #x is list - LOWER CASE
hashtag_list = [] #start an empty list of hashtags
at_list = [] #start an empty list of twitter IDs
re_twt_list = [] #start a list of retweets
#get the start and end dates
sdf = x[0]['created_at'] #get the full date of last tweet
start_date = datetime.date(int(sdf[26:30]), int(time.strptime(sdf[4:7],'%b').tm_mon), int(sdf[8:10]))
edf = x[len(x)-1]['created_at'] #get the full date of first tweet
end_date = datetime.date(int(edf[26:30]), int(time.strptime(edf[4:7],'%b').tm_mon), int(edf[8:10]))
#end_date = str(edf[8:10])+'-'+str(edf[4:7])+'-'+str(edf[26:30])
twit_day_range = (start_date-end_date).days
avg_twit_day = (1.0*len(x)/max(1,twit_day_range))
print >> t2, '<h4>'+'Tweet Stats for ', in_user+'</h4>'
#print x[0]
#print '\tStats for last',len(x), 'tweets by',in_user
fix_nm = x[0]['user']['screen_name']
try:
if str(x[0]['user']['name']).decode('ascii'): fix_nm = str(x[0]['user']['name'])
except:
#print 'something wrong with the name for ', x[0]['user']['name']
fix_nm = x[0]['user']['screen_name']
print >> t2, '<ul>'
print >> t2, '<li>Key Personal Data</li>'
print >> t2, '<ul>'
print >> t2, '<li>ID:',x[0]['user']['id'],'</li>'
print >> t2, '<li>Screen Name:',x[0]['user']['screen_name'],'</li>'
print >> t2, '<li>Name:',fix_nm,'</li>'
#print '<li>Location:',x[0]['user']['location'],'</li>'
print >> t2, '<li>Friends:',x[0]['user']['friends_count'] ,'</li>'
print >> t2, '<li>Followers:',x[0]['user']['followers_count'] ,'</li>'
print >> t2, '<li>Messages posted:',x[0]['user']['statuses_count'] ,'</li>'
foll_frnd_rat = 1.0*x[0]['user']['followers_count'] / max(1,x[0]['user']['friends_count'])
print >> t2, '<li>Follower to Friend Ratio:', '%.1f' %(foll_frnd_rat),'</li>'
print >> t2, '</ul>'
print >> t2, '</ul>'
print >> t2, '<ul>'
print >> t2, '<li>',len(x),'tweets in past',twit_day_range,'days',
print >> t2, '(',end_date,'to',start_date,')' ,'</li>'
print >> t2, '<li>', 'Avg of ','%.1f' %(avg_twit_day),'tweets per day' ,'</li>'
#add info to the data for charts list
data_for_plots.extend([x[0]['user']['screen_name']])
data_for_plots.extend([x[0]['user']['friends_count']])
data_for_plots.extend([x[0]['user']['followers_count']])
data_for_plots.extend([x[0]['user']['statuses_count']])
data_for_plots.extend([twit_day_range])
data_for_plots.extend([len(x)])
for item in x:
            #the encode('utf8','ignore') calls below convert the text and drop any characters that cannot be encoded
td = item['created_at']
twt_date = datetime.date(int(td[26:30]), int(time.strptime(td[4:7],'%b').tm_mon), int(td[8:10]))
fix_nm = item['user']['screen_name']
try:
if str(item['user']['name']).encode('utf8','ignore'): fix_nm = str(item['user']['name'])
except:
fix_nm = item['user']['screen_name']
try:
fix_text = text_sanitize(item['text'].encode('utf8','ignore'))
except:
#print 'something wrong with the text in tweet for: ',in_user
fix_text = 'Did not process'
#print fix_text,'\t',type(item['text']),'\t',len(item['text']),'\t',item['text'],
twt_list_data = [twt_date] + [fix_nm.lower()] + [fix_text]
try:
base_twit_list.append(twt_list_data)
except:
print '<p>Unknown Error:', type(twt_list_data), twt_list_data, '</p>'
textitem = fix_text
newhastags = re.findall('[#]\w+',textitem)
newatitems = re.findall('[@]\w+',textitem)
re_tweets = re.findall('RT',textitem)
#before adding to the final lists, convert the hashtags and atitems
#to lower case. This will avoid issues of double counting same names
newhastags = [hti.lower() for hti in newhastags]
newatitems = [ati.lower() for ati in newatitems]
#Now add to the list.
            #Use the EXTEND function, which adds the elements themselves rather than nesting another list.
hashtag_list.extend(newhastags)
at_list.extend(newatitems)
re_twt_list.extend(re_tweets)
#now try to find some patterns in the last 200 tweets
#print 'use the collections library to find out the top 5'
#Version 2.6 of python does not support Counters within collections
#py2.6 hashcollect = collections.Counter(hashtag_list)
#py2.6 atcollect = collections.Counter(at_list)
totalretweets = len(re_twt_list)
retwpercent = (1.0 * totalretweets / max(1,len(x)) ) * 100
top10users = []
#print '\n.............................' ,'</p>'
print >> t2, '<li>', '\t',"%.2f%%" % retwpercent, 'are retweets (',totalretweets,'of a total of',len(x),'tweets)' ,'</li>'
print >> t2, '<ul>'
print >> t2, '<li>',(len(x)-totalretweets), 'tweets in ',twit_day_range,' days (without retweets)</li>'
print >> t2, '<li>','Avg of ','%.1f' %( 1.0*(len(x)-totalretweets)/max(twit_day_range,1) ),'tweets per day (without retweets)</li>'
print >> t2, '</ul></ul>'
data_for_plots.extend([totalretweets])
print >> t2, '<ul>'
        print >> t2, '<li>', '\tHashtags referenced over past',len(x),'tweets = ',len(hashtag_list) ,'</li>'
print >> t2, '<li>', '\t10 Most referenced hashtags' ,'</li>'
print >> t2, '<ul>'
#py2.6 for h_item in hashcollect.most_common(10): #can't use in python 2.6
for h_item in top_list(hashtag_list,10):
print >> t2, '<li>',text_sanitize(h_item[1]),'|',h_item[0] ,'</li>'
print >> t2, '</ul></ul>'
print >> t2, '<ul>'
print >> t2, '<li>', '\tTwitter IDs referenced over past',len(x),'tweets = ',len(at_list) ,'</li>'
        print >> t2, '<li>', '\t10 Most referenced Twitter IDs' ,'</li>'
print >> t2, '<ul>'
#py2.6 for at_item in atcollect.most_common(10):
for at_item in top_list(at_list,10):
print >> t2, '<li>', '\t\t',text_sanitize(at_item[1]),'|',at_item[0],'</li>'
#add the list of users to the top10user list
top10users.append(at_item[1].replace('@',''))
print >> t2, '</ul></ul>'
#print '<p>Twit list:',type(base_twit_list),'\t',len(base_twit_list),'</p>'
return top10users, base_twit_list, data_for_plots
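# Added illustrative note: the tuple returned above unpacks as
#   top10users, tweet_rows, plot_data = get_last200_tweets('some_user')  # hypothetical user
# where plot_data holds [screen_name, friends, followers, messages, day_range,
# tweet_count, retweet_count] -- the ordering display_data() relies on below.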
def display_data(scn_name):
html_start()
print '<div id="body_sty">'
print '<h4>Data shown for '+scn_name.upper()+' and 10 other users most referenced in '+scn_name.upper()+'\'s tweets.</h4><hr>'
user_to_check = scn_name
if user_to_check[0] == '@':
user_raw = user_to_check
user_to_check = user_raw.replace('@','')
# the following lines get the user info
# -- this is response limited to 180
#user_public_info(user_to_check)
max_items_to_show = 200
max_tweets_to_get = 200
#if temp file exists, close it
global t2
try:
t2.close()
except:
print ''
#open the temp file
t2=TemporaryFile()
print >> t2, '''
<a href="#" onclick="show_hideStuff('detailed_data'); return false;">
<br><br><hr><br>
<h3>Detailed Data (click to see or hide)</h3></a><br>
<div id="detailed_data" style="display:none">
'''
# last xx tweets is response limited to 180
res_last200_tweets = get_last200_tweets(user_to_check.lower())
#print '<p>', type(res_last200_tweets), len(res_last200_tweets), '</p>'
final_tweet_list = []
final_data_for_plots = []
do_rest_of_display_data = 0
try:
user_reference = res_last200_tweets[0]
tweet_last200_tweets = res_last200_tweets[1]
final_tweet_list.append(tweet_last200_tweets)
final_data_for_plots.append(res_last200_tweets[2])
do_rest_of_display_data = 1
except:
        print '<p>Something went wrong while getting the list of Twitter IDs</p>'
if (do_rest_of_display_data == 1):
print >> t2, '<br>'
try:
if len(user_reference) > 0:
for newuser in user_reference:
if newuser != user_to_check:
res_last200_tweets = get_last200_tweets(newuser.lower())
tweets_from_res_last200 = res_last200_tweets[1]
final_tweet_list.append(tweets_from_res_last200)
final_data_for_plots.append(res_last200_tweets[2])
else:
print >>t2, '<p>', 'Did not find any instance of other users referenced in your tweets.' ,'</p>'
except:
print >>t2, '<p>', 'No info found.' ,'</p>'
#Add the data to the temp file also
print >> t2, '<br><br><hr><h4>List of Tweets Analyzed</h4>'
print >> t2, '<table id="table1" class="pure-table" width=100% style="display: block;">'
print >> t2, '<thead><tr bgcolor=#def><td>Date</td><td>Sender</td><td>Text</td></tr></thead>'
row_even = True
for i1 in final_tweet_list:
for i2 in i1:
#database fields: current date, username, screen name, twt_date, twt_writer, twt_text
twts = [datetime.date.today(),scn_name,user_to_check,i2[0],text_sanitize(i2[1]),text_sanitize(i2[2])]
try:
if row_even == True:
print >> t2, '<tr><td><sm>', twts[3] ,'</sm></td><td><sm>', str(twts[4]),'</sm></td><td><sm>', str(twts[5]),'</sm></td></tr>'
row_even = False
else:
print >> t2, '<tr class="pure-table-odd"><td><sm>', twts[3] ,'</sm></td><td><sm>', str(twts[4]),'</sm></td><td><sm>', str(twts[5]),'</sm></td></tr>'
row_even = True
except:
print '',
print >> t2, '</table>'
#print out the chart data
#data fields: screen_name, friends, followers, msgs, daterange, tweets, retweets
#print json.dumps(final_data_for_plots,indent=2)
#try doing a chart
#draw a chart showing friends and followers
print '<h3>Friends and Followers</h3>'
x_fdfp = []
y1_fdfp = []
y2_fdfp = []
#print '<p>Before adding data:',x_fdfp, y_fdfp, '</p>'
x_fdfp.append( 'Screen Name' )
y1_fdfp.append( 'Friends' )
y2_fdfp.append( 'Followers' )
for xy1 in range(len(final_data_for_plots)):
x_fdfp.append( final_data_for_plots[xy1][0] )
y1_fdfp.append( final_data_for_plots[xy1][1] )
y2_fdfp.append( final_data_for_plots[xy1][2] )
two_bar_chart_data("Friends and Followers", x_fdfp, y1_fdfp, y2_fdfp)
print '<h3>Followers to Friends Ratio</h3>'
#Draw a bar chart to show followers to friends ratio
x_fdfp = []
y_fdfp = []
#print '<p>Before adding data:',x_fdfp, y_fdfp, '</p>'
for xy1 in range(len(final_data_for_plots)):
x_fdfp.append( final_data_for_plots[xy1][0] )
y_fdfp.append( round( 1.0 * final_data_for_plots[xy1][2] / max(final_data_for_plots[xy1][1],1),1) )
#print '<p>',x_fdfp, y_fdfp, '</p>'
bar_chart_data("Followers to Friends Ratio", x_fdfp, y_fdfp)
print '<h3>Tweets sent per day</h3>'
x_fdfp = []
y1_fdfp = []
y2_fdfp = []
#print '<p>Before adding data:',x_fdfp, y_fdfp, '</p>'
x_fdfp.append( 'Screen Name' )
y1_fdfp.append( 'Tweets per day - with retweets' )
y2_fdfp.append( 'Tweets per day - without retweets' )
for xy1 in range(len(final_data_for_plots)):
x_fdfp.append( final_data_for_plots[xy1][0] )
y1_fdfp.append( final_data_for_plots[xy1][5] / max(final_data_for_plots[xy1][4],1) )
y2_fdfp.append( (final_data_for_plots[xy1][5]-final_data_for_plots[xy1][6]) / max(final_data_for_plots[xy1][4],1) )
two_bar_chart_data("Tweets sent per day", x_fdfp, y1_fdfp, y2_fdfp)
print '<h3>Tweet range (tweets seen per day)</h3>'
x_fdfp = []
y_fdfp = []
#print '<p>Before adding data:',x_fdfp, y_fdfp, '</p>'
for xy1 in range(len(final_data_for_plots)):
x_fdfp.append( final_data_for_plots[xy1][0] )
y_fdfp.append( round( 1.0 * final_data_for_plots[xy1][2] * final_data_for_plots[xy1][5] / max(final_data_for_plots[xy1][4],1) ) )
#print '<p>',x_fdfp, y_fdfp, '</p>'
bar_chart_data("Tweet Range", x_fdfp, y_fdfp)
lex_anal(final_tweet_list)
#print out the detailed data
# go to the first record of the temp file first
print >> t2, ' </div> '
t2.seek(0)
print t2.read()
t2.close()
#if this works - can delete below this.
else:
print '<p>Not able to process this user. Please try another.</p>'
print '</div>' #close the body_sty div
html_end()
def lex_anal(incomingTweetList):
'''
routine to do lexical analysis
'''
#final_tweet_list --- date / sender full name / tweet
#read the tweets and create a list of sender-htag and sender-@
#incoming TweetList has two layer lists
sender_htag = []
sender_at = []
h_tags_all = []
at_items_all = []
ts_all = []
for lex2 in incomingTweetList:
for lex22 in lex2:
td = lex22[0] #this is the tweet date
try:
ts = text_sanitize(lex22[1]) #this is the tweet sender
except:
print 'something wrong with ',lex22[1]
ts = '---'
ts_all.append(ts)
h_tags = re.findall('[#]\w+',lex22[2]) #these are the h-tags
at_items = re.findall('[@]\w+',lex22[2]) #these are the other users
h_tags = [hti.lower() for hti in h_tags]
at_items = [ati.lower() for ati in at_items]
for h2 in h_tags:
sender_htag.append([td,ts.lower()+'-'+h2])
h_tags_all.append(h2)
for at2 in at_items:
sender_at.append([td,ts.lower()+'-'+at2])
at_items_all.append(at2)
#summarize the two new lists
#following lists don't have dates
sender_htag2 = [xx[1] for xx in sender_htag]
sender_at2 = [yy[1] for yy in sender_at]
#make a list of the tweet senders only
ts_all = list(set(ts_all))
#print ts_all
#get the top 10 htags
#py2.6 ht_col = collections.Counter(h_tags_all)
htag_data4heatmap = []
at_data4heatmap = []
#print '<ul>Top 10 Hashtags'
#py2.6 for h_item in ht_col.most_common(10):
for h_item in top_list(h_tags_all,10):
#print '<li>', h_item, '</li>'
        #count the number of times each hashtag was referenced by each tweet sender
try:
for tsitem in ts_all:
try:
itemtocount = str(tsitem+'-'+h_item[1])
htag_data4heatmap.append([tsitem,h_item[1], sender_htag2.count(itemtocount)])
except:
print 'Problem here: ',h_item,tsitem
except:
print 'Problem here',h_item
print '</ul>'
#get the top 10 user references
#py2.6 at_col = collections.Counter(at_items_all)
#print '<ul>Top 10 Users'
#py2.6 for a_item in at_col.most_common(10):
for a_item in top_list(at_items_all,10):
#print '<li>', a_item, '</li>'
        #count the number of times each referenced user was mentioned by each tweet sender
try:
for tsitem in ts_all:
itemtocount = str(tsitem+'-'+a_item[1])
at_data4heatmap.append([tsitem,a_item[1], sender_at2.count(itemtocount)])
except:
print 'Problem here 2',a_item
print '</ul>'
#draw the table with the heatmap
tcols = len(ts_all) #number of tweet senders - rows
    trows = len(htag_data4heatmap) / tcols #number of hashtags - cols
#print trows, tcols
if trows>0:
print '<br><br>'
print '<h3>Most Popular Hashtags</h3>'
heatmap_table(trows,tcols,htag_data4heatmap)
tcols = len(ts_all) #number of tweet senders - rows
    trows = len(at_data4heatmap) / tcols #number of referenced users - cols
#print trows, tcols
if trows>0:
print '<br><br>'
print '<h3>Most Referenced Users</h3>'
heatmap_table(trows,tcols,at_data4heatmap)
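# Added illustrative note: lex_anal() pairs every tweet sender with each hashtag
# and @mention they used (entries shaped like [date, 'sender-#tag']), counts how
# often each of the top-10 items appears per sender, and hands heatmap_table()
# a flat list of [sender, item, count] triples.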
def heatmap_table(trows,tcols,hm):
#calculate the max and min of the references
#and create a normalized color scale
mx = max(i[2] for i in hm)
mn = min(i[2] for i in hm)
    itv = max(mx - mn, 1)  #avoid a zero division when every count is the same
    #COLOR palette from http://colorbrewer2.org/
for arow in hm:
rval = 1.0*arow[2]/itv
if rval<0.1:
arow[2]='#FFF5F0'
elif rval>=0.1 and rval<0.25:
arow[2]='#FEE0D2'
elif rval>=0.25 and rval<0.4:
arow[2]='#FCBBA1'
elif rval>=0.4 and rval<0.5:
arow[2]='#FC9272'
elif rval>=0.5 and rval<0.6:
arow[2]='#FB6A4A'
elif rval>=0.6 and rval<0.7:
arow[2]='#EF3B2C'
elif rval>=0.7 and rval<0.8:
arow[2]='#CB181D'
elif rval>=0.8 and rval<0.9:
arow[2]='#A50F15'
elif rval>=0.9:
arow[2]='#67000D'
print '<table width=100% style="display: block;"> '
for i in range(trows+1):
print '<tr>',
for j in range(tcols+1):
if (i==0 and j==0):
print '<td width="15%">','','</td>',
elif i==0 and j>0 and j<(tcols):
print '<td width="8.5%"><sm>',hm[j-1][0][:10],'</sm></td>',
elif i==0 and j==(tcols):
print '<td width="8.5%"><sm>',hm[j-1][0][:10],'</sm></td></tr>'
elif i>0 and j==0:
print '<td><sm>',hm[(i-1)*tcols+j+1-1][1],'</sm></td>',
elif i>0 and j>0 and j<tcols:
print '<td bgcolor=',hm[(i-1)*tcols+j-1][2],'></td>',
elif i>0 and j==tcols:
print '<td bgcolor=',hm[(i-1)*tcols+j-1][2],'></td></tr>'
print '</table> '
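# Added illustrative sketch: heatmap_table() expects hm as trows*tcols triples of
# [sender, item, count] and maps each count to a colour bucket after normalising
# by the count range. For example, with counts spanning 1..10 (itv = 9) a count
# of 5 gives rval of roughly 0.56, which falls in the '#FB6A4A' bucket.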
def print_detailed_tweets(in_usertocheck):
html_start()
check_another_user_button()
#print '<h3>Listing of tweets analyzed:</h3>'
sd2st = start_database_to_store_tweets()
if sd2st[1] == True:
c2 = sd2st[0]
conn2 = sd2st[2]
#read all the tweets for the username and screen name
read_text = "SELECT * FROM tweetlist WHERE (username =\'"+in_usertocheck+"\')"
#print '<p>Select tweet command:',read_text,'</p>'
try:
c2.execute(read_text)
for crow in c2:
print crow[1]
conn2.close()
#print '<h2>Finished with the tweet list</h2>'
except conn2.Error, e:
print "E Error %d: %s" % (e.args[0], e.args[1])
else:
print "F Error %d: %s" % (sd2st[0].args[0],sd2st[0].args[1])
html_end()
def bar_chart_data(cht_title,xdata,ydata):
#this routine will draw a bar chart
    #print '<p>DO NOT PRINT anything inside chart modules except needed items</p>'
print '<!--Load the AJAX API-->'
print '<script type=\"text/javascript\" src=\"https://www.google.com/jsapi\"></script>'
print '<script type=\"text/javascript\">'
# Load the Visualization API and the piechart package.
print ' google.load(\'visualization\', \'1.0\', {\'packages\':[\'corechart\']}); '
# Set a callback to run when the Google Visualization API is loaded.
print ' google.setOnLoadCallback(drawChart);'
# Callback that creates and populates a data table,
# instantiates the pie chart, passes in the data and
# draws it.
print ' function drawChart() { '
# Create the data table.
print ' var data = new google.visualization.arrayToDataTable([ '
print ' [ \'Screen Name\', \' ' , cht_title, ' \', {role:\'style\'} ], '
for cdi in range(len(xdata)):
if cdi == 0:
print " [ \'", xdata[cdi], "\',", ydata[cdi], ", \'orange\' ], "
else:
print " [ \'", xdata[cdi], "\',", ydata[cdi], ", \'blue\' ], "
print ' ]); '
#Set chart options
print " var options = {\'title\':\'",cht_title,"\', "
print ' \'width\':600, '
print ' \'height\':400, '
print ' \'hAxis\' : {\'logScale\' : true} , '
print ' legend :\'none\' , \'backgroundColor\': { fill: \"none\" } '
print ' }; '
# chart_bottom():
# Instantiate and draw our chart, passing in some options.
print ' var chart = new google.visualization.BarChart(document.getElementById(\"',cht_title+'DIV','\")); '
print ' function selectHandler() { '
print ' var selectedItem = chart.getSelection()[0]; '
print ' if (selectedItem) { '
print ' var topping = data.getValue(selectedItem.row, 0); '
print ' alert(\'The user selected \' + topping); '
print ' } '
print ' } '
print ' google.visualization.events.addListener(chart, \'select\', selectHandler); '
print ' chart.draw(data, options); '
print ' } '
print '</script> '
print '<!--Div that will hold the pie chart--> '
print '<div id=\"',cht_title+'DIV','\" style=\"width:600; height:400\"></div> '
def two_bar_chart_data(cht_title,xdata,ydata1,ydata2):
    #this routine will draw a bar chart with two bars
    #print '<p>DO NOT PRINT anything inside chart modules except needed items</p>'
print '<!--Load the AJAX API-->'
print '<script type=\"text/javascript\" src=\"https://www.google.com/jsapi\"></script>'
print '<script type=\"text/javascript\">'
# Load the Visualization API and the piechart package.
print ' google.load(\'visualization\', \'1.0\', {\'packages\':[\'corechart\']}); '
# Set a callback to run when the Google Visualization API is loaded.
print ' google.setOnLoadCallback(drawChart);'
print ' function drawChart() { '
print ' var data = new google.visualization.arrayToDataTable([ '
print " [ \'Screen Name\', \' ",ydata1[0], "\' ,{role:\'style\'}, \'" ,ydata2[0], "\' , {role:\'style\'} ], "
for cdi in range(len(xdata)):
if cdi>0:
print " [ \'", xdata[cdi], "\',", ydata1[cdi],",\'blue\',", ydata2[cdi], ", \'red\' ], "
print ' ]); '
#Set chart options
print " var options = {\'title\':\'",cht_title,"\', "
print ' \'width\':600, '
print ' \'height\':400, '
print ' \'hAxis\' : {\'logScale\' : false} , '
print ' legend :\'top\' , \'backgroundColor\': { fill: \"none\" } '
print ' }; '
# chart_bottom():
# Instantiate and draw our chart, passing in some options.
print ' var chart = new google.visualization.BarChart(document.getElementById(\"',cht_title+'DIV','\")); '
print ' function selectHandler() { '
print ' var selectedItem = chart.getSelection()[0]; '
print ' if (selectedItem) { '
print ' var topping = data.getValue(selectedItem.row, 0); '
print ' alert(\'The user selected \' + topping); '
print ' } '
print ' } '
print ' google.visualization.events.addListener(chart, \'select\', selectHandler); '
print ' chart.draw(data, options); '
print ' } '
print '</script> '
print '<!--Div that will hold the pie chart--> '
print '<div id=\"',cht_title+'DIV','\" style=\"width:600; height:400\"></div> '
def test3():
#Test some random twitter functions on stream data
html_start()
testname = "concession,privatization,public private"
#testname = "mining,mines,metal,oil,gas,petroleum"
try:
ts = TwitterStream(auth=OAuth(define_keys()[2],define_keys()[3],define_keys()[0],define_keys()[1]))
#response = ts.statuses.sample()
response = ts.statuses.filter(track=testname)
showcount = 0
maxshow = 50
for tweet in response:
showcount += 1
if showcount>= maxshow: break
# You must test that your tweet has text. It might be a delete
# or data message.
if tweet is None:
print_para("-- None --")
elif tweet.get('text'):
print_para(tweet['user']['name']+'.....'+str(twit_date(tweet['created_at']))+'---'+tweet['text'])
else:
print_para(str(showcount)+'...')
#print_para(json.dumps(tweet,indent=2))
except TwitterHTTPError, e:
print '<p>Error getting tweets info for:',e['details'],'</p>'
html_end()
def print_para(instr):
print '<p>',instr,'</p>'
def twit_date(in_created_at):
out_date = datetime.date(int(in_created_at[26:30]), int(time.strptime(in_created_at[4:7],'%b').tm_mon), int(in_created_at[8:10]))
return out_date
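# Added illustrative note: twit_date() slices Twitter's created_at timestamp into
# a datetime.date, e.g. (sample timestamp in Twitter's created_at format):
#   twit_date('Wed Aug 27 13:08:45 +0000 2008')  ->  datetime.date(2008, 8, 27)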
# Define main function.
def main():
form = cgi.FieldStorage()
if (form.has_key("action") and form.has_key("scn_name")):
if (form["action"].value == "display"):
display_data(text_sanitize(form["scn_name"].value))
else:
generate_form()
main()
| 29,208
| 0
| 613
|
47aeba5f5a974bde56729cafe676435b3057e324
| 3,765
|
py
|
Python
|
sonde/qaqc_viewer.py
|
wilsaj/pint
|
a2b2a6ea9ff480a168358af642cf36c7f3c5d0e4
|
[
"BSD-3-Clause"
] | 1
|
2017-12-06T04:28:59.000Z
|
2017-12-06T04:28:59.000Z
|
sonde/qaqc_viewer.py
|
wilsaj/pint
|
a2b2a6ea9ff480a168358af642cf36c7f3c5d0e4
|
[
"BSD-3-Clause"
] | null | null | null |
sonde/qaqc_viewer.py
|
wilsaj/pint
|
a2b2a6ea9ff480a168358af642cf36c7f3c5d0e4
|
[
"BSD-3-Clause"
] | null | null | null |
"""
QAQC Viewer based on Chaco & Traits
"""
#from enthought.chaco.example_support import COLOR_PALETTE
#from enthought.enable.example_support import DemoFrame, demo_main
# Enthought library imports
from enthought.enable.api import Window, Component, ComponentEditor
from enthought.traits.api import HasTraits, Instance
from enthought.traits.ui.api import Item, Group, View
# Chaco imports
from enthought.chaco.api import Plot, ArrayDataSource, ArrayPlotData, \
BarPlot, DataRange1D, LabelAxis, LinearMapper, VPlotContainer, \
PlotAxis, PlotGrid, LinePlot, add_default_grids, PlotLabel
from enthought.chaco.tools.api import PanTool, ZoomTool
from enthought.chaco.scales.api import CalendarScaleSystem
from enthought.chaco.scales_tick_generator import ScalesTickGenerator
from sonde import Sonde
import time
import numpy as np
#==============================================================================
# Attributes to use for the plot view.
#size=(800,600)
#title="Salinity plot example"
if __name__ == "__main__":
viewer = BaseViewer()
viewer.configure_traits()
| 41.833333
| 110
| 0.601594
|
"""
QAQC Viewer based on Chaco & Traits
"""
#from enthought.chaco.example_support import COLOR_PALETTE
#from enthought.enable.example_support import DemoFrame, demo_main
# Enthought library imports
from enthought.enable.api import Window, Component, ComponentEditor
from enthought.traits.api import HasTraits, Instance
from enthought.traits.ui.api import Item, Group, View
# Chaco imports
from enthought.chaco.api import Plot, ArrayDataSource, ArrayPlotData, \
BarPlot, DataRange1D, LabelAxis, LinearMapper, VPlotContainer, \
PlotAxis, PlotGrid, LinePlot, add_default_grids, PlotLabel
from enthought.chaco.tools.api import PanTool, ZoomTool
from enthought.chaco.scales.api import CalendarScaleSystem
from enthought.chaco.scales_tick_generator import ScalesTickGenerator
from sonde import Sonde
import time
import numpy as np
class BaseViewer(HasTraits):
main_tab = Instance(Component)
traits_view = View(Item('main_tab', editor=ComponentEditor),
width=500, height=500, resizable=True, title="Salinity Plot")
def __init__(self, **kwargs):
HasTraits.__init__(self, **kwargs)
self.init_data()
def init_data(self):
file_name = '/home/dpothina/work/apps/pysonde/tests/ysi_test_files/BAYT_20070323_CDT_YS1772AA_000.dat'
sonde = Sonde(file_name)
sal_ds = np.array([1, 2, 3, 4, 5, 6, 7, 8]) # sonde.data['seawater_salinity']
time_ds = sal_ds**2 # [time.mktime(date.utctimetuple()) for date in sonde.dates]
#time_ds = ArrayDataSource(dt)
#sal_ds = ArrayDataSource(salinity, sort_order="none")
self.plot_data = ArrayPlotData(sal_ds=sal_ds,
time_ds=time_ds)
def _main_tab_default(self):
self.sal_plot = Plot(self.plot_data)
self.sal_plot.plot(('time_ds', 'sal_ds'), type='line')
#sal_plot.overlays.append(PlotAxis(sal_plot, orientation='left'))
#bottom_axis = PlotAxis(sal_plot, orientation="bottom",# mapper=xmapper,
# tick_generator=ScalesTickGenerator(scale=CalendarScaleSystem()))
#sal_plot.overlays.append(bottom_axis)
#hgrid, vgrid = add_default_grids(sal_plot)
#vgrid.tick_generator = bottom_axis.tick_generator
#sal_plot.tools.append(PanTool(sal_plot, constrain=True,
# constrain_direction="x"))
#sal_plot.overlays.append(ZoomTool(sal_plot, drag_button="right",
# always_on=True,
# tool_mode="range",
# axis="index",
# max_zoom_out_factor=10.0,
# ))
container = VPlotContainer(bgcolor="lightblue",
spacing=40,
padding=50,
fill_padding=False)
        container.add(self.sal_plot)
#container.add(price_plot)
#container.overlays.append(PlotLabel("Salinity Plot with Date Axis",
# component=container,
# #font="Times New Roman 24"))
# font="Arial 24"))
return container
#def default_traits_view(self):
# return View(Group(Item('main_tab', editor=ComponentEditor)),
# width=500, height=500, resizable=True, title="Salinity Plot")
#==============================================================================
# Attributes to use for the plot view.
#size=(800,600)
#title="Salinity plot example"
if __name__ == "__main__":
viewer = BaseViewer()
viewer.configure_traits()
| 2,200
| 450
| 23
|
86f12b2a5cc0b34fb6db729600f73e406c9d8539
| 1,514
|
py
|
Python
|
src/tt_calendar/tt_calendar/logic.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | 85
|
2017-11-21T12:22:02.000Z
|
2022-03-27T23:07:17.000Z
|
src/tt_calendar/tt_calendar/logic.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | 545
|
2017-11-04T14:15:04.000Z
|
2022-03-27T14:19:27.000Z
|
src/tt_calendar/tt_calendar/logic.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | 45
|
2017-11-11T12:36:30.000Z
|
2022-02-25T06:10:44.000Z
|
import datetime
from . import relations
| 22.597015
| 66
| 0.61889
|
import datetime
from . import relations
def actual_real_feasts(now=None):
if now is None:
now = datetime.datetime.utcnow()
now = now.replace(year=datetime.MINYEAR)
for feast in relations.REAL_FEAST.records:
for interval in feast.intervals:
if interval[0] <= now <= interval[1]:
yield feast
break
def actual_dates(now, relation):
for date in relation.records:
for interval in date.intervals:
if interval[0] <= (now.month, now.day) <= interval[1]:
yield date
break
def is_day_off(date):
if date.day in (14, 29, 44, 59, 74, 89):
return True
if date.month == relations.MONTH.DRY.value and date.day == 1:
return True
return False
def day_type(date):
if is_day_off(date):
return relations.DAY_TYPE.DAY_OFF
return relations.DAY_TYPE.WEEKDAY
def day_times(time):
if time.hour < 7 or 19 <= time.hour:
yield relations.DAY_TIME.DARK_TIME
else:
yield relations.DAY_TIME.LIGHT_TIME
if time.hour < 7:
yield relations.DAY_TIME.NIGHT
elif time.hour < 10:
yield relations.DAY_TIME.MORNING
elif time.hour < 16:
yield relations.DAY_TIME.DAY
elif time.hour < 19:
yield relations.DAY_TIME.EVENING
else:
yield relations.DAY_TIME.NIGHT
if time.hour == 7:
yield relations.DAY_TIME.DAWN
if time.hour == 19:
yield relations.DAY_TIME.SUNSET
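# Added illustrative sketch (not part of the original module), assuming the
# relations enums referenced above: an 08:30 time is daylight morning, e.g.
#   list(day_times(datetime.time(8, 30)))
#   -> [relations.DAY_TIME.LIGHT_TIME, relations.DAY_TIME.MORNING]
# and is_day_off() is True for any date whose .day is 14, 29, 44, 59, 74 or 89.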
| 1,352
| 0
| 115
|
120fa0d15479ccd5b4653c3adf9354e51e55b55c
| 573
|
py
|
Python
|
ComicPub/comics/admin.py
|
Xonshiz/ComicPub
|
d332ee1b62d6c28347954280696c86898de6d125
|
[
"MIT"
] | 8
|
2017-09-02T07:04:59.000Z
|
2020-12-17T17:30:34.000Z
|
ComicPub/comics/admin.py
|
Xonshiz/ComicPub
|
d332ee1b62d6c28347954280696c86898de6d125
|
[
"MIT"
] | 1
|
2017-10-24T12:49:57.000Z
|
2017-10-24T15:04:44.000Z
|
ComicPub/comics/admin.py
|
Xonshiz/ComicPub
|
d332ee1b62d6c28347954280696c86898de6d125
|
[
"MIT"
] | 4
|
2017-10-24T14:13:13.000Z
|
2021-12-15T17:09:23.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from comics.models import Comic, ComicChapter
# class PageFileInline(admin.TabularInline):
# model = ComicChapter
#
#
# class PageAdmin(admin.ModelAdmin):
# inlines = [PageFileInline, ]
# class ChapterInline(admin.TabularInline):
# model = ComicChapterFiles
#
# class ComicAdmin(admin.ModelAdmin):
# inlines = [
# ChapterInline,
# ]
# admin.site.register(ComicChapter, ComicAdmin)
admin.site.register(Comic)
admin.site.register(ComicChapter)
| 21.222222
| 47
| 0.724258
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from comics.models import Comic, ComicChapter
# class PageFileInline(admin.TabularInline):
# model = ComicChapter
#
#
# class PageAdmin(admin.ModelAdmin):
# inlines = [PageFileInline, ]
# class ChapterInline(admin.TabularInline):
# model = ComicChapterFiles
#
# class ComicAdmin(admin.ModelAdmin):
# inlines = [
# ChapterInline,
# ]
# admin.site.register(ComicChapter, ComicAdmin)
admin.site.register(Comic)
admin.site.register(ComicChapter)
| 0
| 0
| 0
|
d7e5e4980b5718dcaa9192759e6b4c3e5d658b97
| 2,457
|
py
|
Python
|
chpt6/Generate_random_characters.py
|
GDG-Buea/learn-python
|
9dfe8caa4b57489cf4249bf7e64856062a0b93c2
|
[
"Apache-2.0"
] | null | null | null |
chpt6/Generate_random_characters.py
|
GDG-Buea/learn-python
|
9dfe8caa4b57489cf4249bf7e64856062a0b93c2
|
[
"Apache-2.0"
] | 2
|
2018-05-21T09:39:00.000Z
|
2018-05-27T15:59:15.000Z
|
chpt6/Generate_random_characters.py
|
GDG-Buea/learn-python
|
9dfe8caa4b57489cf4249bf7e64856062a0b93c2
|
[
"Apache-2.0"
] | 2
|
2018-05-19T14:59:56.000Z
|
2018-05-19T15:25:48.000Z
|
# This program displays 100 lowercase letters, fifteen per line
import turtle
from random import randint
main()
print()
# Draw a line from (x1, y1) to (x2, y2)
# def drawLine(x1, y1, x2, y2):
# turtle.penup()
# turtle.goto(x1, y1)
# turtle.pendown()
# turtle.goto(x2, y2)
# def writeText(s, x, y):
# turtle.penup() # Pull the pen up
# turtle.goto(x, y)
# turtle.pendown() # Pull the pen down
# turtle.write(s) # Write a string
# # Draw a point at the specified location (x, y)
# def drawPoint(x, y):
# turtle.penup() # Pull the pen up
# turtle.goto(x, y)
# turtle.pendown() # Pull the pen down
# turtle.begin_fill() # Begin to fill color in a shape
# turtle.circle(3)
# turtle.end_fill() # Fill the shape
# # Draw a circle centered at (x, y) with the specified radius
# def drawCircle(x = 0, y = 0, radius = 10):
# turtle.penup() # Pull the pen up
# turtle.goto(x, y - radius)
# turtle.pendown() # Pull the pen down
# turtle.circle(radius)
# # Draw a rectangle at (x, y) with the specified width and height
# def drawRectangle(x = 0, y = 0, width = 10, height = 10):
# turtle.penup() # Pull the pen up
# turtle.goto(x + width / 2, y + height / 2)
# turtle.pendown() # Pull the pen down
# turtle.right(90)
# turtle.forward(height)
# turtle.right(90)
# turtle.forward(width)
# turtle.right(90)
# turtle.forward(height)
# turtle.right(90)
# turtle.forward(width)
# Generate a random uppercase letter
# def getRandomUpperCaseLetter() :
# return getRandomCharacter('A', 'Z')
# # Generate a random digit character
# def getRandomDigitCharacter() :
# return getRandomCharacter('0', '9')
# # Generate a random character
# def getRandomASCIICharacter() :
# return chr(randint(0, 127))
#
# # Generate a random character between ch1 and ch2
# def getRandomCharacter(ch1, ch2) :
# return chr(randint(ord(ch1), ord(ch2)))
#
| 23.179245
| 66
| 0.659341
|
# This program displays 100 lowercase letters, fifteen per line
import turtle
from random import randint
def get_random_lower_case_letter():
return get_random_character('a', 'z')
def get_random_character(ch1, ch2):
return chr(randint(ord(ch1), ord(ch2)))
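# Added illustrative note: get_random_character() works on ASCII code points, so
# get_random_character('a', 'z') draws a code between ord('a') == 97 and
# ord('z') == 122 and converts it back to a letter with chr().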
def write_text(s, x, y):
turtle.penup()
turtle.goto(x, y)
turtle.pendown()
turtle.write(s)
turtle.goto(x, y)
turtle.done()
def main():
count = 0
number_of_characters = 100
characters_per_line = 15
print("\n")
for i in range(number_of_characters):
print("\t", get_random_lower_case_letter(), end=' ')
count += 1
if count % characters_per_line == 0:
print()
main()
print()
# Draw a line from (x1, y1) to (x2, y2)
# def drawLine(x1, y1, x2, y2):
# turtle.penup()
# turtle.goto(x1, y1)
# turtle.pendown()
# turtle.goto(x2, y2)
# def writeText(s, x, y):
# turtle.penup() # Pull the pen up
# turtle.goto(x, y)
# turtle.pendown() # Pull the pen down
# turtle.write(s) # Write a string
# # Draw a point at the specified location (x, y)
# def drawPoint(x, y):
# turtle.penup() # Pull the pen up
# turtle.goto(x, y)
# turtle.pendown() # Pull the pen down
# turtle.begin_fill() # Begin to fill color in a shape
# turtle.circle(3)
# turtle.end_fill() # Fill the shape
# # Draw a circle centered at (x, y) with the specified radius
# def drawCircle(x = 0, y = 0, radius = 10):
# turtle.penup() # Pull the pen up
# turtle.goto(x, y - radius)
# turtle.pendown() # Pull the pen down
# turtle.circle(radius)
# # Draw a rectangle at (x, y) with the specified width and height
# def drawRectangle(x = 0, y = 0, width = 10, height = 10):
# turtle.penup() # Pull the pen up
# turtle.goto(x + width / 2, y + height / 2)
# turtle.pendown() # Pull the pen down
# turtle.right(90)
# turtle.forward(height)
# turtle.right(90)
# turtle.forward(width)
# turtle.right(90)
# turtle.forward(height)
# turtle.right(90)
# turtle.forward(width)
# Generate a random uppercase letter
# def getRandomUpperCaseLetter() :
# return getRandomCharacter('A', 'Z')
# # Generate a random digit character
# def getRandomDigitCharacter() :
# return getRandomCharacter('0', '9')
# # Generate a random character
# def getRandomASCIICharacter() :
# return chr(randint(0, 127))
#
# # Generate a random character between ch1 and ch2
# def getRandomCharacter(ch1, ch2) :
# return chr(randint(ord(ch1), ord(ch2)))
#
| 533
| 0
| 92
|
b4b58aa4d7d83f1298f775781fc1a78f79bf902f
| 531
|
py
|
Python
|
miniProject/miniApp/urls.py
|
cs-fullstack-2019-spring/django-mini-project5-gkg901
|
35af15000480a104f46adb62ba9ceebd4d0ad7a1
|
[
"Apache-2.0"
] | null | null | null |
miniProject/miniApp/urls.py
|
cs-fullstack-2019-spring/django-mini-project5-gkg901
|
35af15000480a104f46adb62ba9ceebd4d0ad7a1
|
[
"Apache-2.0"
] | null | null | null |
miniProject/miniApp/urls.py
|
cs-fullstack-2019-spring/django-mini-project5-gkg901
|
35af15000480a104f46adb62ba9ceebd4d0ad7a1
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('allrecipes/', views.allrecipes, name='allrecipes'),
path('newrecipe/', views.newrecipe, name='newrecipe'),
path('profile/', views.profile, name='profile'),
path('newuser/', views.newuser, name='newuser'),
path('details/<int:ID>', views.details, name='details'),
path('edituser/<int:ID>', views.edituser, name='edituser'),
path('editrecipe/<int:ID>', views.editrecipe, name='editrecipe'),
]
| 37.928571
| 69
| 0.664783
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('allrecipes/', views.allrecipes, name='allrecipes'),
path('newrecipe/', views.newrecipe, name='newrecipe'),
path('profile/', views.profile, name='profile'),
path('newuser/', views.newuser, name='newuser'),
path('details/<int:ID>', views.details, name='details'),
path('edituser/<int:ID>', views.edituser, name='edituser'),
path('editrecipe/<int:ID>', views.editrecipe, name='editrecipe'),
]
| 0
| 0
| 0
|
e63a707a6d1aecf82dd0e657d12e6dcba8e4283c
| 3,996
|
py
|
Python
|
hash_code.py
|
Arpan-206/EncryptoCLI
|
26a7718ef387d46bfcf2d167e17a494de0165858
|
[
"MIT"
] | 2
|
2021-10-20T13:38:45.000Z
|
2022-01-11T12:36:49.000Z
|
hash_code.py
|
Arpan-206/EncryptoCLI
|
26a7718ef387d46bfcf2d167e17a494de0165858
|
[
"MIT"
] | null | null | null |
hash_code.py
|
Arpan-206/EncryptoCLI
|
26a7718ef387d46bfcf2d167e17a494de0165858
|
[
"MIT"
] | null | null | null |
# Importing the hashing library
import hashlib
# Importing the visual libraries
from PyInquirer import Separator, prompt
from termcolor import colored
# Defining the hash function.
| 27.75
| 129
| 0.508008
|
# Importing the hashing library
import hashlib
# Importing the visual libraries
from PyInquirer import Separator, prompt
from termcolor import colored
# Defining the hash function.
def hash_func():
    # Asking the user for further data regarding algorithms
hash_info = prompt([
{
'type': 'list',
'qmark': '>',
'name': 'algorithm',
'message': 'Which algorithm do you want to use?',
'choices': [
Separator(),
{
'name': 'MD5',
},
{
'name': 'SHA256',
},
{
'name': 'SHA512',
},
{
'name': 'BLAKE2',
},
{
'name': 'BLAKE2b',
},
],
},
{
'type': 'list',
'qmark': '>',
'name': 'type_of_data',
'message': 'What do you want to hash?',
'choices': [
Separator(),
{
'name': 'Text',
},
{
'name': 'File',
},
],
},
])
    # Storing the data in separate variables
algorithm = hash_info['algorithm']
type_of_data = hash_info['type_of_data']
# Determining the type of data to hash and calling the appropriate functions
if type_of_data == 'File':
handle_file_hashing(algorithm)
else:
handle_text_hashing(algorithm)
def handle_text_hashing(algorithm):
# Asking the user for the data
data_info = prompt([
{
'type': 'input',
'qmark': '>',
'name': 'hash_data',
'message': 'Enter data to hash.',
},
])
# Defining the hash_out variable according to the algorithm selected by user
if algorithm == 'MD5':
hash_out = hashlib.md5()
elif algorithm == 'SHA256':
hash_out = hashlib.sha256()
elif algorithm == 'SHA512':
hash_out = hashlib.sha512()
elif algorithm == 'BLAKE2':
hash_out = hashlib.blake2s()
else:
hash_out = hashlib.blake2b()
    # Populating it with the data after converting it to binary
hash_out.update(data_info['hash_data'].encode())
# Calculating the actual hash
hash_out = hash_out.hexdigest()
# Printing out the hash
print(colored('Your hash is: ', 'white') + colored(hash_out, 'green'))
return None
def handle_file_hashing(algorithm):
# Asking the user for the path to the file
file_info = prompt([
{
'type': 'input',
'qmark': '>',
'name': 'file_name',
'message': 'Enter the path to the file.',
},
])
try:
# Again, Defining the hash_out variable according to the algorithm selected by user
if algorithm == 'MD5':
hash_out = hashlib.md5()
elif algorithm == 'SHA256':
hash_out = hashlib.sha256()
elif algorithm == 'SHA512':
hash_out = hashlib.sha512()
elif algorithm == 'BLAKE2':
hash_out = hashlib.blake2s()
else:
hash_out = hashlib.blake2b()
        # Populating it with the data after converting it to binary, this time in chunks so as not to put too much strain on memory
with open(file_info['file_name'], 'rb') as file_path:
chunk = 0
while chunk != b'':
chunk = file_path.read(1024)
hash_out.update(chunk)
# Calculating the actual hash
hash_out = hash_out.hexdigest()
# Printing out the hash
print(colored('Your hash is: ', 'white') + colored(hash_out, 'green'))
except Exception as e:
print(colored(
            'Can\'t find the file. Please check the name and make sure the extension is also present.', 'red'))
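# Added illustrative sketch (not part of the original script): the chunked
# update() pattern used for files above yields the same digest as hashing all
# the bytes in one call.
def _example_incremental_hash():
    one_shot = hashlib.sha256(b'hello world').hexdigest()
    chunked = hashlib.sha256()
    for chunk in (b'hello', b' ', b'world'):
        chunked.update(chunk)
    return chunked.hexdigest() == one_shot  # True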
| 3,741
| 0
| 69
|
e155cdbdf8a6a6a7a4d4cc1a43c09c3a16b32d5c
| 3,800
|
py
|
Python
|
examples/plugins/single_project/sample_project/data/plugin/ui_service.py
|
janvonrickenbach/Envisage_wxPhoenix_py3
|
cf79e5b2a0c3b46898a60b5fe5a2fb580604808b
|
[
"BSD-3-Clause"
] | null | null | null |
examples/plugins/single_project/sample_project/data/plugin/ui_service.py
|
janvonrickenbach/Envisage_wxPhoenix_py3
|
cf79e5b2a0c3b46898a60b5fe5a2fb580604808b
|
[
"BSD-3-Clause"
] | 1
|
2017-05-22T21:15:22.000Z
|
2017-05-22T21:15:22.000Z
|
examples/plugins/single_project/sample_project/data/plugin/ui_service.py
|
janvonrickenbach/Envisage_wxPhoenix_py3
|
cf79e5b2a0c3b46898a60b5fe5a2fb580604808b
|
[
"BSD-3-Clause"
] | 1
|
2019-10-01T07:03:58.000Z
|
2019-10-01T07:03:58.000Z
|
#-----------------------------------------------------------------------------
#
# Copyright (c) 2007 by Enthought, Inc.
# All rights reserved.
#
#-----------------------------------------------------------------------------
"""
The UI service for the Data plugin.
"""
# Standard library imports.
import logging
# Enthought library imports.
from envisage.api import ApplicationObject, UOL
from pyface.api import confirm, error, FileDialog, information, YES
# Data library imports.
# Local imports.
from services import IDATA_MODEL
# Setup a logger for this module
logger = logging.getLogger(__name__)
class UiService(ApplicationObject):
"""
The UI service for the Data plugin.
"""
##########################################################################
# Attributes
##########################################################################
#### public 'UiService' interface ########################################
# A reference to the Data plugin's model service.
model_service = UOL
##########################################################################
# 'Object' interface
##########################################################################
#### operator methods ####################################################
def __init__(self, **kws):
"""
Constructor.
Extended to ensure our UOL properties are set.
"""
super(UiService, self).__init__(**kws)
# Ensure we have a default model-service if one wasn't specified.
if self.model_service is None:
self.model_service = 'service://%s' % IDATA_MODEL
return
##########################################################################
# 'UIService' interface
##########################################################################
#### public methods ######################################################
#TODO cgalvan: to be implemented
# def delete_data(self, context, data_name, parent_window):
# """
# Delete a Data.
#
# """
#
# # Open confirmation-dialog to confirm deletion
# message = 'Are you sure you want to delete %s?' % data_name
# if confirm(parent_window, message) == YES:
# self.model_service.delete_context_item(context, data_name)
#
# return
def edit_data(self, window, data):
"""
Edit the data parameters of the specified data.
"""
data_parameters = data.data_parameters
edit_ui = data_parameters.edit_traits(
view='data_view',
kind='livemodal',
# handler=handler,
parent=window)
return edit_ui.result
def display_message(self, msg, title=None, is_error=False):
"""
Display the specified message to the user.
"""
# Ensure we record any reasons this method doesn't work. Especially
# since it's critical in displaying errors to users!
try:
# Attempt to identify the current application window.
parent_window = None
workbench = self.application.get_service('envisage.'
'workbench.IWorkbench')
if workbench is not None:
parent_window = workbench.active_window.control
# Display the requested message
if is_error:
error(parent_window, msg, title=title)
else:
information(parent_window, msg, title=title)
except:
logger.exception('Unable to display pop-up message')
return
#### EOF #####################################################################
| 29.007634
| 78
| 0.460789
|
#-----------------------------------------------------------------------------
#
# Copyright (c) 2007 by Enthought, Inc.
# All rights reserved.
#
#-----------------------------------------------------------------------------
"""
The UI service for the Data plugin.
"""
# Standard library imports.
import logging
# Enthought library imports.
from envisage.api import ApplicationObject, UOL
from pyface.api import confirm, error, FileDialog, information, YES
# Data library imports.
# Local imports.
from services import IDATA_MODEL
# Setup a logger for this module
logger = logging.getLogger(__name__)
class UiService(ApplicationObject):
"""
The UI service for the Data plugin.
"""
##########################################################################
# Attributes
##########################################################################
#### public 'UiService' interface ########################################
# A reference to the Data plugin's model service.
model_service = UOL
##########################################################################
# 'Object' interface
##########################################################################
#### operator methods ####################################################
def __init__(self, **kws):
"""
Constructor.
Extended to ensure our UOL properties are set.
"""
super(UiService, self).__init__(**kws)
# Ensure we have a default model-service if one wasn't specified.
if self.model_service is None:
self.model_service = 'service://%s' % IDATA_MODEL
return
##########################################################################
# 'UIService' interface
##########################################################################
#### public methods ######################################################
#TODO cgalvan: to be implemented
# def delete_data(self, context, data_name, parent_window):
# """
# Delete a Data.
#
# """
#
# # Open confirmation-dialog to confirm deletion
# message = 'Are you sure you want to delete %s?' % data_name
# if confirm(parent_window, message) == YES:
# self.model_service.delete_context_item(context, data_name)
#
# return
def edit_data(self, window, data):
"""
Edit the data parameters of the specified data.
"""
data_parameters = data.data_parameters
edit_ui = data_parameters.edit_traits(
view='data_view',
kind='livemodal',
# handler=handler,
parent=window)
return edit_ui.result
def display_message(self, msg, title=None, is_error=False):
"""
Display the specified message to the user.
"""
# Ensure we record any reasons this method doesn't work. Especially
# since it's critical in displaying errors to users!
try:
# Attempt to identify the current application window.
parent_window = None
workbench = self.application.get_service('envisage.'
'workbench.IWorkbench')
if workbench is not None:
parent_window = workbench.active_window.control
# Display the requested message
if is_error:
error(parent_window, msg, title=title)
else:
information(parent_window, msg, title=title)
except:
logger.exception('Unable to display pop-up message')
return
#### EOF #####################################################################
| 0
| 0
| 0
|
20dc02eb654f867beadeef8c295396bcf7913d05
| 8,460
|
py
|
Python
|
metecho/tests/consumers.py
|
almostolmos/Metecho
|
7f58eca163faafea1ce07ffb6f4de2449fa0b8df
|
[
"BSD-3-Clause"
] | 21
|
2020-04-02T21:39:58.000Z
|
2022-01-31T19:43:47.000Z
|
metecho/tests/consumers.py
|
almostolmos/Metecho
|
7f58eca163faafea1ce07ffb6f4de2449fa0b8df
|
[
"BSD-3-Clause"
] | 1,613
|
2020-03-26T16:39:57.000Z
|
2022-03-07T14:54:16.000Z
|
metecho/tests/consumers.py
|
almostolmos/Metecho
|
7f58eca163faafea1ce07ffb6f4de2449fa0b8df
|
[
"BSD-3-Clause"
] | 21
|
2020-07-21T11:58:47.000Z
|
2021-11-25T00:48:21.000Z
|
import pytest
from channels.db import database_sync_to_async
from channels.testing import WebsocketCommunicator
from ..api.model_mixins import Request
from ..api.push import push_message_about_instance, report_error
from ..api.serializers import (
EpicSerializer,
ProjectSerializer,
ScratchOrgSerializer,
TaskSerializer,
)
from ..consumers import PushNotificationConsumer
from ..routing import websockets
pytestmark = pytest.mark.asyncio
@database_sync_to_async
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
# These tests need to go last, after any tests that start up a Communicator:
@pytest.mark.django_db
| 33.307087
| 88
| 0.711348
|
import pytest
from channels.db import database_sync_to_async
from channels.testing import WebsocketCommunicator
from ..api.model_mixins import Request
from ..api.push import push_message_about_instance, report_error
from ..api.serializers import (
EpicSerializer,
ProjectSerializer,
ScratchOrgSerializer,
TaskSerializer,
)
from ..consumers import PushNotificationConsumer
from ..routing import websockets
pytestmark = pytest.mark.asyncio
@database_sync_to_async
def serialize_model(serializer_model, instance, user):
serializer = serializer_model(instance, context={"request": Request(user)})
return serializer.data
@pytest.mark.django_db
async def test_push_notification_consumer__project(user_factory, project_factory):
user = await database_sync_to_async(user_factory)()
project = await database_sync_to_async(project_factory)()
communicator = WebsocketCommunicator(websockets, "/ws/notifications/")
communicator.scope["user"] = user
connected, _ = await communicator.connect()
assert connected
await communicator.send_json_to(
{"model": "project", "id": str(project.id), "action": "SUBSCRIBE"}
)
response = await communicator.receive_json_from()
assert "ok" in response
await push_message_about_instance(
project, {"type": "TEST_MESSAGE", "payload": {"originating_user_id": "abc"}}
)
response = await communicator.receive_json_from()
model = await serialize_model(ProjectSerializer, project, user)
assert response == {
"type": "TEST_MESSAGE",
"payload": {"originating_user_id": "abc", "model": model},
}
await communicator.disconnect()
@pytest.mark.django_db
async def test_push_notification_consumer__scratch_org__list(
user_factory, scratch_org_factory
):
user = await database_sync_to_async(user_factory)()
scratch_org = await database_sync_to_async(scratch_org_factory)()
communicator = WebsocketCommunicator(websockets, "/ws/notifications/")
communicator.scope["user"] = user
connected, _ = await communicator.connect()
assert connected
await communicator.send_json_to(
{"model": "scratch_org", "id": "list", "action": "SUBSCRIBE"}
)
response = await communicator.receive_json_from()
assert "ok" in response
await push_message_about_instance(
scratch_org,
{"type": "SCRATCH_ORG_RECREATE", "payload": {"originating_user_id": "abc"}},
for_list=True,
)
response = await communicator.receive_json_from()
model = await serialize_model(ScratchOrgSerializer, scratch_org, user)
assert response == {
"type": "SCRATCH_ORG_RECREATE",
"payload": {"originating_user_id": "abc", "model": model},
}
await communicator.disconnect()
@pytest.mark.django_db
async def test_push_notification_consumer__epic(user_factory, epic_factory):
user = await database_sync_to_async(user_factory)()
epic = await database_sync_to_async(epic_factory)(project__repo_id=1234)
communicator = WebsocketCommunicator(websockets, "/ws/notifications/")
communicator.scope["user"] = user
connected, _ = await communicator.connect()
assert connected
await communicator.send_json_to(
{"model": "epic", "id": str(epic.id), "action": "SUBSCRIBE"}
)
response = await communicator.receive_json_from()
assert "ok" in response
await push_message_about_instance(
epic, {"type": "TEST_MESSAGE", "payload": {"originating_user_id": "abc"}}
)
response = await communicator.receive_json_from()
model = await serialize_model(EpicSerializer, epic, user)
assert response == {
"type": "TEST_MESSAGE",
"payload": {"originating_user_id": "abc", "model": model},
}
await communicator.disconnect()
@pytest.mark.django_db
async def test_push_notification_consumer__task(user_factory, task_factory):
user = await database_sync_to_async(user_factory)()
task = await database_sync_to_async(task_factory)(epic__project__repo_id=4321)
communicator = WebsocketCommunicator(websockets, "/ws/notifications/")
communicator.scope["user"] = user
connected, _ = await communicator.connect()
assert connected
await communicator.send_json_to(
{"model": "task", "id": str(task.id), "action": "SUBSCRIBE"}
)
response = await communicator.receive_json_from()
assert "ok" in response
await push_message_about_instance(
task, {"type": "TEST_MESSAGE", "payload": {"originating_user_id": "abc"}}
)
response = await communicator.receive_json_from()
model = await serialize_model(TaskSerializer, task, user)
assert response == {
"type": "TEST_MESSAGE",
"payload": {"originating_user_id": "abc", "model": model},
}
await communicator.disconnect()
@pytest.mark.django_db
async def test_push_notification_consumer__scratch_org(
user_factory, scratch_org_factory
):
user = await database_sync_to_async(user_factory)()
scratch_org = await database_sync_to_async(scratch_org_factory)(
task__epic__project__repo_id=2468
)
communicator = WebsocketCommunicator(websockets, "/ws/notifications/")
communicator.scope["user"] = user
connected, _ = await communicator.connect()
assert connected
await communicator.send_json_to(
{"model": "scratch_org", "id": str(scratch_org.id), "action": "SUBSCRIBE"}
)
response = await communicator.receive_json_from()
assert "ok" in response
await push_message_about_instance(
scratch_org, {"type": "TEST_MESSAGE", "payload": {"originating_user_id": "abc"}}
)
response = await communicator.receive_json_from()
model = await serialize_model(ScratchOrgSerializer, scratch_org, user)
assert response == {
"type": "TEST_MESSAGE",
"payload": {"originating_user_id": "abc", "model": model},
}
await communicator.disconnect()
@pytest.mark.django_db
async def test_push_notification_consumer__report_error(user_factory):
user = await database_sync_to_async(user_factory)()
communicator = WebsocketCommunicator(websockets, "/ws/notifications/")
communicator.scope["user"] = user
connected, _ = await communicator.connect()
assert connected
await communicator.send_json_to(
{"model": "user", "id": str(user.id), "action": "SUBSCRIBE"}
)
response = await communicator.receive_json_from()
assert "ok" in response
await report_error(user)
response = await communicator.receive_json_from()
assert response == {
"type": "BACKEND_ERROR",
"payload": {"message": "There was an error"},
}
await communicator.disconnect()
@pytest.mark.django_db
async def test_push_notification_consumer__unsubscribe(user_factory):
user = await database_sync_to_async(user_factory)()
communicator = WebsocketCommunicator(websockets, "/ws/notifications/")
communicator.scope["user"] = user
connected, _ = await communicator.connect()
assert connected
await communicator.send_json_to(
{"model": "user", "id": str(user.id), "action": "SUBSCRIBE"}
)
response = await communicator.receive_json_from()
assert "ok" in response
await communicator.send_json_to(
{"model": "user", "id": str(user.id), "action": "UNSUBSCRIBE"}
)
response = await communicator.receive_json_from()
assert "ok" in response
await communicator.disconnect()
@pytest.mark.django_db
async def test_push_notification_consumer__invalid_subscription(user_factory):
user = await database_sync_to_async(user_factory)()
communicator = WebsocketCommunicator(websockets, "/ws/notifications/")
communicator.scope["user"] = user
connected, _ = await communicator.connect()
assert connected
await communicator.send_json_to({"model": "foobar", "id": "buzbaz"})
response = await communicator.receive_json_from()
assert "error" in response
await communicator.disconnect()
# These tests need to go last, after any tests that start up a Communicator:
@pytest.mark.django_db
async def test_push_notification_consumer__missing_instance():
content = {
"model_name": "scratchorg",
"id": "bet this is an invalid ID",
"payload": {},
}
consumer = PushNotificationConsumer()
new_content = await consumer.hydrate_message(content)
assert new_content == {"payload": {}}
| 7,456
| 0
| 220
|
f3976e2ec215dc1bd2bd45dd144b13e71688e6f1
| 6,227
|
py
|
Python
|
cajitos_site/users/routes.py
|
OlgaKuratkina/cajitos
|
0bc13f71281a1a67c8bcd1a3ae343ad0b14d9bad
|
[
"MIT"
] | null | null | null |
cajitos_site/users/routes.py
|
OlgaKuratkina/cajitos
|
0bc13f71281a1a67c8bcd1a3ae343ad0b14d9bad
|
[
"MIT"
] | 7
|
2020-05-08T19:51:22.000Z
|
2022-03-11T23:37:57.000Z
|
cajitos_site/users/routes.py
|
OlgaKuratkina/cajitos
|
0bc13f71281a1a67c8bcd1a3ae343ad0b14d9bad
|
[
"MIT"
] | null | null | null |
import markdown
from flask import redirect, url_for, flash, render_template, session, request, current_app, abort
from flask_login import current_user, login_user, logout_user, login_required
from cajitos_site import bcrypt
from cajitos_site.users import users
from cajitos_site.users.forms import RegistrationForm, LoginForm, UpdateAccountForm, RequestResetForm, ResetPasswordForm
from cajitos_site.models import User, load_user
from cajitos_site.utils.email import send_service_email
from cajitos_site.utils.utils import (
get_redirect_target, save_picture
)
from cajitos_site.utils.auth_utils import generate_google_auth_request, get_google_user_info
# Disabled temporarily or forever
# @users.route("/register", methods=['GET', 'POST'])
@users.route("/login", methods=['GET', 'POST'])
@users.route('/google_login')
@users.route('/google_login/callback')
@users.route('/logout')
@users.route('/account/<int:user_id>')
@users.route('/account/<int:user_id>/update', methods=['GET', 'POST'])
@login_required
@users.route("/reset_password", methods=['GET', 'POST'])
@users.route("/reset_password/<token>", methods=['GET', 'POST'])
| 40.967105
| 120
| 0.696965
|
import markdown
from flask import redirect, url_for, flash, render_template, session, request, current_app, abort
from flask_login import current_user, login_user, logout_user, login_required
from cajitos_site import bcrypt
from cajitos_site.users import users
from cajitos_site.users.forms import RegistrationForm, LoginForm, UpdateAccountForm, RequestResetForm, ResetPasswordForm
from cajitos_site.models import User, load_user
from cajitos_site.utils.email import send_service_email
from cajitos_site.utils.utils import (
get_redirect_target, save_picture
)
from cajitos_site.utils.auth_utils import generate_google_auth_request, get_google_user_info
# Disabled temporarily or forever
# @users.route("/register", methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('blog.posts'))
form = RegistrationForm()
if form.validate_on_submit():
user = User.create(username=form.username.data, email=form.email.data)
flash(f'Account created for {form.username.data}!', 'success')
flash(f'Check your email to confirm your new account', 'success')
token = user.get_validation_token()
reset_link = f"{url_for('users.validate_token', token=token, _external=True)}"
send_service_email(user, reset_link)
return redirect(url_for('blog.posts'))
return render_template('user/register.html', title='Register', form=form)
@users.route("/login", methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.select().where(User.email == form.email.data).first()
if user and user.status != 'Confirmed':
flash('You need to confirm your account to proceed!', 'info')
elif user and bcrypt.check_password_hash(user.password, form.password.data):
flash('You have been logged in!', 'success')
login_user(user, remember=form.remember.data)
next_page = get_redirect_target()
return redirect(next_page) if next_page else redirect(url_for('blog.posts'))
else:
flash('Login Unsuccessful. Please check email and password', 'danger')
return render_template('user/login.html', title='Login', form=form)
@users.route('/google_login')
def google_login():
request_uri = generate_google_auth_request()
return redirect(request_uri)
@users.route('/google_login/callback')
def callback():
userinfo_response = get_google_user_info(request)
if userinfo_response.get('email_verified'):
google_id = userinfo_response['sub']
email = userinfo_response['email']
profile_picture = userinfo_response['picture']
username = userinfo_response['given_name']
else:
return 'User email not available or not verified by Google.', 400
user = User.get_user_by_email(email)
if not user:
user = User.create(
google_id=google_id, username=username, email=email, password='', profile_picture=profile_picture,
status='Confirmed'
)
else:
user.google_id = google_id
user.username = username
if profile_picture:
user.profile_picture = profile_picture
user.status = 'Confirmed'
user.save()
login_user(user)
return redirect(url_for('blog.posts'))
@users.route('/logout')
def logout():
logout_user()
return redirect(url_for('blog.posts'))
@users.route('/account/<int:user_id>')
def account(user_id):
user = load_user(user_id)
return render_template('user/account.html', title='Account', user=user)
@users.route('/account/<int:user_id>/update', methods=['GET', 'POST'])
@login_required
def account_update(user_id):
form = UpdateAccountForm()
if request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
form.about_me.data = current_user.about_me
if form.validate_on_submit() and current_user.id == user_id:
if form.picture.data:
picture_file = save_picture(form.picture.data)
current_user.profile_picture = picture_file
current_user.username = form.username.data
current_user.email = form.email.data
current_user.about_me = markdown.markdown(form.about_me.data)
current_user.save()
flash('Your account has been updated!', 'success')
return redirect(url_for('users.account', user_id=user_id))
elif current_user.id != user_id:
abort(403)
return render_template('create_entry.html', title='Account', form=form)
@users.route("/reset_password", methods=['GET', 'POST'])
def reset_request():
if current_user.is_authenticated:
return redirect(url_for('blog.posts'))
form = RequestResetForm()
if form.validate_on_submit():
user = User.select().where(User.email == form.email.data).first()
token = user.get_validation_token()
reset_link = f"{url_for('users.validate_token', token=token, _external=True)}"
send_service_email(user, reset_link, confirm_account=False)
flash('An email has been sent with instructions to complete operation.', 'info')
return redirect(url_for('users.login'))
return render_template('user/reset_request.html', title='Reset Password', form=form)
@users.route("/reset_password/<token>", methods=['GET', 'POST'])
def validate_token(token):
if current_user.is_authenticated:
return redirect(url_for('blog.posts'))
user = User.verify_token(token)
if user is None:
flash('That is an invalid or expired token', 'warning')
return redirect(url_for('users.reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user.password = hashed_password
# Instead of default implementation with user.is_active
user.status = 'Confirmed'
user.save()
flash('Your password has been updated! You are now able to log in', 'success')
return redirect(url_for('users.login'))
return render_template('user/validate_token.html', title='Reset Password', form=form)
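The routes above call user.get_validation_token() and User.verify_token(), which live on the User model and are not shown in this file. The sketch below is one hedged way such helpers are commonly written with itsdangerous; the serializer choice, the 'user_id' payload key and the User.id primary key are assumptions, not the project's actual implementation.
from flask import current_app
from itsdangerous import URLSafeTimedSerializer
from cajitos_site.models import User
def make_validation_token(user):
    # What User.get_validation_token() could delegate to: sign the user's id with the app secret.
    serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
    return serializer.dumps({'user_id': user.id})
def user_from_token(token, max_age=1800):
    # Counterpart of User.verify_token(): return the matching User, or None if invalid or expired.
    serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
    try:
        user_id = serializer.loads(token, max_age=max_age)['user_id']
    except Exception:
        return None
    return User.select().where(User.id == user_id).first()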
| 4,876
| 0
| 198
|
43fc4974ba1213885593d4b53ba973eb01e9d576
| 9,049
|
py
|
Python
|
sdc/ysdc_dataset_api/dataset/dataset.py
|
sty61010/shifts
|
d3bb3086d8f2581f74644585701f4b1db4338483
|
[
"Apache-2.0"
] | null | null | null |
sdc/ysdc_dataset_api/dataset/dataset.py
|
sty61010/shifts
|
d3bb3086d8f2581f74644585701f4b1db4338483
|
[
"Apache-2.0"
] | null | null | null |
sdc/ysdc_dataset_api/dataset/dataset.py
|
sty61010/shifts
|
d3bb3086d8f2581f74644585701f4b1db4338483
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
from typing import Callable, Union
from typing import Optional, List
import torch
from sdc.constants import SCENE_TAG_TYPE_TO_OPTIONS, VALID_TRAJECTORY_TAGS
from ..features import FeatureProducerBase
from ..proto import get_tags_from_request, proto_to_dict
from ..utils import (
get_file_paths,
get_gt_trajectory,
get_latest_track_state_by_id,
get_to_track_frame_transform,
read_feature_map_from_file,
request_is_valid,
scenes_generator,
transform_2d_points,
)
| 42.088372
| 99
| 0.637971
|
import json
import os
from typing import Callable, Union
from typing import Optional, List
import torch
from sdc.constants import SCENE_TAG_TYPE_TO_OPTIONS, VALID_TRAJECTORY_TAGS
from ..features import FeatureProducerBase
from ..proto import get_tags_from_request, proto_to_dict
from ..utils import (
get_file_paths,
get_gt_trajectory,
get_latest_track_state_by_id,
get_to_track_frame_transform,
read_feature_map_from_file,
request_is_valid,
scenes_generator,
transform_2d_points,
)
class MotionPredictionDataset(torch.utils.data.IterableDataset):
def __init__(
self,
dataset_path: str,
scene_tags_fpath: str,
feature_producer: FeatureProducerBase = None,
prerendered_dataset_path: str = None,
transform_ground_truth_to_agent_frame: bool = True,
scene_tags_filter: Union[Callable, None] = None,
trajectory_tags_filter: Union[Callable, None] = None,
pre_filtered_scene_file_paths: Optional[List[str]] = None,
yield_metadata=False
):
"""Pytorch-style dataset class for the motion prediction task.
Dataset iterator performs iteration over scenes in the dataset and individual prediction
requests in each scene. Iterator yields dict that can have the following structure:
{
'scene_id': str,
'track_id': int,
'scene_tags': Dict[str, str],
'ground_truth_trajectory': np.ndarray,
'prerendered_feature_map': np.ndarray,
'feature_maps': np.ndarray,
}.
'scene_id' unique scene identifier.
'track_id' vehicle id of the current prediction request.
'ground_truth_trajectory' field is always included, it contains ground truth trajectory for
the current prediction request.
'prerendered_feature_map' field would be present if prerendered_dataset_path was specified,
contains pre-rendered feature maps.
'feature_maps' field would be present if user passes an instance of
ysdc_dataset_api.features.FeatureRenderer, contains feature maps rendered on the fly by
specified renderer instance.
Args:
dataset_path: path to the dataset directory
scene_tags_fpath: path to the tags file
feature_producer: instance of the FeatureProducerBase class,
used to generate features for a data item. Defaults to None.
prerendered_dataset_path: path to the pre-rendered dataset. Defaults to None.
transform_ground_truth_to_agent_frame: whether to transform ground truth
trajectory to an agent coordinate system or return global coordinates.
Defaults to True.
scene_tags_filter: function to filter dataset scenes by tags. Defaults to None.
trajectory_tags_filter: function to filter prediction requests by trajectory tags.
Defaults to None.
Raises:
ValueError: if none of feature_producer or prerendered_dataset_path was specified.
"""
super(MotionPredictionDataset, self).__init__()
self._feature_producer = feature_producer
self._prerendered_dataset_path = prerendered_dataset_path
self._transform_ground_truth_to_agent_frame = transform_ground_truth_to_agent_frame
self._scene_tags_filter = _callable_or_trivial_filter(scene_tags_filter)
self._trajectory_tags_filter = _callable_or_trivial_filter(trajectory_tags_filter)
self._yield_metadata = yield_metadata
if pre_filtered_scene_file_paths is not None:
print('Building MotionPredictionDataset with pre-filtered '
'scene file paths.')
self._scene_file_paths = pre_filtered_scene_file_paths
else:
self._scene_file_paths = self._filter_paths(
get_file_paths(dataset_path), scene_tags_fpath)
@property
def num_scenes(self) -> int:
"""Number of scenes in the dataset"""
return len(self._scene_file_paths)
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
if worker_info is None:
file_paths = self._scene_file_paths
else:
file_paths = self._split_filepaths_by_worker(
worker_info.id, worker_info.num_workers)
def data_gen(_file_paths: List[str]):
for scene, fpath in scenes_generator(_file_paths, yield_fpath=True):
for request in scene.prediction_requests:
if not request_is_valid(scene, request):
continue
trajectory_tags = get_tags_from_request(request)
if not self._trajectory_tags_filter(trajectory_tags):
continue
track = get_latest_track_state_by_id(scene, request.track_id)
to_track_frame_tf = get_to_track_frame_transform(track)
ground_truth_trajectory = get_gt_trajectory(scene, request.track_id)
if self._transform_ground_truth_to_agent_frame:
ground_truth_trajectory = transform_2d_points(
ground_truth_trajectory, to_track_frame_tf)
result = {
'ground_truth_trajectory': ground_truth_trajectory,
'scene_id': scene.id,
'track_id': request.track_id,
'scene_tags': proto_to_dict(scene.scene_tags),
}
if self._prerendered_dataset_path:
fm_path = self._get_serialized_fm_path(fpath, scene.id, request.track_id)
result['prerendered_feature_map'] = read_feature_map_from_file(fm_path)
if self._feature_producer:
result.update(
self._feature_producer.produce_features(scene, to_track_frame_tf))
if self._yield_metadata:
result = (
self.add_metadata_to_batch(
scene=scene, request=request,
trajectory_tags=trajectory_tags,
batch=result))
yield result
return data_gen(file_paths)
def add_metadata_to_batch(self, scene, request, trajectory_tags, batch):
batch['scene_id'] = scene.id
batch['request_id'] = request.track_id
# Note that some will be "invalid"
batch['num_vehicles'] = len(scene.prediction_requests)
scene_tags_dict = proto_to_dict(scene.scene_tags)
for scene_tag_type in SCENE_TAG_TYPE_TO_OPTIONS.keys():
scene_tag_options = SCENE_TAG_TYPE_TO_OPTIONS[scene_tag_type]
for scene_tag_option in scene_tag_options:
try:
batch[f'{scene_tag_type}__{scene_tag_option}'] = int(
scene_tags_dict[scene_tag_type] == scene_tag_option)
except KeyError:
batch[f'{scene_tag_type}__{scene_tag_option}'] = -1
trajectory_tags = set(trajectory_tags)
for trajectory_tag in VALID_TRAJECTORY_TAGS:
batch[trajectory_tag] = (trajectory_tag in trajectory_tags)
return batch
def _get_serialized_fm_path(self, scene_fpath, scene_id, track_id):
base, _ = os.path.split(scene_fpath)
_, subdir = os.path.split(base)
return os.path.join(self._prerendered_dataset_path, subdir, f'{scene_id}_{track_id}.npy')
def _split_filepaths_by_worker(self, worker_id, num_workers):
n_scenes_per_worker = self.num_scenes // num_workers
split = list(range(0, self.num_scenes, n_scenes_per_worker))
start = split[worker_id]
if worker_id == num_workers - 1:
stop = self.num_scenes
else:
stop = split[worker_id + 1]
return self._scene_file_paths[start:stop]
def _callable_or_lambda_true(self, f):
if f is None:
return lambda x: True
if not callable(f):
raise ValueError('Expected callable, got {}'.format(type(f)))
return f
def _filter_paths(self, file_paths, scene_tags_fpath):
valid_indices = []
with open(scene_tags_fpath, 'r') as f:
for i, line in enumerate(f):
tags = json.loads(line.strip())
if self._scene_tags_filter(tags):
valid_indices.append(i)
print(
f'{len(valid_indices)}/{len(file_paths)} '
f'scenes fit the filter criteria.')
return [file_paths[i] for i in valid_indices]
def _callable_or_trivial_filter(f):
if f is None:
return _trivial_filter
if not callable(f):
raise ValueError('Expected callable, got {}'.format(type(f)))
return f
def _trivial_filter(x):
return True
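For reference, a minimal usage sketch of the dataset defined above. The paths and the scene tag key are placeholders; per the docstring a feature_producer or prerendered_dataset_path would normally be supplied as well, but they are omitted here so only ground truth and metadata are yielded.
dataset = MotionPredictionDataset(
    dataset_path='/data/sdc/train',                # placeholder path
    scene_tags_fpath='/data/sdc/train_tags.txt',   # placeholder path
    scene_tags_filter=lambda tags: tags.get('track') == 'Moscow',  # illustrative filter
)
print(dataset.num_scenes, 'scenes after filtering')
for item in dataset:
    print(item['scene_id'], item['track_id'], item['ground_truth_trajectory'].shape)
    break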
| 4,714
| 3,746
| 69
|
351525ff3510e81241132c03602b819a2a740942
| 70
|
py
|
Python
|
core/src/static_classes/__init__.py
|
azurlane-doujin/AzurLanePaintingExtract-v1.0
|
ef4f25e70b3ca1b9df4304132cc7612c8f5efebb
|
[
"MIT"
] | 144
|
2019-06-13T06:43:43.000Z
|
2022-03-29T15:07:57.000Z
|
core/src/static_classes/__init__.py
|
Shabi1213/AzurLanePaintingExtract-v1.0
|
ef4f25e70b3ca1b9df4304132cc7612c8f5efebb
|
[
"MIT"
] | 2
|
2020-08-02T15:08:58.000Z
|
2021-11-29T02:34:18.000Z
|
core/src/static_classes/__init__.py
|
Goodjooy/ArknightsPaintingExtract
|
e1e6ef339c6f76cab45a26df66497126c11a21a8
|
[
"MIT"
] | 19
|
2020-03-01T10:06:52.000Z
|
2022-02-06T13:49:26.000Z
|
__all__ = ["file_read", 'image_deal', 'search_order', 'static_data']
| 35
| 69
| 0.7
|
__all__ = ["file_read", 'image_deal', 'search_order', 'static_data']
| 0
| 0
| 0
|
13e87111dffd55a11464ba7c203a6cc1cb2cb9ac
| 412
|
py
|
Python
|
Demo/wdt/example_wdt_file.py
|
quecpython/EC100Y-SDK
|
712c7eb7b54a3971009d94f6d6b21a6011d56f68
|
[
"MIT"
] | 4
|
2021-01-28T01:30:59.000Z
|
2021-06-15T07:13:41.000Z
|
Demo/wdt/example_wdt_file.py
|
QuePython/EC100Y-SDK
|
712c7eb7b54a3971009d94f6d6b21a6011d56f68
|
[
"MIT"
] | null | null | null |
Demo/wdt/example_wdt_file.py
|
QuePython/EC100Y-SDK
|
712c7eb7b54a3971009d94f6d6b21a6011d56f68
|
[
"MIT"
] | 3
|
2021-04-07T09:55:59.000Z
|
2022-01-08T15:15:23.000Z
|
'''
@Author: Pawn
@Date: 2020-08-12
@LastEditTime: 2020-08-12 17:06:08
@Description: example for module wdt
@FilePath: example_wdt.py
'''
from machine import WDT
from machine import Timer
timer1 = Timer(Timer.Timer1)
if __name__ == '__main__':
    wdt = WDT(20) # start the watchdog with the given timeout interval
    timer1.start(period=15000, mode=timer1.PERIODIC, callback=feed) # feed the watchdog with a timer
# wdt.stop()
| 17.913043
| 78
| 0.682039
|
'''
@Author: Pawn
@Date: 2020-08-12
@LastEditTime: 2020-08-12 17:06:08
@Description: example for module wdt
@FilePath: example_wdt.py
'''
from machine import WDT
from machine import Timer
timer1 = Timer(Timer.Timer1)
def feed(t):
wdt.feed()
if __name__ == '__main__':
    wdt = WDT(20) # start the watchdog with the given timeout interval
    timer1.start(period=15000, mode=timer1.PERIODIC, callback=feed) # feed the watchdog with a timer
# wdt.stop()
| 6
| 0
| 23
|
bcb5024cd6f5e64a630af32466bb1b12cbac2b4a
| 2,752
|
py
|
Python
|
users/tests/test_urls.py
|
jewells07/mumbleapi
|
beee0b50eefb3b1ff3e21073400c778323eece98
|
[
"Apache-2.0"
] | 1
|
2021-05-18T11:37:44.000Z
|
2021-05-18T11:37:44.000Z
|
users/tests/test_urls.py
|
TomNewton1/mumbleapi
|
108d5a841b97d38285bede523f243624e05bc231
|
[
"Apache-2.0"
] | null | null | null |
users/tests/test_urls.py
|
TomNewton1/mumbleapi
|
108d5a841b97d38285bede523f243624e05bc231
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls import url
from django.urls import reverse , resolve
from rest_framework import status
from rest_framework.test import APITestCase
from users.views import (
    followUser, users, UserProfileUpdate,
    ProfilePictureUpdate, usersRecommended,
    user, userMumbles, userArticles, passwordChange,
    sendActivationEmail, activate)
# Create your tests here.
| 37.69863
| 87
| 0.703125
|
from django.conf.urls import url
from django.urls import reverse , resolve
from rest_framework import status
from rest_framework.test import APITestCase
from users.views import (
    followUser, users, UserProfileUpdate,
    ProfilePictureUpdate, usersRecommended,
    user, userMumbles, userArticles, passwordChange,
    sendActivationEmail, activate)
# Create your tests here.
class AccountTests(APITestCase):
def setUp(self):
pass
def test_users_url(self):
url = 'users-api:users'
reversed_url = reverse(url)
response = self.client.get('/api/users/')
self.assertEqual(resolve(reversed_url).func,users)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_users_follow_url(self):
url = 'users-api:follow-user'
reversed_url = reverse(url,args=['praveen'])
self.assertEqual(resolve(reversed_url).func,followUser)
def test_user_profile_update_url(self):
url = 'users-api:profile_update'
reversed_url = reverse(url)
self.assertEqual(resolve(reversed_url).func.view_class,UserProfileUpdate)
def test_profile_update_photo_url(self):
url = 'users-api:profile_update_photo'
reversed_url = reverse(url)
resolved = resolve(reversed_url).func
self.assertEqual(resolved.view_class,ProfilePictureUpdate)
def test_users_recommended_url(self):
url = 'users-api:users-recommended'
reversed_url = reverse(url)
self.assertEqual(resolve(reversed_url).func,usersRecommended)
def test_user_url(self):
url = 'users-api:user'
reversed_url = reverse(url,args=['test'])
self.assertEqual(resolve(reversed_url).func,user)
def test_user_mumbles(self):
url = 'users-api:user-mumbles'
reversed_url = reverse(url,args=['test'])
self.assertEqual(resolve(reversed_url).func,userMumbles)
def test_user_articles_url(self):
url = 'users-api:user-articles'
reversed_url = reverse(url,args=['test'])
self.assertEqual(resolve(reversed_url).func,userArticles)
def test_user_password_url(self):
url = 'users-api:password-change'
reversed_url = reverse(url)
self.assertEqual(resolve(reversed_url).func,passwordChange)
def test_send_activation_email_url(self):
url = 'users-api:send-activation-email'
reversed_url = reverse(url)
self.assertEqual(resolve(reversed_url).func,sendActivationEmail)
def test_active_user_account_url(self):
url = 'users-api:verify'
reversed_url = reverse(url,args=['903u924u934u598348943','*&6g83chruhrweriuj'])
self.assertEqual(resolve(reversed_url).func,activate)
| 1,987
| 11
| 347
|
6a95b14f3ec8c3f933b91466b0d3fff7d5b8dd2e
| 520
|
py
|
Python
|
common/connector.py
|
ex0hunt/redrat
|
08ba8f088fcfb3ea246c56305420c2bc9e77517f
|
[
"BSD-2-Clause"
] | null | null | null |
common/connector.py
|
ex0hunt/redrat
|
08ba8f088fcfb3ea246c56305420c2bc9e77517f
|
[
"BSD-2-Clause"
] | null | null | null |
common/connector.py
|
ex0hunt/redrat
|
08ba8f088fcfb3ea246c56305420c2bc9e77517f
|
[
"BSD-2-Clause"
] | null | null | null |
import configparser
import os
from redmine import Redmine
| 34.666667
| 76
| 0.713462
|
import configparser
import os
from redmine import Redmine
def redmine():
rootdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
config_path = os.path.join(rootdir, 'settings.conf')
config = configparser.ConfigParser()
config.read(config_path)
host = config.get('RedmineServer', 'host')
username = config.get('RedmineServer', 'username')
password = config.get('RedmineServer', 'password')
redmine = Redmine(host, username=username, password=password)
return redmine
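A hedged usage sketch of the connector above. The section and key names in settings.conf are taken from the code; the values are placeholders, and the issue/project calls follow the python-redmine resource API.
# settings.conf, expected in the repository root (one level above common/):
#
#   [RedmineServer]
#   host = https://redmine.example.com
#   username = some_user
#   password = secret
rm = redmine()
print(rm.project.all())   # projects visible to the configured account
print(rm.issue.get(1))    # fetch a single issue by id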
| 439
| 0
| 23
|
73e1afd1d4cf91f0ff98fd1d78bfc8ce897e5c54
| 4,921
|
py
|
Python
|
src/Testing/ZopeTestCase/utils.py
|
tseaver/Zope-RFA
|
08634f39b0f8b56403a2a9daaa6ee4479ef0c625
|
[
"ZPL-2.1"
] | 2
|
2015-12-21T10:34:56.000Z
|
2017-09-24T11:07:58.000Z
|
src/Testing/ZopeTestCase/utils.py
|
MatthewWilkes/Zope
|
740f934fc9409ae0062e8f0cd6dcfd8b2df00376
|
[
"ZPL-2.1"
] | null | null | null |
src/Testing/ZopeTestCase/utils.py
|
MatthewWilkes/Zope
|
740f934fc9409ae0062e8f0cd6dcfd8b2df00376
|
[
"ZPL-2.1"
] | null | null | null |
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Utility functions
These functions are designed to be imported and run at
module level to add functionality to the test environment.
"""
import os
import sys
import time
import random
import transaction
import layer
@layer.appcall
def setupCoreSessions(app):
'''Sets up the session_data_manager e.a.'''
from Acquisition import aq_base
commit = 0
if not hasattr(app, 'temp_folder'):
from Products.TemporaryFolder.TemporaryFolder import MountedTemporaryFolder
tf = MountedTemporaryFolder('temp_folder', 'Temporary Folder')
app._setObject('temp_folder', tf)
commit = 1
if not hasattr(aq_base(app.temp_folder), 'session_data'):
from Products.Transience.Transience import TransientObjectContainer
toc = TransientObjectContainer('session_data',
'Session Data Container',
timeout_mins=3,
limit=100)
app.temp_folder._setObject('session_data', toc)
commit = 1
if not hasattr(app, 'browser_id_manager'):
from Products.Sessions.BrowserIdManager import BrowserIdManager
bid = BrowserIdManager('browser_id_manager',
'Browser Id Manager')
app._setObject('browser_id_manager', bid)
commit = 1
if not hasattr(app, 'session_data_manager'):
from Products.Sessions.SessionDataManager import SessionDataManager
sdm = SessionDataManager('session_data_manager',
title='Session Data Manager',
path='/temp_folder/session_data',
requestName='SESSION')
app._setObject('session_data_manager', sdm)
commit = 1
if commit:
transaction.commit()
@layer.appcall
def setupSiteErrorLog(app):
'''Sets up the error_log object required by ZPublisher.'''
if not hasattr(app, 'error_log'):
try:
from Products.SiteErrorLog.SiteErrorLog import SiteErrorLog
except ImportError:
pass
else:
app._setObject('error_log', SiteErrorLog())
transaction.commit()
def importObjectFromFile(container, filename, quiet=0):
'''Imports an object from a (.zexp) file into the given container.'''
from ZopeLite import _print, _patched
quiet = quiet or not _patched
start = time.time()
if not quiet: _print("Importing %s ... " % os.path.basename(filename))
container._importObjectFromFile(filename, verify=0)
transaction.commit()
if not quiet: _print('done (%.3fs)\n' % (time.time() - start))
_Z2HOST = None
_Z2PORT = None
def startZServer(number_of_threads=1, log=None):
'''Starts an HTTP ZServer thread.'''
global _Z2HOST, _Z2PORT
if _Z2HOST is None:
_Z2HOST = '127.0.0.1'
_Z2PORT = random.choice(range(55000, 55500))
from threadutils import setNumberOfThreads
setNumberOfThreads(number_of_threads)
from threadutils import QuietThread, zserverRunner
t = QuietThread(target=zserverRunner, args=(_Z2HOST, _Z2PORT, log))
t.setDaemon(1)
t.start()
time.sleep(0.1) # Sandor Palfy
return _Z2HOST, _Z2PORT
def makerequest(app, stdout=sys.stdout):
'''Wraps the app into a fresh REQUEST.'''
from Testing.makerequest import makerequest as _makerequest
environ = {}
environ['SERVER_NAME'] = _Z2HOST or 'nohost'
environ['SERVER_PORT'] = '%d' % (_Z2PORT or 80)
environ['REQUEST_METHOD'] = 'GET'
return _makerequest(app, stdout=stdout, environ=environ)
def appcall(func, *args, **kw):
'''Calls a function passing 'app' as first argument.'''
from base import app, close
app = app()
args = (app,) + args
try:
return func(*args, **kw)
finally:
transaction.abort()
close(app)
def makelist(arg):
'''Turns arg into a list. Where arg may be
list, tuple, or string.
'''
if type(arg) == type([]):
return arg
if type(arg) == type(()):
return list(arg)
if type(arg) == type(''):
return filter(None, [arg])
raise ValueError('Argument must be list, tuple, or string')
__all__ = [
'setupCoreSessions',
'setupSiteErrorLog',
'startZServer',
'importObjectFromFile',
'appcall',
'makerequest',
'makelist',
]
| 31.544872
| 83
| 0.636456
|
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Utility functions
These functions are designed to be imported and run at
module level to add functionality to the test environment.
"""
import os
import sys
import time
import random
import transaction
import layer
@layer.appcall
def setupCoreSessions(app):
'''Sets up the session_data_manager e.a.'''
from Acquisition import aq_base
commit = 0
if not hasattr(app, 'temp_folder'):
from Products.TemporaryFolder.TemporaryFolder import MountedTemporaryFolder
tf = MountedTemporaryFolder('temp_folder', 'Temporary Folder')
app._setObject('temp_folder', tf)
commit = 1
if not hasattr(aq_base(app.temp_folder), 'session_data'):
from Products.Transience.Transience import TransientObjectContainer
toc = TransientObjectContainer('session_data',
'Session Data Container',
timeout_mins=3,
limit=100)
app.temp_folder._setObject('session_data', toc)
commit = 1
if not hasattr(app, 'browser_id_manager'):
from Products.Sessions.BrowserIdManager import BrowserIdManager
bid = BrowserIdManager('browser_id_manager',
'Browser Id Manager')
app._setObject('browser_id_manager', bid)
commit = 1
if not hasattr(app, 'session_data_manager'):
from Products.Sessions.SessionDataManager import SessionDataManager
sdm = SessionDataManager('session_data_manager',
title='Session Data Manager',
path='/temp_folder/session_data',
requestName='SESSION')
app._setObject('session_data_manager', sdm)
commit = 1
if commit:
transaction.commit()
@layer.appcall
def setupSiteErrorLog(app):
'''Sets up the error_log object required by ZPublisher.'''
if not hasattr(app, 'error_log'):
try:
from Products.SiteErrorLog.SiteErrorLog import SiteErrorLog
except ImportError:
pass
else:
app._setObject('error_log', SiteErrorLog())
transaction.commit()
def importObjectFromFile(container, filename, quiet=0):
'''Imports an object from a (.zexp) file into the given container.'''
from ZopeLite import _print, _patched
quiet = quiet or not _patched
start = time.time()
if not quiet: _print("Importing %s ... " % os.path.basename(filename))
container._importObjectFromFile(filename, verify=0)
transaction.commit()
if not quiet: _print('done (%.3fs)\n' % (time.time() - start))
_Z2HOST = None
_Z2PORT = None
def startZServer(number_of_threads=1, log=None):
'''Starts an HTTP ZServer thread.'''
global _Z2HOST, _Z2PORT
if _Z2HOST is None:
_Z2HOST = '127.0.0.1'
_Z2PORT = random.choice(range(55000, 55500))
from threadutils import setNumberOfThreads
setNumberOfThreads(number_of_threads)
from threadutils import QuietThread, zserverRunner
t = QuietThread(target=zserverRunner, args=(_Z2HOST, _Z2PORT, log))
t.setDaemon(1)
t.start()
time.sleep(0.1) # Sandor Palfy
return _Z2HOST, _Z2PORT
def makerequest(app, stdout=sys.stdout):
'''Wraps the app into a fresh REQUEST.'''
from Testing.makerequest import makerequest as _makerequest
environ = {}
environ['SERVER_NAME'] = _Z2HOST or 'nohost'
environ['SERVER_PORT'] = '%d' % (_Z2PORT or 80)
environ['REQUEST_METHOD'] = 'GET'
return _makerequest(app, stdout=stdout, environ=environ)
def appcall(func, *args, **kw):
'''Calls a function passing 'app' as first argument.'''
from base import app, close
app = app()
args = (app,) + args
try:
return func(*args, **kw)
finally:
transaction.abort()
close(app)
def makelist(arg):
'''Turns arg into a list. Where arg may be
list, tuple, or string.
'''
if type(arg) == type([]):
return arg
if type(arg) == type(()):
return list(arg)
if type(arg) == type(''):
return filter(None, [arg])
raise ValueError('Argument must be list, tuple, or string')
__all__ = [
'setupCoreSessions',
'setupSiteErrorLog',
'startZServer',
'importObjectFromFile',
'appcall',
'makerequest',
'makelist',
]
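A short, hedged sketch of how a functional test might use the helpers above; the thread count and the .zexp path are arbitrary examples.
# Start an HTTP ZServer for functional tests, then run a helper against a fresh app object.
host, port = startZServer(number_of_threads=2)
def _import(app):
    # Hypothetical export file; appcall() aborts the transaction and closes the app afterwards.
    importObjectFromFile(app, '/tmp/example.zexp', quiet=1)
appcall(_import)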
| 0
| 0
| 0
|
c4ac1344ac12b2b41b5b5813289b0939cfb026e8
| 977
|
py
|
Python
|
experiments/mcompress/set_options.py
|
paralab/EigenMM
|
5c94233524ae2758ebf47c3b3fdb6570a6cc4e59
|
[
"MIT"
] | null | null | null |
experiments/mcompress/set_options.py
|
paralab/EigenMM
|
5c94233524ae2758ebf47c3b3fdb6570a6cc4e59
|
[
"MIT"
] | null | null | null |
experiments/mcompress/set_options.py
|
paralab/EigenMM
|
5c94233524ae2758ebf47c3b3fdb6570a6cc4e59
|
[
"MIT"
] | null | null | null |
emm_fmt = """<?xml version="1.0" encoding="utf-8" ?>
<EIGEN_MM>
<OPTIONS
_splitmaxiters="10"
_nodesperevaluator="1"
_subproblemsperevaluator="1"
_totalsubproblems="1"
_nevaluators="1"
_taskspernode="%d"
_nevals="-1"
_nk="10"
_nb="4"
_p="0"
_nv="10"
_raditers="20"
_splittol="0.9"
_radtol="1e-8"
_L="1.1"
_R="-1"
_terse="0"
_details="0"
_debug="1"
_save_correctness="0"
_save_operators="0"
_save_eigenvalues="0"
_save_eigenbasis="1"
_correctness_filename=""
_operators_filename=""
_eigenvalues_filename=""
_eigenbasis_filename="%s" />
</EIGEN_MM>"""
import sys
if __name__ == "__main__":
taskspernode = int(sys.argv[1])
optionsdir = sys.argv[2]
outputdir = sys.argv[3]
expname = sys.argv[4]
emmpath = optionsdir + "/" + expname + "_options.xml"
f = open(emmpath, 'w')
f_str = emm_fmt % (taskspernode, outputdir + "/" + expname)
f.write(f_str)
f.close()
| 20.354167
| 63
| 0.616172
|
emm_fmt = """<?xml version="1.0" encoding="utf-8" ?>
<EIGEN_MM>
<OPTIONS
_splitmaxiters="10"
_nodesperevaluator="1"
_subproblemsperevaluator="1"
_totalsubproblems="1"
_nevaluators="1"
_taskspernode="%d"
_nevals="-1"
_nk="10"
_nb="4"
_p="0"
_nv="10"
_raditers="20"
_splittol="0.9"
_radtol="1e-8"
_L="1.1"
_R="-1"
_terse="0"
_details="0"
_debug="1"
_save_correctness="0"
_save_operators="0"
_save_eigenvalues="0"
_save_eigenbasis="1"
_correctness_filename=""
_operators_filename=""
_eigenvalues_filename=""
_eigenbasis_filename="%s" />
</EIGEN_MM>"""
import sys
if __name__ == "__main__":
taskspernode = int(sys.argv[1])
optionsdir = sys.argv[2]
outputdir = sys.argv[3]
expname = sys.argv[4]
emmpath = optionsdir + "/" + expname + "_options.xml"
f = open(emmpath, 'w')
f_str = emm_fmt % (taskspernode, outputdir + "/" + expname)
f.write(f_str)
f.close()
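The script is driven entirely by its four positional arguments (tasks per node, options directory, output directory, experiment name). A hedged worked example of what it produces, with placeholder directories:
# Equivalent to running: python set_options.py 16 ./options ./results run1
print(emm_fmt % (16, './results/run1'))  # XML with _taskspernode="16" and _eigenbasis_filename="./results/run1"
# The script itself would write this string to ./options/run1_options.xml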
| 0
| 0
| 0
|
cb18427c6dda988b4a46b9e6269b431bec7b5ea3
| 5,758
|
py
|
Python
|
qtpyvcp/widgets/display_widgets/atc_widget/atc.py
|
awigen/qtpyvcp
|
5a23c4bca78accb159a76ac03652c74d5a07d14f
|
[
"BSD-3-Clause-LBNL",
"MIT"
] | null | null | null |
qtpyvcp/widgets/display_widgets/atc_widget/atc.py
|
awigen/qtpyvcp
|
5a23c4bca78accb159a76ac03652c74d5a07d14f
|
[
"BSD-3-Clause-LBNL",
"MIT"
] | null | null | null |
qtpyvcp/widgets/display_widgets/atc_widget/atc.py
|
awigen/qtpyvcp
|
5a23c4bca78accb159a76ac03652c74d5a07d14f
|
[
"BSD-3-Clause-LBNL",
"MIT"
] | null | null | null |
import os
# Workaround for nvidia proprietary drivers
import ctypes
import ctypes.util
ctypes.CDLL(ctypes.util.find_library("GL"), mode=ctypes.RTLD_GLOBAL)
# end of Workaround
from qtpy.QtCore import Signal, Slot, QUrl, QTimer
from qtpy.QtQuickWidgets import QQuickWidget
from qtpyvcp.plugins import getPlugin
from qtpyvcp.utilities import logger
from qtpyvcp.utilities.hal_qlib import QComponent
LOG = logger.getLogger(__name__)
STATUS = getPlugin('status')
TOOLTABLE = getPlugin('tooltable')
IN_DESIGNER = os.getenv('DESIGNER', False)
WIDGET_PATH = os.path.dirname(os.path.abspath(__file__))
| 30.146597
| 83
| 0.633032
|
import os
# Workaround for nvidia proprietary drivers
import ctypes
import ctypes.util
ctypes.CDLL(ctypes.util.find_library("GL"), mode=ctypes.RTLD_GLOBAL)
# end of Workaround
from qtpy.QtCore import Signal, Slot, QUrl, QTimer
from qtpy.QtQuickWidgets import QQuickWidget
from qtpyvcp.plugins import getPlugin
from qtpyvcp.utilities import logger
from qtpyvcp.utilities.hal_qlib import QComponent
LOG = logger.getLogger(__name__)
STATUS = getPlugin('status')
TOOLTABLE = getPlugin('tooltable')
IN_DESIGNER = os.getenv('DESIGNER', False)
WIDGET_PATH = os.path.dirname(os.path.abspath(__file__))
class DynATC(QQuickWidget):
moveToPocketSig = Signal(int, int, arguments=['previous_pocket', 'pocket_num'])
# toolInSpindleSig = Signal(int, arguments=['tool_num'])
rotateFwdSig = Signal(int, arguments=['steps'])
rotateRevSig = Signal(int, arguments=['steps'])
showToolSig = Signal(int, int, arguments=['pocket', 'tool_num'])
hideToolSig = Signal(int, arguments=['tool_num'])
homeMsgSig = Signal(str, arguments=["message"])
homingMsgSig = Signal(str, arguments=["message"])
def __init__(self, parent=None):
super(DynATC, self).__init__(parent)
if IN_DESIGNER:
return
self.atc_position = 0
self.pocket = 1
self.home = 0
self.homing = 0
self.pocket_slots = 12
self.component = QComponent("atc-widget")
# define pocket pins to store tools
for i in range(self.pocket_slots):
pin_name = "pocket-{}".format(i+1)
self.component.newPin(pin_name, "s32", "in")
self.component[pin_name].valueChanged.connect(self.pocket_changed)
self.component.newPin('home', "float", "in")
self.component.newPin('homing', "float", "in")
self.component.newPin("goto", "float", "in")
self.component.newPin('goto-enable', "bit", "in")
self.component.newPin("steps", "float", "in")
self.component.newPin('steps-fwd', "bit", "in")
self.component.newPin('steps-rev', "bit", "in")
self.component.newPin('jog-fwd', "bit", "in")
self.component.newPin('jog-rev', "bit", "in")
self.component['home'].valueIncreased.connect(self.home_message)
self.component['homing'].valueIncreased.connect(self.homing_message)
self.component['goto-enable'].valueIncreased.connect(self.goto)
self.component['steps-fwd'].valueIncreased.connect(self.steps_fwd)
self.component['steps-rev'].valueIncreased.connect(self.steps_rev)
self.component['jog-fwd'].valueIncreased.connect(self.jog_fwd)
self.component['jog-rev'].valueIncreased.connect(self.jog_rev)
self.component.ready()
self.engine().rootContext().setContextProperty("atc_spiner", self)
qml_path = os.path.join(WIDGET_PATH, "atc.qml")
url = QUrl.fromLocalFile(qml_path)
self.setSource(url) # Fixme fails on qtdesigner
self.tool_table = None
self.status_tool_table = None
self.pockets = dict()
self.tools = None
self.load_tools()
self.draw_tools()
STATUS.tool_table.notify(self.load_tools)
STATUS.pocket_prepped.notify(self.on_pocket_prepped)
STATUS.tool_in_spindle.notify(self.on_tool_in_spindle)
def hideEvent(self, *args, **kwargs):
pass # hack to prevent animation glitch when we are on another tab
def load_tools(self):
self.tool_table = TOOLTABLE.getToolTable()
self.status_tool_table = STATUS.tool_table
self.pockets = dict()
self.tools = dict()
for i in range(self.pocket_slots):
pin_name = "pocket-{}".format(i+1)
self.pockets[i + 1] = self.component[pin_name].value
def draw_tools(self):
for i in range(1, 13):
self.hideToolSig.emit(i)
for pocket, tool in self.pockets.items():
if 0 < pocket < 13:
if tool != 0:
self.showToolSig.emit(pocket, tool)
def pocket_changed(self):
self.load_tools()
self.draw_tools()
def on_tool_in_spindle(self, tool):
self.load_tools()
self.draw_tools()
def on_pocket_prepped(self, pocket_num):
self.load_tools()
self.draw_tools()
def homing_message(self, *args, **kwargs):
self.homing = args[0]
if self.homing:
self.homingMsgSig.emit("REFERENCING")
else:
self.homingMsgSig.emit("")
def home_message(self, *args, **kwargs):
self.home = args[0]
if self.homing:
self.homeMsgSig.emit("")
else:
self.homeMsgSig.emit("UN REFERENCED")
def goto(self):
self.component["goto-enable"].value = 0
pocket = self.component["goto"].value
if self.pocket > pocket:
steps = self.pocket - pocket
self.rotate_rev(steps)
elif self.pocket < pocket:
steps = pocket - self.pocket
self.rotate_fwd(steps)
def steps_fwd(self):
self.component["steps-fwd"].value = 0
steps = self.component["steps"].value
self.rotate_fwd(steps)
def steps_rev(self):
self.component["steps-rev"].value = 0
steps = self.component["steps"].value
self.rotate_rev(steps)
def rotate_fwd(self, steps):
self.rotateFwdSig.emit(steps)
def rotate_rev(self, steps):
self.rotateRevSig.emit(steps)
def jog_fwd(self, *args, **kwargs):
self.rotateFwdSig.emit(1)
self.command.set_digital_output(5, 0)
def jog_rev(self, *args, **kwargs):
self.rotateRevSig.emit(1)
self.command.set_digital_output(6, 0)
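The widget is driven entirely through the HAL pins created above on the "atc-widget" component. A hedged example of exercising it from a halcmd session (HAL syntax, shown as comments rather than Python); the pocket number and step count are arbitrary.
# setp atc-widget.goto 5
# setp atc-widget.goto-enable 1   # rising edge on goto-enable triggers goto(), rotating to pocket 5
# setp atc-widget.steps 3
# setp atc-widget.steps-fwd 1     # step three pockets forward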
| 4,211
| 920
| 23
|
0515dfbce20f8b6db5af0d540ac7d973ccefba31
| 603
|
py
|
Python
|
oreo_backend/memes/migrations/0003_auto_20211108_1250.py
|
TaipeiTechIAEWorkplace/Website
|
fc962d5f8163c08f901fe4d97af14b8e7b3cfc9c
|
[
"MIT"
] | 1
|
2022-02-06T07:08:13.000Z
|
2022-02-06T07:08:13.000Z
|
oreo_backend/memes/migrations/0003_auto_20211108_1250.py
|
TaipeiTechIAEWorkplace/Website
|
fc962d5f8163c08f901fe4d97af14b8e7b3cfc9c
|
[
"MIT"
] | null | null | null |
oreo_backend/memes/migrations/0003_auto_20211108_1250.py
|
TaipeiTechIAEWorkplace/Website
|
fc962d5f8163c08f901fe4d97af14b8e7b3cfc9c
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2021-11-08 04:50
from django.db import migrations, models
| 22.333333
| 58
| 0.557214
|
# Generated by Django 3.2.9 on 2021-11-08 04:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('memes', '0002_auto_20211108_1233'),
]
operations = [
migrations.RemoveField(
model_name='photo',
name='hashtag',
),
migrations.RemoveField(
model_name='photo',
name='uploader',
),
migrations.AlterField(
model_name='photo',
name='upload_date',
field=models.DateTimeField(auto_now_add=True),
),
]
| 0
| 489
| 23
|
2166ee9410003528b21dcef8b26807deef3d0e7b
| 1,546
|
py
|
Python
|
uav.py
|
Aniq55/DroneSim
|
32cc5c40eefa542f1260e922567f854602ee66f4
|
[
"MIT"
] | 5
|
2018-06-10T04:58:29.000Z
|
2022-02-03T08:22:41.000Z
|
uav.py
|
Aniq55/DroneSim
|
32cc5c40eefa542f1260e922567f854602ee66f4
|
[
"MIT"
] | null | null | null |
uav.py
|
Aniq55/DroneSim
|
32cc5c40eefa542f1260e922567f854602ee66f4
|
[
"MIT"
] | 2
|
2018-06-12T04:49:49.000Z
|
2020-06-27T19:59:48.000Z
|
from constants import *
import time
import threading
from chaos import *
| 35.136364
| 87
| 0.498706
|
from constants import *
import time
import threading
from chaos import *
class UAV():
def __init__(self, ID, x, y, velx, vely):
self.ID= ID
self.x= x
self.y= y
self.velx= velx
self.vely= vely
self.rescued = []
self._time_ = time.time()
self.init_time = self._time_
def update_position(self, time_elapsed):
self.x= ( self.x + self.velx*time_elapsed*random_val() )%L
self.y= ( self.y + self.vely*time_elapsed*random_val() )%L
def search_survivors(self):
self.init_time = time.time()
while len(SURVIVORS)>0:
x_lower, x_upper = self.x - RANGE, self.x + RANGE
y_lower, y_upper = self.y - RANGE, self.y + RANGE
filtered= [s for s in SURVIVORS if s.x > x_lower and
s.x < x_upper and
s.y > y_lower and
s.y < y_upper and
s.marked_safe == False]
for f in filtered:
f.marked_safe = True
self.rescued.append(f)
SURVIVORS.remove(f)
time.sleep(0.5)
self.update_position(time.time()- self._time_)
self._time_ = time.time()
# print(len(filtered), time.time())
print(len(SURVIVORS), time.time()- self.init_time)
output_file.write("%d, %f\n"%(len(SURVIVORS), time.time()- self.init_time))
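A hedged sketch of launching several UAVs, each searching in its own thread (the module already imports threading); start positions and velocities are arbitrary, and SURVIVORS, RANGE, L and output_file come from constants/chaos, which are not shown here.
uavs = [UAV(i, x=10.0 * i, y=0.0, velx=1.0, vely=1.5) for i in range(3)]
threads = [threading.Thread(target=u.search_survivors) for u in uavs]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(sum(len(u.rescued) for u in uavs), 'survivors rescued in total')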
| 1,378
| -9
| 104
|
fd2d2d27a90eb687cfa5ddaaf7a717a930d940df
| 2,951
|
py
|
Python
|
ldap_sync/__main__.py
|
JuKu/pycroft
|
15595f9b4327da5c52c77174def73660226da7dc
|
[
"Apache-2.0"
] | null | null | null |
ldap_sync/__main__.py
|
JuKu/pycroft
|
15595f9b4327da5c52c77174def73660226da7dc
|
[
"Apache-2.0"
] | null | null | null |
ldap_sync/__main__.py
|
JuKu/pycroft
|
15595f9b4327da5c52c77174def73660226da7dc
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import logging
import os
from .exporter import add_stdout_logging, establish_and_return_ldap_connection, \
establish_and_return_session, fake_connection, fetch_current_ldap_users, \
fetch_users_to_sync, get_config_or_exit, logger, sync_all
logger = logging.getLogger('ldap_sync')
NAME_LEVEL_MAPPING = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
parser = argparse.ArgumentParser(description="Pycroft ldap syncer")
parser.add_argument('--fake', dest='fake', action='store_true', default=False,
help="Use a mocked LDAP backend")
parser.add_argument("-l", "--log", dest='loglevel', type=str,
choices=list(NAME_LEVEL_MAPPING.keys()), default='info',
help="Set the loglevel")
parser.add_argument("-d", "--debug", dest='loglevel', action='store_const',
const='debug', help="Short for --log=debug")
if __name__ == '__main__':
exit(main())
| 30.42268
| 91
| 0.683497
|
import argparse
import logging
import os
from .exporter import add_stdout_logging, establish_and_return_ldap_connection, \
establish_and_return_session, fake_connection, fetch_current_ldap_users, \
fetch_users_to_sync, get_config_or_exit, logger, sync_all
logger = logging.getLogger('ldap_sync')
def sync_production():
logger.info("Starting the production sync. See --help for other options.")
config = get_config_or_exit(required_property='ldap')
db_users = fetch_users_to_sync(
session=establish_and_return_session(config.db_uri),
required_property=config.required_property,
)
logger.info("Fetched %s database users", len(db_users))
connection = establish_and_return_ldap_connection(
host=config.host,
port=config.port,
bind_dn=config.bind_dn,
bind_pw=config.bind_pw,
)
ldap_users = fetch_current_ldap_users(connection, base_dn=config.base_dn)
logger.info("Fetched %s ldap users", len(ldap_users))
sync_all(db_users, ldap_users, connection, base_dn=config.base_dn)
def sync_fake():
logger.info("Starting sync using a mocked LDAP backend. See --help for other options.")
try:
db_uri = os.environ['PYCROFT_DB_URI']
except KeyError:
logger.critical('PYCROFT_DB_URI not set')
exit()
db_users = fetch_users_to_sync(
session=establish_and_return_session(db_uri)
)
logger.info("Fetched %s database users", len(db_users))
connection = fake_connection()
BASE_DN = 'ou=pycroft,dc=agdsn,dc=de'
logger.debug("BASE_DN set to %s", BASE_DN)
ldap_users = fetch_current_ldap_users(connection, base_dn=BASE_DN)
logger.info("Fetched %s ldap users", len(ldap_users))
sync_all(db_users, ldap_users, connection, base_dn=BASE_DN)
NAME_LEVEL_MAPPING = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
parser = argparse.ArgumentParser(description="Pycroft ldap syncer")
parser.add_argument('--fake', dest='fake', action='store_true', default=False,
help="Use a mocked LDAP backend")
parser.add_argument("-l", "--log", dest='loglevel', type=str,
choices=list(NAME_LEVEL_MAPPING.keys()), default='info',
help="Set the loglevel")
parser.add_argument("-d", "--debug", dest='loglevel', action='store_const',
const='debug', help="Short for --log=debug")
def main():
args = parser.parse_args()
add_stdout_logging(logger, level=NAME_LEVEL_MAPPING[args.loglevel])
try:
if args.fake:
sync_fake()
else:
sync_production()
except KeyboardInterrupt:
logger.fatal("SIGINT received, stopping.")
logger.info("Re-run the syncer to retain a consistent state.")
return 1
return 0
if __name__ == '__main__':
exit(main())
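The module is meant to be run as a package entry point (python -m ldap_sync). A hedged sketch of driving the mocked sync programmatically, equivalent to passing --fake --log debug; the database URI is a placeholder.
import os
os.environ.setdefault('PYCROFT_DB_URI', 'postgresql://user:pass@localhost/pycroft')  # placeholder URI
add_stdout_logging(logger, level=NAME_LEVEL_MAPPING['debug'])
sync_fake()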
| 1,821
| 0
| 69
|
a20fcaf6ccf8820b917742d329e834e07689579f
| 6,837
|
py
|
Python
|
visdex/exploratory_graphs/__init__.py
|
mcraig-ibme/visdex
|
bbf8365e627f6d52fb201ae4ae6fef6775c4d716
|
[
"Apache-2.0"
] | null | null | null |
visdex/exploratory_graphs/__init__.py
|
mcraig-ibme/visdex
|
bbf8365e627f6d52fb201ae4ae6fef6775c4d716
|
[
"Apache-2.0"
] | null | null | null |
visdex/exploratory_graphs/__init__.py
|
mcraig-ibme/visdex
|
bbf8365e627f6d52fb201ae4ae6fef6775c4d716
|
[
"Apache-2.0"
] | null | null | null |
"""
visdex: Exploratory graphs
The exploratory graphs section defines specialised data visualisations that
can be generated by the user on request
"""
import logging
from dash import html, dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State, MATCH
import plotly.graph_objects as go
from . import (
bar_graph,
histogram_graph,
manhattan_graph,
scatter_graph,
violin_graph,
)
from visdex.common import standard_margin_left, vstack, plot_style
LOG = logging.getLogger(__name__)
def generate_generic_group(n_clicks, group_type):
"""
The generic builder for each of the component types.
:param n_clicks:
:param group_type:
:param component_list:
:return:
"""
LOG.info(f"generate_generic_group {group_type}")
children = list()
component_list = all_components[group_type]
for component in component_list:
name = component["id"]
args_to_replicate = dict(component)
del args_to_replicate["component_type"]
del args_to_replicate["id"]
del args_to_replicate["label"]
# Generate each component with the correct id, index, and arguments, inside its
# own Div.
children.append(
html.Div(
[
component["label"] + ":",
component["component_type"](
id={"type": group_type + "-" + name, "index": n_clicks},
**args_to_replicate,
),
],
id={"type": "div-" + group_type + "-" + name, "index": n_clicks},
style=plot_style,
)
)
children.append(
dcc.Graph(
id={"type": "gen-" + group_type + "-graph", "index": n_clicks},
figure=go.Figure(data=go.Scatter()),
)
)
LOG.debug(f"{children}")
return html.Div(
id={"type": "filter-graph-group-" + group_type, "index": n_clicks},
children=children,
)
| 34.356784
| 91
| 0.511482
|
"""
visdex: Exploratory graphs
The exploratory graphs section defines specialised data visualisations that
can be generated by the user on request
"""
import logging
from dash import html, dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State, MATCH
import plotly.graph_objects as go
from . import (
bar_graph,
histogram_graph,
manhattan_graph,
scatter_graph,
violin_graph,
)
from visdex.common import standard_margin_left, vstack, plot_style
LOG = logging.getLogger(__name__)
def get_layout(app):
@app.callback(
[
Output("explore-collapse", "is_open"),
Output("collapse-explore-button", "children"),
],
[Input("collapse-explore-button", "n_clicks")],
[State("explore-collapse", "is_open")],
prevent_initial_call=True,
)
def toggle_collapse_explore(n, is_open):
"""
Handle click on the 'Explore' expand/collapse button
"""
LOG.info(f"toggle_collapse_explore {n} {is_open}")
if n:
return not is_open, "+" if is_open else "-"
return is_open, "-"
@app.callback(
Output("graph-group-container", "children"),
[Input("add-graph-button", "n_clicks")],
[State("graph-group-container", "children")],
prevent_initial_call=True,
)
def add_graph_group(n_clicks, children):
# Add a new graph group each time the button is clicked. The if None guard stops
# there being an initial graph.
LOG.info(f"add_graph_group")
if n_clicks is not None:
# This dropdown controls what type of graph-group to display next to it.
new_graph_type_dd = html.Div(
[
"Graph type:",
dcc.Dropdown(
id={"type": "graph-type-dd", "index": n_clicks},
options=[
{"label": str(value).capitalize(), "value": value}
for value in all_components.keys()
],
value="scatter",
style={"width": "50%"},
),
# This is a placeholder for the 'filter-graph-group-scatter' or
# 'filter-graph-group-bar' to be placed here.
# Because graph-type-dd above is set to Scatter, this will initially be
# automatically filled with a filter-graph-group-scatter.
# But on the initial generation of this object, we give it type
# 'placeholder' to make it easy to check its value in
# change_graph_group_type()
html.Div(id={"type": "placeholder", "index": n_clicks}),
],
id={"type": "divgraph-type-dd", "index": n_clicks},
style=vstack,
)
children.append(new_graph_type_dd)
return children
@app.callback(
Output({"type": "divgraph-type-dd", "index": MATCH}, "children"),
[Input({"type": "graph-type-dd", "index": MATCH}, "value")],
[
State({"type": "graph-type-dd", "index": MATCH}, "id"),
State({"type": "divgraph-type-dd", "index": MATCH}, "children"),
],
)
def change_graph_group_type(graph_type, id, children):
LOG.info(f"change_graph_group_type {graph_type} {id}")
# Generate a new group of the right type.
if "filter-graph-group-" + str(graph_type) != children[-1]["props"]["id"]["type"]:
children[-1] = generate_generic_group(id["index"], graph_type)
return children
bar_graph.define_cbs(app)
histogram_graph.define_cbs(app)
manhattan_graph.define_cbs(app)
scatter_graph.define_cbs(app)
violin_graph.define_cbs(app)
return html.Div(children=[
html.Div(
[
dbc.Button(
"+",
id="collapse-explore-button",
style={
"display": "inline-block",
"margin-left": "10px",
"width": "40px",
"vertical-align" : "middle",
},
),
html.H2(
"Exploratory graphs",
style={
"display": "inline-block",
"margin-left": standard_margin_left,
"margin-bottom": "0",
"vertical-align" : "middle",
},
),
],
),
dbc.Collapse(
id="explore-collapse",
children=[
# Container to hold all the exploratory graphs
html.Div(id="graph-group-container", children=[]),
# Button at the page bottom to add a new graph
html.Button(
"New Graph",
id="add-graph-button",
style={
"margin-top": "10px",
"margin-left": standard_margin_left,
"margin-bottom": "40px",
},
),
],
is_open=False,
),
])
def generate_generic_group(n_clicks, group_type):
"""
The generic builder for each of the component types.
:param n_clicks:
:param group_type:
:param component_list:
:return:
"""
LOG.info(f"generate_generic_group {group_type}")
children = list()
component_list = all_components[group_type]
for component in component_list:
name = component["id"]
args_to_replicate = dict(component)
del args_to_replicate["component_type"]
del args_to_replicate["id"]
del args_to_replicate["label"]
# Generate each component with the correct id, index, and arguments, inside its
# own Div.
children.append(
html.Div(
[
component["label"] + ":",
component["component_type"](
id={"type": group_type + "-" + name, "index": n_clicks},
**args_to_replicate,
),
],
id={"type": "div-" + group_type + "-" + name, "index": n_clicks},
style=plot_style,
)
)
children.append(
dcc.Graph(
id={"type": "gen-" + group_type + "-graph", "index": n_clicks},
figure=go.Figure(data=go.Scatter()),
)
)
LOG.debug(f"{children}")
return html.Div(
id={"type": "filter-graph-group-" + group_type, "index": n_clicks},
children=children,
)
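generate_generic_group() reads an all_components mapping that is referenced but not defined in this file (it presumably lives alongside the per-graph modules). Below is a hedged sketch of the shape it must have, inferred from how the fields are consumed above; the concrete graph types and dropdown ids are illustrative only.
# Each entry: a Dash component factory plus its kwargs; 'component_type', 'id' and 'label'
# are consumed by generate_generic_group(), everything else is passed through to the factory.
all_components = {
    'scatter': [
        {'component_type': dcc.Dropdown, 'id': 'x', 'label': 'x variable', 'multi': False},
        {'component_type': dcc.Dropdown, 'id': 'y', 'label': 'y variable', 'multi': False},
    ],
    'bar': [
        {'component_type': dcc.Dropdown, 'id': 'x', 'label': 'x variable', 'multi': False},
    ],
}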
| 4,786
| 0
| 23
|
eebf786325342f19a4237a7fea589022310860b1
| 4,982
|
py
|
Python
|
intel_software/pkg_contents/micperf/CONTENTS/usr/share/micperf/micp/micp/kernels/mkl_conv.py
|
antoinecarme/xeon-phi-data
|
883a6e2f31b2e729715303725f417b2990d923be
|
[
"BSD-3-Clause"
] | 1
|
2021-07-22T18:01:28.000Z
|
2021-07-22T18:01:28.000Z
|
intel_software/pkg_contents/micperf/CONTENTS/usr/share/micperf/micp/micp/kernels/mkl_conv.py
|
antoinecarme/xeon-phi-data
|
883a6e2f31b2e729715303725f417b2990d923be
|
[
"BSD-3-Clause"
] | null | null | null |
intel_software/pkg_contents/micperf/CONTENTS/usr/share/micperf/micp/micp/kernels/mkl_conv.py
|
antoinecarme/xeon-phi-data
|
883a6e2f31b2e729715303725f417b2990d923be
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2012-2017, Intel Corporation, All Rights Reserved.
#
# This software is supplied under the terms of a license
# agreement or nondisclosure agreement with Intel Corp.
# and may not be copied or disclosed except in accordance
# with the terms of that agreement.
import os
import re
import micp.kernel as micp_kernel
import micp.info as micp_info
import micp.common as micp_common
import micp.params as micp_params
from micp.common import mp_print, get_ln, CAT_ERROR
confParamNames = [ 'groups', 'nImg', 'inpWidth', 'inpHeight', 'nIfm', \
'nOfm', 'kw', 'kh', 'stride', 'pad', 'iters' ]
optimalParamValues = '1 16 224 224 3 64 7 7 2 3 100'
# expected minimal number of parsed scores in output
CONST_expected_perf_scores = 3
# expected number of "|"-separated sections in output
CONST_expected_sections = 2
# expected measurements per row
CONST_expected_meas_per_row = 4
| 35.585714
| 119
| 0.609193
|
# Copyright 2012-2017, Intel Corporation, All Rights Reserved.
#
# This software is supplied under the terms of a license
# agreement or nondisclosure agreement with Intel Corp.
# and may not be copied or disclosed except in accordance
# with the terms of that agreement.
import os
import re
import micp.kernel as micp_kernel
import micp.info as micp_info
import micp.common as micp_common
import micp.params as micp_params
from micp.common import mp_print, get_ln, CAT_ERROR
confParamNames = [ 'groups', 'nImg', 'inpWidth', 'inpHeight', 'nIfm', \
'nOfm', 'kw', 'kh', 'stride', 'pad', 'iters' ]
optimalParamValues = '1 16 224 224 3 64 7 7 2 3 100'
# expected minimal number of parsed scores in output
CONST_expected_perf_scores = 3
# expected number of "|"-separated sections in output
CONST_expected_sections = 2
# expected measurements per row
CONST_expected_meas_per_row = 4
class mkl_conv(micp_kernel.Kernel):
def __init__(self):
optimalParamsString = ''
self._categoryParams = {}
info = micp_info.Info()
maxCount = info.num_cores()
self.name = 'mkl_conv'
self.param_validator = micp_params.NO_VALIDATOR
# for ease of use, split params into two lists
self._paramNames = ['omp_num_threads', 'with_padding', 'output']
self._paramNames.extend(confParamNames)
self._paramDefaults = {'omp_num_threads':str(maxCount),
'with_padding':'0',
'output':'--original-output'}
for (idx, val) in enumerate(optimalParamValues.split(' ')):
optimalParamsString += '--{0} {1} '.format(confParamNames[idx], val)
self._paramDefaults[confParamNames[idx]] = val
self._categoryParams['test'] = [ optimalParamsString ]
self._categoryParams['optimal'] = [ optimalParamsString ]
self._categoryParams['optimal_quick'] = self._categoryParams['optimal']
self._categoryParams['scaling'] = self._categoryParams['optimal']
self._categoryParams['scaling_quick'] = self._categoryParams['optimal']
# scale with step 10
coreConfig = range(1, maxCount, 10)
self._categoryParams['scaling_core'] = \
[ ' '.join(['--omp_num_threads {0}'.format(cc), optimalParamsString]) \
for cc in coreConfig]
def path_host_exec(self, offload_method):
if offload_method == 'local':
return self._path_exec(micp_kernel.LIBEXEC_HOST, "std_conv_bench")
else:
return None
def _do_unit_test(self):
return True
def offload_methods(self):
return ['local']
def param_type(self):
return 'pos'
def independent_var(self, category):
return 'omp_num_threads'
def param_for_env(self):
return ['omp_num_threads']
def path_dev_exec(self, offType):
""" Intel Xeon Phi Coprocessors is not supported """
return None
def environment_host(self):
return {'LD_LIBRARY_PATH':self.ld_library_path(),
'KMP_PLACE_THREADS':'1T',
'KMP_AFFINITY':'compact,granularity=fine'}
def get_process_modifiers(self):
info = micp_info.Info()
if info.is_processor_mcdram_available():
return ['numactl', '--membind=1']
else:
return []
def parse_desc(self, raw):
res_line = raw.splitlines()
# get general parameters before '|' character
try:
out_sections = res_line[1].rsplit("|", 1)
except IndexError:
micp_kernel.raise_parse_error(raw)
if len(out_sections) != CONST_expected_sections:
micp_kernel.raise_parse_error(raw)
return out_sections[0].strip()
def parse_perf(self, raw):
res_lines = raw.splitlines()
result = {}
for line in res_lines:
# example one line of output:
# FWD w/ padding in flops min(ms) 0.01; max(gflop/s) 2.70;avg(ms) 0.02; avg(gflop/s) 1.58;
# ex. ( FWD )
propagation = re.search('([F|B]WD[A-Z_]*)', line)
# ex. (avg ) ((gflops/s)) (1.58 )
values = re.findall('([a-zA-Z]*)\(([a-zA-Z/]*)\)\s*([0-9]*\.[0-9]*)', line)
# skip text data lines
if not (propagation and values):
continue
# check syntax (4 measurements per row)
if len(values) != CONST_expected_meas_per_row:
micp_kernel.raise_parse_error(raw)
propag_txt = propagation.group(0)
for (prop, unit, value) in values:
if prop != 'avg':
continue
if unit == 'gflop/s':
result['Computation.Avg.{0}'.format(propag_txt)] = {'value':value, 'units':'GFlops', 'rollup':True}
if len(result) != CONST_expected_perf_scores:
micp_kernel.raise_parse_error(raw)
return result
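As a worked example of parse_perf(), the sample line quoted in its comments contributes exactly one rollup entry; the method only returns once three such propagation scores have been collected (CONST_expected_perf_scores).
line = 'FWD w/ padding in flops min(ms) 0.01; max(gflop/s) 2.70;avg(ms) 0.02; avg(gflop/s) 1.58;'
# The regexes above extract propagation 'FWD' and four (name, unit, value) triples;
# only the avg(gflop/s) pair is kept, yielding:
#   {'Computation.Avg.FWD': {'value': '1.58', 'units': 'GFlops', 'rollup': True}}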
| 3,629
| 430
| 23
|
5499e89c9e89f497892f031f5a9cc83e7deaabf6
| 610
|
py
|
Python
|
wfsim/utils.py
|
jmeyers314/wfsim
|
c2ad60c100ec1c4046368801a56a5211499f0c51
|
[
"BSD-3-Clause"
] | null | null | null |
wfsim/utils.py
|
jmeyers314/wfsim
|
c2ad60c100ec1c4046368801a56a5211499f0c51
|
[
"BSD-3-Clause"
] | null | null | null |
wfsim/utils.py
|
jmeyers314/wfsim
|
c2ad60c100ec1c4046368801a56a5211499f0c51
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import galsim
def BBSED(T):
"""(unnormalized) Blackbody SED for temperature T in Kelvin.
"""
waves_nm = np.arange(330.0, 1120.0, 10.0)
flambda = planck(T, waves_nm*1e-9)
return galsim.SED(
galsim.LookupTable(waves_nm, flambda),
wave_type='nm',
flux_type='flambda'
)
| 27.727273
| 64
| 0.57377
|
import numpy as np
import galsim
def BBSED(T):
"""(unnormalized) Blackbody SED for temperature T in Kelvin.
"""
waves_nm = np.arange(330.0, 1120.0, 10.0)
def planck(t, w):
# t in K
# w in m
c = 2.99792458e8 # speed of light in m/s
kB = 1.3806488e-23 # Boltzmann's constant J per Kelvin
h = 6.62607015e-34 # Planck's constant in J s
return w**(-5) / (np.exp(h*c/(w*kB*t))-1)
flambda = planck(T, waves_nm*1e-9)
return galsim.SED(
galsim.LookupTable(waves_nm, flambda),
wave_type='nm',
flux_type='flambda'
)
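A small usage sketch of the helper above; the temperature is an arbitrary solar-like value.
sed = BBSED(5800.0)   # roughly solar effective temperature, in Kelvin
print(sed(500.0))     # evaluate the (unnormalized) SED at 500 nm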
| 249
| 0
| 26
|
a5f484ac8ab36970a0402fcb7d92a67abbe863f9
| 1,495
|
py
|
Python
|
src/app/search.py
|
delgadofarid/my-first-search-engine
|
e8ea909030a599bb4bba739fe77747c98395dc29
|
[
"Apache-2.0"
] | 1
|
2021-06-05T03:52:21.000Z
|
2021-06-05T03:52:21.000Z
|
src/app/search.py
|
delgadofarid/my-first-search-engine
|
e8ea909030a599bb4bba739fe77747c98395dc29
|
[
"Apache-2.0"
] | null | null | null |
src/app/search.py
|
delgadofarid/my-first-search-engine
|
e8ea909030a599bb4bba739fe77747c98395dc29
|
[
"Apache-2.0"
] | null | null | null |
import re
from elasticsearch import Elasticsearch, helpers
from itertools import islice
# initialize Elasticsearch client
es = Elasticsearch()
| 30.510204
| 103
| 0.626756
|
import re
from elasticsearch import Elasticsearch, helpers
from itertools import islice
# initialize Elasticsearch client
es = Elasticsearch()
def first_n(iterable, n):
return islice(iterable, 0, n)
def format_es_response(user_question, es_candidates):
results = list()
for c in es_candidates:
par = dict()
par['questionText'] = user_question
par['bookTitle'] = c['_source']['bookTitle']
par['paragraphText'] = c['_source']['paragraphText']
par['esScore'] = c['_score']
par['paragraphId'] = c['_source']['paragraphId']
par['bookURL'] = c['_source']['bookURL']
par['bookId'] = c['_source']['bookId']
results.append(par)
return results
def search_candidates(user_question, index_name="wikibooks-search-index", size=20, es=Elasticsearch()):
match_queries = [
{"match": {"bookTitle": user_question}},
{"match": {"paragraphText": user_question}}
]
quoted_text = re.findall('"([^"]*)"', user_question)
for text in quoted_text:
match_queries.append({"match_phrase": {"bookTitle": text}})
match_queries.append({"match_phrase": {"paragraphText": text}})
es_query = {
"query": {
"bool": {
"should": match_queries
}
}
}
results = helpers.scan(es, query=es_query, index=index_name, preserve_order=True)
results = first_n(results, size)
return format_es_response(user_question, results)
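For illustration only: the bool/should query that search_candidates() above builds for a hypothetical question containing one quoted phrase. No Elasticsearch connection is needed to inspect the shape.

user_question = 'who wrote "moby dick"'   # hypothetical input
es_query = {
    "query": {
        "bool": {
            "should": [
                {"match": {"bookTitle": user_question}},
                {"match": {"paragraphText": user_question}},
                {"match_phrase": {"bookTitle": "moby dick"}},
                {"match_phrase": {"paragraphText": "moby dick"}},
            ]
        }
    }
}
print(es_query)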
| 1,279
| 0
| 69
|
5786c329b92403e4f8b652789de8bbe26502cea4
| 24,221
|
py
|
Python
|
tests/test_configfetch.py
|
openandclose/configfetch
|
fc0b329e6861cc73f0a108ddaea636e6956dd56f
|
[
"MIT"
] | null | null | null |
tests/test_configfetch.py
|
openandclose/configfetch
|
fc0b329e6861cc73f0a108ddaea636e6956dd56f
|
[
"MIT"
] | null | null | null |
tests/test_configfetch.py
|
openandclose/configfetch
|
fc0b329e6861cc73f0a108ddaea636e6956dd56f
|
[
"MIT"
] | null | null | null |
import argparse
import configparser
import functools
import textwrap
import pytest
import configfetch
fetch_ = configfetch.fetch
fetch = functools.partial(
configfetch.fetch, option_builder=configfetch.FiniOptionBuilder)
# blank string returns ``None``
# Just checking the standard library's behaviors.
class _CustomFunc(configfetch.Func):
"""Used the test below."""
@configfetch.register
| 24.842051
| 85
| 0.473886
|
import argparse
import configparser
import functools
import textwrap
import pytest
import configfetch
fetch_ = configfetch.fetch
fetch = functools.partial(
configfetch.fetch, option_builder=configfetch.FiniOptionBuilder)
def f(string):
return textwrap.dedent(string.strip('\n'))
def _get_action(conf, option_strings):
parser = argparse.ArgumentParser(prog='test')
conf.build_arguments(parser)
# parser.print_help()
for action in parser._get_optional_actions():
if option_strings in action.option_strings:
return action
raise ValueError('No action with option_strings: %r' % option_strings)
class TestEscapedSplit:
def check_comma(self, value, expected):
ret = configfetch._parse_comma(value)
assert ret == expected
def check_line(self, value, expected):
ret = configfetch._parse_line(value)
assert ret == expected
def test_comma(self):
self.check_comma('aaaa', ['aaaa'])
self.check_comma(r'\aaaa', [r'\aaaa'])
self.check_comma(r'aa\aa', [r'aa\aa'])
self.check_comma(r'aaa\a', [r'aaa\a'])
self.check_comma(r'aaaa\\', [r'aaaa\\'])
self.check_comma(r'aa\\aa', [r'aa\\aa'])
self.check_comma(r'aa\\\aa', [r'aa\\\aa'])
self.check_comma('aa, bb', ['aa', 'bb'])
self.check_comma(r'aa\, bb', ['aa, bb'])
self.check_comma(r'aa\\, bb', [r'aa\, bb'])
self.check_comma(r'aa\\\, bb', [r'aa\\, bb'])
self.check_comma(r'aa\a, bb', [r'aa\a', 'bb'])
self.check_comma(r'aa\\a, bb', [r'aa\\a', 'bb'])
self.check_comma(r'aa\\\a, bb', [r'aa\\\a', 'bb'])
self.check_comma(',aa', ['aa'])
self.check_comma('aa,', ['aa'])
self.check_comma('aa,,', ['aa'])
def test_line(self):
self.check_line('aa\nbb', ['aa', 'bb'])
self.check_line('aa\\\nbb', ['aa\nbb'])
self.check_line('aa\\\\\nbb', ['aa\\\nbb'])
self.check_line('aa\\\\\\\nbb', ['aa\\\\\nbb'])
self.check_line('aa\nbb,', ['aa', 'bb,'])
class TestInheritance:
def test_iter(self):
data = f("""
[sec1]
[sec2]
""")
conf = fetch(data)
assert list(conf.__iter__()) == ['DEFAULT', 'sec1', 'sec2']
def test_iter_option(self):
data = f("""
[sec1]
aa = xxx
bb = yyy
""")
conf = fetch(data)
assert list(conf.sec1.__iter__()) == ['aa', 'bb']
def test_contains(self):
data = f("""
[sec1]
[sec2]
""")
conf = fetch(data)
assert 'sec2' in conf
def test_contains_option(self):
data = f("""
[sec1]
aa = xxx
bb = yyy
""")
conf = fetch(data)
assert 'bb' in conf.sec1
class TestParseConfig:
def test_conf_str(self):
data = f("""
[sec1]
aa = xxx
""")
conf = fetch(data)
assert conf.sec1.aa == 'xxx'
def test_conf_str_blank(self):
data = f("""
[sec1]
""")
conf = fetch(data)
with pytest.raises(configfetch.NoOptionError):
assert conf.sec1.aa == ''
def test_conf_str_nosection(self):
data = f("""
[sec1]
aa = xxx
""")
conf = fetch(data)
with pytest.raises(configfetch.NoSectionError):
assert conf.sec2
def test_conf_str_default(self):
data = f("""
[DEFAULT]
aa = xxx
[sec1]
""")
conf = fetch(data)
assert conf.sec1.aa == 'xxx'
def test_conf_str_default_nosection(self):
data = f("""
[DEFAULT]
aa = xxx
""")
conf = fetch(data)
with pytest.raises(configfetch.NoSectionError):
assert conf.sec1.aa == 'xxx'
def test_conf_str_default_read_section(self):
data = f("""
[DEFAULT]
aa = xxx
""")
conf = fetch(data)
data = f("""
[sec1]
""")
conf._config.read_string(data)
assert conf.sec1.aa == 'xxx'
def test_conf_str_default_blank(self):
data = f("""
[DEFAULT]
[sec1]
""")
conf = fetch(data)
with pytest.raises(configfetch.NoOptionError):
assert conf.sec1.aa == ''
def test_conf_str_default_blank_nosection(self):
data = ''
conf = fetch(data)
with pytest.raises(configfetch.NoSectionError):
assert conf.sec1.aa == ''
def test_conf_bool(self):
data = f("""
[sec1]
aa = :: f: bool
Yes
""")
conf = fetch(data)
assert conf.sec1.aa is True
def test_conf_bool_no(self):
data = f("""
[sec1]
aa = :: f: bool
No
""")
conf = fetch(data)
assert conf.sec1.aa is False
# blank string returns ``None``
def test_conf_bool_blank(self):
data = f("""
[sec1]
aa = :: f: bool
""")
conf = fetch(data)
assert conf.sec1.aa is None
def test_conf_comma(self):
data = f("""
[sec1]
aa = :: f: comma
xxx1, xxx2, xxx3
""")
conf = fetch(data)
assert conf.sec1.aa == ['xxx1', 'xxx2', 'xxx3']
def test_conf_comma_indent(self):
data = f("""
[sec1]
aa = :: f: comma
xxx1, xxx2,
xxx3
""")
conf = fetch(data)
assert conf.sec1.aa == ['xxx1', 'xxx2', 'xxx3']
def test_conf_comma_newline(self):
data = f("""
[sec1]
aa = :: f: comma
xxx1, xxx2
xxx3
""")
conf = fetch(data)
assert conf.sec1.aa == ['xxx1', 'xxx2\nxxx3']
def test_conf_comma_blank(self):
data = f("""
[sec1]
aa = :: f: comma
""")
conf = fetch(data)
assert conf.sec1.aa == []
def test_conf_line(self):
data = f("""
[sec1]
aa = :: f: line
xxx1
xxx2
xxx3
""")
conf = fetch(data)
assert conf.sec1.aa == ['xxx1', 'xxx2', 'xxx3']
def test_conf_line_comma(self):
data = f("""
[sec1]
aa = :: f: line
xxx1
xxx2
xxx3, xxx4
""")
conf = fetch(data)
assert conf.sec1.aa == ['xxx1', 'xxx2', 'xxx3, xxx4']
def test_conf_line_blank(self):
data = f("""
[sec1]
aa = :: f: line
""")
conf = fetch(data)
assert conf.sec1.aa == []
def test_conf_line_multiblanks(self):
data = f("""
[sec1]
aa = :: f: line
""")
conf = fetch(data)
assert conf.sec1.aa == []
def test_conf_bar_comma(self):
data = f("""
[sec1]
aa = :: f: comma, bar
xxx1, xxx2, xxx3
""")
conf = fetch(data)
assert conf.sec1.aa == 'xxx1|xxx2|xxx3'
def test_conf_bar_comma_blank(self):
data = f("""
[sec1]
aa = :: f: comma, bar
""")
conf = fetch(data)
assert conf.sec1.aa == ''
def test_conf_bar_comma_blank_spaces(self):
data = f("""
[sec1]
aa = :: f: comma, bar
""")
conf = fetch(data)
assert conf.sec1.aa == ''
def test_conf_bar_line(self):
data = f("""
[sec1]
aa = :: f: line, bar
xxx1
xxx2
xxx3
""")
conf = fetch(data)
assert conf.sec1.aa == 'xxx1|xxx2|xxx3'
def test_conf_bar_line_blank(self):
data = f("""
[sec1]
aa = :: f: line, bar
""")
conf = fetch(data)
assert conf.sec1.aa == ''
def test_conf_bar_line_blank_spaces(self):
data = f("""
[sec1]
aa = :: f: line, bar
""")
conf = fetch(data)
assert conf.sec1.aa == ''
def test_conf_cmd(self):
data = f("""
[sec1]
aa = :: f: cmd
--aaa -b "ccc cc" ddd,dd
""")
conf = fetch(data)
assert conf.sec1.aa == ['--aaa', '-b', 'ccc cc', 'ddd,dd']
def test_conf_cmds(self):
data = f("""
[sec1]
aa = :: f: line, cmds
ls *.txt
find . "aaa"
""")
conf = fetch(data)
assert conf.sec1.aa == [['ls', '*.txt'], ['find', '.', 'aaa']]
def test_conf_fmt(self):
data = f("""
[sec1]
aa = :: f: fmt
{USER}/data/my.css
""")
conf = fetch(data, fmts={'USER': '/home/john'})
assert conf.sec1.aa == '/home/john/data/my.css'
class TestParseContexts:
def test_ctx_default_bool(self):
data = f("""
[DEFAULT]
aa = :: f: bool
no
[sec1]
""")
conf = fetch(data)
assert conf.sec1.aa is False
def test_ctx_default_bool_noop(self):
data = f("""
[DEFAULT]
aa = :: f: bool
[sec1]
aa = no
""")
conf = fetch(data)
assert conf.sec1.aa is False
def test_ctx_default_comma(self):
data = f("""
[DEFAULT]
aa = :: f: comma
[sec1]
aa = xxx1, xxx2, xxx3
""")
conf = fetch(data)
assert conf.sec1.aa == ['xxx1', 'xxx2', 'xxx3']
class TestParseFunc:
def test_func_newline(self):
data = f("""
[sec1]
aa =
:: f: bool
no
""")
conf = fetch(data)
assert conf.sec1.aa is False
# Just checking the standard library's behaviors.
class TestConfigParser:
def test_indent(self):
data = f("""
[sec1]
aa =
xxx
""")
config = configparser.ConfigParser()
config.read_string(data)
assert config['sec1']['aa'] == '\nxxx'
data = f("""
[sec1]
aa =
xxx
""")
config = configparser.ConfigParser()
with pytest.raises(configparser.ParsingError):
config.read_string(data)
def test_allow_no_value(self):
data = f("""
[sec1]
aa =
:: f: bool
no
""")
config = configparser.ConfigParser(allow_no_value=True)
config.read_string(data)
assert config['sec1']['aa'] == '\n:: f: bool\nno'
class TestArgparse:
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--aa')
parser.add_argument('-b', '--bb')
parser.add_argument('-c', '--cc', action='store_const', default='', const='yes')
parser.add_argument('-d', '--no-cc', action='store_const', const='no', dest='cc')
parser.add_argument('-e', '--ee-eee')
def get_args(self, cmd):
return self.parser.parse_args(cmd)
def test_args_and_conf(self):
data = f("""
[sec1]
aa = xxx
""")
args = self.get_args(['--aa', 'axxx'])
conf = fetch(data, args=args)
assert conf.sec1.aa == 'axxx'
def test_args_and_conf_short(self):
data = f("""
[sec1]
aa = xxx
""")
args = self.get_args(['-a', 'axxx'])
conf = fetch(data, args=args)
assert conf.sec1.aa == 'axxx'
def test_args_and_conf_none(self):
data = f("""
[sec1]
aa = xxx
""")
args = self.get_args([])
conf = fetch(data, args=args)
assert conf.sec1.aa == 'xxx'
def test_args_and_conf_const(self):
data = f("""
[sec1]
cc = :: f: bool
""")
args = self.get_args(['--cc'])
conf = fetch(data, args=args)
assert conf.sec1.cc is True
def test_args_and_conf_const_false(self):
data = f("""
[sec1]
cc = :: f: bool
true
""")
args = self.get_args(['--no-cc'])
conf = fetch(data, args=args)
assert conf.sec1.cc is False
def test_args_and_conf_dash(self):
data = f("""
[sec1]
ee_eee = xxx
""")
args = self.get_args(['-e', 'axxx'])
conf = fetch(data, args=args)
assert conf.sec1.ee_eee == 'axxx'
class _CustomFunc(configfetch.Func):
"""Used the test below."""
@configfetch.register
def custom(self, value):
return 'test'
class TestCustomize:
def test_customfunc(self):
data = f("""
[sec1]
aa = :: f: custom
xxx
""")
conf = fetch(data, Func=_CustomFunc)
assert conf.sec1.aa == 'test'
class TestDouble:
def test_nooption_nooption(self):
data = f("""
[sec1]
aa = xxx
""")
conf1 = fetch(data)
data = f("""
[sec1]
aa = yyy
""")
conf2 = fetch(data)
double = configfetch.Double(conf2.sec1, conf1.sec1)
with pytest.raises(configfetch.NoOptionError):
assert double.bb == 'zzz'
def test_nooption_blank(self):
data = f("""
[sec1]
aa = xxx
""")
conf1 = fetch(data)
data = f("""
[sec1]
bb =
""")
conf2 = fetch(data)
double = configfetch.Double(conf2.sec1, conf1.sec1)
assert double.bb == ''
def test_blank_nooption(self):
data = f("""
[sec1]
bb =
""")
conf1 = fetch(data)
data = f("""
[sec1]
aa = yyy
""")
conf2 = fetch(data)
double = configfetch.Double(conf2.sec1, conf1.sec1)
assert double.bb == ''
def test_blank_blank(self):
data = f("""
[sec1]
bb =
""")
conf1 = fetch(data)
data = f("""
[sec1]
bb = :: f: comma
""")
conf2 = fetch(data)
double = configfetch.Double(conf2.sec1, conf1.sec1)
assert double.bb == ''
def test_plus(self):
data = f("""
[sec1]
aa = :: f: plus
xxx, yyy
""")
conf1 = fetch(data)
data = f("""
[sec1]
aa = :: f: plus
-yyy
""")
conf2 = fetch(data)
double = configfetch.Double(conf2.sec1, conf1.sec1)
assert double.aa == ['xxx']
class TestGetPlusMinusValues:
initial = ['aaa', 'bbb', 'ccc']
def compare(self, adjusts, initial, expected):
values = configfetch._get_plusminus_values(adjusts, initial)
assert values == expected
def test_adjusts_argument(self):
args = (['ddd'], None, ['ddd'])
self.compare(*args)
args = (['+ddd'], None, ['ddd'])
self.compare(*args)
args = (['-bbb'], None, [])
self.compare(*args)
args = (['ddd'], self.initial, ['ddd'])
self.compare(*args)
args = (['+ddd'], self.initial, ['aaa', 'bbb', 'ccc', 'ddd'])
self.compare(*args)
args = (['-bbb'], self.initial, ['aaa', 'ccc'])
self.compare(*args)
args = (['-aaa, -bbb'], self.initial, ['ccc'])
self.compare(*args)
args = (['-aaa, +ddd, +eee'], self.initial,
['bbb', 'ccc', 'ddd', 'eee'])
self.compare(*args)
class TestMinusAdapter:
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--aa', action='store_const', const='A')
parser.add_argument('-b', '--bb', action='store_true')
parser.add_argument('-c', '--cc', action='store_false')
parser.add_argument('-d', '--dd', action='append')
parser.add_argument('-e', '--ee', action='append_const', const='E')
parser.add_argument('-f', '--ff', action='count')
parser.add_argument('-x', '--xx')
parser.add_argument('-y', '--yy', nargs=1)
def compare(self, args, new_args, matcher=None):
assert configfetch.minusadapter(self.parser, matcher, args) == new_args
def test(self):
# No Minus argument
args = ['--aa', '--xx', 'xxxx', '--bb']
new_args = ['--aa', '--xx', 'xxxx', '--bb']
self.compare(args, new_args)
# Minus argument
args = ['--aa', '--xx', '-xxxx', '--bb']
new_args = ['--aa', '--xx=-xxxx', '--bb']
self.compare(args, new_args)
# Minus with another StoreAction
args = ['--aa', '--xx', '-xxxx', '--yy', 'yyyy']
new_args = ['--aa', '--xx=-xxxx', '--yy', 'yyyy']
self.compare(args, new_args)
# Minus with AppendAction
args = ['--dd', '-dddd', '--xx', '-xxxx', '--bb']
new_args = ['--dd=-dddd', '--xx=-xxxx', '--bb']
self.compare(args, new_args)
# Minus, short option version
args = ['--aa', '-x', '-xxxx', '--bb']
new_args = ['--aa', '-x-xxxx', '--bb']
self.compare(args, new_args)
class TestParseArgs:
def test_help(self):
data = f("""
[sec1]
aa = : help string
:: f: comma
xxx1, xxx2
""")
conf = fetch(data)
args = conf._ctx['aa']['argparse']
assert args['help'] == 'help string'
def test_help_multilines(self):
data = f("""
[sec1]
aa = : This
: is a
: help.
:: f: comma
xxx1, xxx2
""")
conf = fetch(data)
args = conf._ctx['aa']['argparse']
assert args['help'] == 'This\nis a\nhelp.'
def test_help_multilines_blank(self):
# testing both ':' and ': '
data = f("""
[sec1]
aa = : This
: is a
:
:
: help.
:: f: comma
xxx1, xxx2
""")
conf = fetch(data)
args = conf._ctx['aa']['argparse']
assert args['help'] == 'This\nis a\n\n\nhelp.'
def test_help_and_choices(self):
data = f("""
[sec1]
aa = : help string
:: choices: ss, tt
tt
""")
conf = fetch(data)
args = conf._ctx['aa']['argparse']
assert args['help'] == 'help string'
assert args['choices'] == ['ss', 'tt']
class TestBuildArgs:
def test_help(self):
data = f("""
[sec1]
aa = : help string
:: f: comma
xxx1, xxx2
""")
conf = fetch(data)
action = _get_action(conf, '--aa')
assert isinstance(action, argparse._StoreAction)
assert action.help == 'help string'
def test_help_and_choices(self):
data = f("""
[sec1]
aa = : help string
:: choices: ss, tt
tt
""")
conf = fetch(data)
action = _get_action(conf, '--aa')
assert isinstance(action, argparse._StoreAction)
assert action.choices == ['ss', 'tt']
def test_names(self):
data = f("""
[sec1]
aa = : help string
:: names: a
true
""")
conf = fetch(data)
action = _get_action(conf, '--aa')
assert isinstance(action, argparse._StoreAction)
assert action.option_strings == ['-a', '--aa']
def test_bool(self):
data = f("""
[sec1]
aa = : help string
:: f: bool
yes
""")
conf = fetch(data)
action = _get_action(conf, '--aa')
assert isinstance(action, argparse._StoreConstAction)
assert action.const == 'yes'
def test_bool_opposite(self):
data = f("""
[sec1]
aa = : help string
:: f: bool
yes
no_aa = : help string2
:: dest: aa
:: f: bool
no
""")
conf = fetch(data)
parser = argparse.ArgumentParser(prog='test')
conf.build_arguments(parser)
namespace = parser.parse_args(['--aa'])
assert namespace.__dict__['aa'] == 'yes'
namespace = parser.parse_args(['--no-aa'])
assert namespace.__dict__['aa'] == 'no'
def test_bool_default_no(self):
data = f("""
[sec1]
overwrite = : help string
:: f: bool
no
""")
conf = fetch(data)
action = _get_action(conf, '--overwrite')
assert isinstance(action, argparse._StoreConstAction)
assert action.const == 'yes'
def test_bool_opposite_default_no(self):
data = f("""
[sec1]
overwrite = : help string
:: f: bool
no
no_overwrite = : help string2
:: dest: overwrite
:: f: bool
yes
""")
conf = fetch(data)
parser = argparse.ArgumentParser(prog='test')
conf.build_arguments(parser)
namespace = parser.parse_args(['--overwrite'])
assert namespace.__dict__['overwrite'] == 'yes'
namespace = parser.parse_args(['--no-overwrite'])
assert namespace.__dict__['overwrite'] == 'no'
class TestBuildArgsCommandlineOnly:
def test_int(self):
data = f("""
[sec1]
aa = : help string
:: default: 1
xxx
""")
conf = fetch(data)
action = _get_action(conf, '--aa')
assert isinstance(action, argparse._StoreAction)
assert action.default == 1
def test_int_like_string(self):
data = f("""
[sec1]
aa = : help string
:: default: '1'
xxx
""")
conf = fetch(data)
action = _get_action(conf, '--aa')
assert isinstance(action, argparse._StoreAction)
assert action.default == '1'
def test_type(self):
data = f("""
[sec1]
aa = : help string
:: type: int
42
""")
conf = fetch(data)
action = _get_action(conf, '--aa')
assert isinstance(action, argparse._StoreAction)
assert action.type == int
def test_suppress(self):
data = f("""
[DEFAULT]
aa = : argparse.SUPPRESS
:: default: argparse.SUPPRESS
[sec1]
aa = xxx
""")
conf = fetch(data)
action = _get_action(conf, '--aa')
assert isinstance(action, argparse._StoreAction)
assert action.help == argparse.SUPPRESS
assert action.default == argparse.SUPPRESS
assert conf.sec1.aa == 'xxx'
def test_print_data():
data = f("""
[DEFAULT]
aa = aaa
[sec1]
bb = bbb
cc = : help string
:: names: c
:: f: bool
ccc
dd =
""")
dict_string = f("""
{
'DEFAULT': {
'aa': {
'value': 'aaa',
},
},
'sec1': {
'bb': {
'value': 'bbb',
},
'cc': {
'argparse': {
'help': 'help string',
'names': ['c'],
},
'func': ['bool'],
'value': 'ccc',
},
'dd': {
'value': '',
},
},
}
""")
ini_string = f("""
[DEFAULT]
aa= aaa
[sec1]
bb= bbb
cc= ccc
dd=
""")
conf = fetch(data, option_builder=configfetch.FiniOptionBuilder)
printer = configfetch.ConfigPrinter
ret = []
printer(conf, print=ret.append).print_dict()
assert '\n'.join(ret) == dict_string[:-1]
ret = []
printer(conf, print=ret.append).print_ini()
assert '\n'.join(ret) == ini_string[:-1]
dict_ = eval(dict_string)
conf = fetch(dict_, option_builder=configfetch.DictOptionBuilder)
ret = []
printer(conf, print=ret.append).print_dict()
assert '\n'.join(ret) == dict_string[:-1]
ret = []
printer(conf, print=ret.append).print_ini()
assert '\n'.join(ret) == ini_string[:-1]
| 20,491
| 1,183
| 2,116
|
4b11f987288e4258a61dd4806f7718825b2bb273
| 2,254
|
py
|
Python
|
portal_gun/commands/ssh.py
|
Coderik/portal-gun
|
081020a46b16b649497bceb6c2435b1ba135b487
|
[
"MIT"
] | 69
|
2018-05-03T18:25:43.000Z
|
2021-02-10T11:37:28.000Z
|
portal_gun/commands/ssh.py
|
Coderik/portal-gun
|
081020a46b16b649497bceb6c2435b1ba135b487
|
[
"MIT"
] | 7
|
2018-09-19T06:39:11.000Z
|
2022-03-29T21:55:08.000Z
|
portal_gun/commands/ssh.py
|
Coderik/portal-gun
|
081020a46b16b649497bceb6c2435b1ba135b487
|
[
"MIT"
] | 11
|
2018-07-30T18:09:12.000Z
|
2019-10-03T15:36:13.000Z
|
import os
from portal_gun.commands.helpers import get_provider_config, get_portal_spec, get_portal_name, \
get_provider_from_portal
from portal_gun.context_managers.no_print import no_print
from .base_command import BaseCommand
from .handlers import create_handler
| 34.676923
| 107
| 0.726264
|
import os
from portal_gun.commands.helpers import get_provider_config, get_portal_spec, get_portal_name, \
get_provider_from_portal
from portal_gun.context_managers.no_print import no_print
from .base_command import BaseCommand
from .handlers import create_handler
class SshCommand(BaseCommand):
DEFAULT_TMUX_SESSION = 'portal'
def __init__(self, args):
BaseCommand.__init__(self, args)
@staticmethod
def cmd():
return 'ssh'
@classmethod
def add_subparser(cls, subparsers):
parser = subparsers.add_parser(cls.cmd(), help='Connect to the remote host via ssh')
parser.add_argument('portal', help='Name of portal')
parser.add_argument('-t', '--tmux', dest='tmux', nargs='?', default=None, const=cls.DEFAULT_TMUX_SESSION,
metavar='session', help='Automatically open tmux session upon connection. '
'Default session name is `{}`.'.format(cls.DEFAULT_TMUX_SESSION))
def run(self):
# Find, parse and validate configs
with no_print():
portal_name = get_portal_name(self._args.portal)
portal_spec = get_portal_spec(portal_name)
provider_name = get_provider_from_portal(portal_spec)
provider_config = get_provider_config(self._args.config, provider_name)
# Create appropriate command handler for given cloud provider
handler = create_handler(provider_name, provider_config)
identity_file, user, host, disable_known_hosts = handler.get_ssh_params(portal_spec, portal_name)
print('Connecting to the remote machine...')
print('\tssh -i "{}" {}@{}'.format(identity_file, user, host).expandtabs(4))
# If needed, disable strict known-hosts check
options = []
if disable_known_hosts:
options = [
'-o',
'StrictHostKeyChecking=no'
]
# If requested, configure a preamble (a set of commands to be run automatically after connection)
preamble = []
if self._args.tmux is not None:
preamble = [
'-t',
'""tmux attach-session -t {sess} || tmux new-session -s {sess}""'.format(sess=self._args.tmux)
]
print('Upon connection will open tmux session `{}`.'.format(self._args.tmux))
print('')
# Ssh to remote host (effectively replace current process by ssh)
os.execvp('ssh', ['ssh', '-i', identity_file, '{}@{}'.format(user, host)] + options + preamble)
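A sketch with made-up values showing the argv that run() above ultimately hands to os.execvp() when --tmux is given and strict host-key checking is disabled; none of these values come from a real portal spec.

identity_file, user, host = "~/.ssh/portal.pem", "ubuntu", "203.0.113.10"   # hypothetical
options = ['-o', 'StrictHostKeyChecking=no']
preamble = ['-t', '""tmux attach-session -t portal || tmux new-session -s portal""']
argv = ['ssh', '-i', identity_file, '{}@{}'.format(user, host)] + options + preamble
print(' '.join(argv))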
| 1,796
| 167
| 23
|
a5bd7b16ae0ef9281e8935c406154bcc19d183b1
| 10,477
|
py
|
Python
|
pt3/client.py
|
Aerun/pytyle3
|
86876fa7ad652fc99b77f5482559733c95490e84
|
[
"WTFPL"
] | null | null | null |
pt3/client.py
|
Aerun/pytyle3
|
86876fa7ad652fc99b77f5482559733c95490e84
|
[
"WTFPL"
] | null | null | null |
pt3/client.py
|
Aerun/pytyle3
|
86876fa7ad652fc99b77f5482559733c95490e84
|
[
"WTFPL"
] | null | null | null |
import time
import xcffib.xproto
import xpybutil
import xpybutil.event as event
import xpybutil.ewmh as ewmh
import xpybutil.motif as motif
import xpybutil.icccm as icccm
import xpybutil.rect as rect
import xpybutil.util as util
import xpybutil.window as window
from debug import debug
import config
import state
import tile
clients = {}
ignore = [] # Some clients are never gunna make it...
event.connect('PropertyNotify', xpybutil.root, cb_property_notify)
| 34.127036
| 98
| 0.592345
|
import time
import xcffib.xproto
import xpybutil
import xpybutil.event as event
import xpybutil.ewmh as ewmh
import xpybutil.motif as motif
import xpybutil.icccm as icccm
import xpybutil.rect as rect
import xpybutil.util as util
import xpybutil.window as window
from debug import debug
import config
import state
import tile
clients = {}
ignore = [] # Some clients are never gunna make it...
class Client(object):
def __init__(self, wid):
self.wid = wid
self.name = ewmh.get_wm_name(self.wid).reply() or 'N/A'
debug('Connecting to %s' % self)
window.listen(self.wid, 'PropertyChange', 'FocusChange')
event.connect('PropertyNotify', self.wid, self.cb_property_notify)
event.connect('FocusIn', self.wid, self.cb_focus_in)
event.connect('FocusOut', self.wid, self.cb_focus_out)
# This connects to the parent window (decorations)
# We get all resize AND move events... might be too much
self.parentid = window.get_parent_window(self.wid)
window.listen(self.parentid, 'StructureNotify')
event.connect('ConfigureNotify', self.parentid,
self.cb_configure_notify)
# A window should only be floating if that is default
self.floating = getattr(config, 'floats_default', False)
# Not currently in a "moving" state
self.moving = False
# Load some data
self.desk = ewmh.get_wm_desktop(self.wid).reply()
# Add it to this desktop's tilers
tile.update_client_add(self)
# First cut at saving client geometry
self.save()
def remove(self):
tile.update_client_removal(self)
debug('Disconnecting from %s' % self)
event.disconnect('ConfigureNotify', self.parentid)
event.disconnect('PropertyNotify', self.wid)
event.disconnect('FocusIn', self.wid)
event.disconnect('FocusOut', self.wid)
def activate(self):
ewmh.request_active_window_checked(self.wid, source=1).check()
def unmaximize(self):
vatom = util.get_atom('_NET_WM_STATE_MAXIMIZED_VERT')
hatom = util.get_atom('_NET_WM_STATE_MAXIMIZED_HORZ')
ewmh.request_wm_state_checked(self.wid, 0, vatom, hatom).check()
def save(self):
self.saved_geom = window.get_geometry(self.wid)
self.saved_state = ewmh.get_wm_state(self.wid).reply()
def restore(self):
debug('Restoring %s' % self)
if getattr(config, 'remove_decorations', False):
motif.set_hints_checked(self.wid,2,decoration=1).check()
if getattr(config, 'tiles_below', False):
ewmh.request_wm_state_checked(self.wid,0,util.get_atom('_NET_WM_STATE_BELOW')).check()
if self.saved_state:
fullymaxed = False
vatom = util.get_atom('_NET_WM_STATE_MAXIMIZED_VERT')
hatom = util.get_atom('_NET_WM_STATE_MAXIMIZED_HORZ')
if vatom in self.saved_state and hatom in self.saved_state:
fullymaxed = True
ewmh.request_wm_state_checked(self.wid, 1, vatom, hatom).check()
elif vatom in self.saved_state:
ewmh.request_wm_state_checked(self.wid, 1, vatom).check()
elif hatom in self.saved_state:
ewmh.request_wm_state_checked(self.wid, 1, hatom).check()
# No need to continue if we've fully maximized the window
if fullymaxed:
return
mnow = rect.get_monitor_area(window.get_geometry(self.wid),
state.monitors)
mold = rect.get_monitor_area(self.saved_geom, state.monitors)
x, y, w, h = self.saved_geom
# What if the client is on a monitor different than what it was before?
# Use the same algorithm in Openbox to convert one monitor's
# coordinates to another.
if mnow != mold:
nowx, nowy, noww, nowh = mnow
oldx, oldy, oldw, oldh = mold
xrat, yrat = float(noww) / float(oldw), float(nowh) / float(oldh)
x = nowx + (x - oldx) * xrat
y = nowy + (y - oldy) * yrat
w *= xrat
h *= yrat
window.moveresize(self.wid, x, y, w, h)
def moveresize(self, x=None, y=None, w=None, h=None):
# Ignore this if the user is moving the window...
if self.moving:
print 'Sorry but %s is moving...' % self
return
try:
window.moveresize(self.wid, x, y, w, h)
except:
pass
def is_button_pressed(self):
try:
pointer = xpybutil.conn.core.QueryPointer(self.wid).reply()
if pointer is None:
return False
if (xcffib.xproto.KeyButMask.Button1 & pointer.mask or
xcffib.xproto.KeyButMask.Button3 & pointer.mask):
return True
except xcffib.xproto.BadWindow:
pass
return False
def cb_focus_in(self, e):
if self.moving and e.mode == xcffib.xproto.NotifyMode.Ungrab:
state.GRAB = None
self.moving = False
tile.update_client_moved(self)
def cb_focus_out(self, e):
if e.mode == xcffib.xproto.NotifyMode.Grab:
state.GRAB = self
def cb_configure_notify(self, e):
if state.GRAB is self and self.is_button_pressed():
self.moving = True
def cb_property_notify(self, e):
aname = util.get_atom_name(e.atom)
try:
if aname == '_NET_WM_DESKTOP':
if should_ignore(self.wid):
untrack_client(self.wid)
return
olddesk = self.desk
self.desk = ewmh.get_wm_desktop(self.wid).reply()
if self.desk is not None and self.desk != olddesk:
tile.update_client_desktop(self, olddesk)
else:
self.desk = olddesk
elif aname == '_NET_WM_STATE':
if should_ignore(self.wid):
untrack_client(self.wid)
return
except xcffib.xproto.BadWindow:
pass # S'ok...
def __str__(self):
return '{%s (%d)}' % (self.name[0:30], self.wid)
def update_clients():
client_list = ewmh.get_client_list_stacking().reply()
client_list = list(reversed(client_list))
for c in client_list:
if c not in clients:
track_client(c)
for c in clients.keys():
if c not in client_list:
untrack_client(c)
def track_client(client):
assert client not in clients
try:
if not should_ignore(client):
if state.PYTYLE_STATE == 'running':
# This is truly unfortunate and only seems to be necessary when
# a client comes back from an iconified state. This causes a
# slight lag when a new window is mapped, though.
time.sleep(0.2)
clients[client] = Client(client)
except xcffib.xproto.BadWindow:
debug('Window %s was destroyed before we could finish inspecting it. '
'Untracking it...' % client)
untrack_client(client)
def untrack_client(client):
if client not in clients:
return
c = clients[client]
del clients[client]
c.remove()
def should_ignore(client):
# Don't waste time on clients we'll never possibly tile
if client in ignore:
return True
nm = ewmh.get_wm_name(client).reply()
wm_class = icccm.get_wm_class(client).reply()
if wm_class is not None:
try:
inst, cls = wm_class
matchNames = set([inst.lower(), cls.lower()])
if matchNames.intersection(config.ignore):
debug('Ignoring %s because it is in the ignore list' % nm)
return True
if hasattr(config, 'tile_only') and config.tile_only:
if not matchNames.intersection(config.tile_only):
debug('Ignoring %s because it is not in the tile_only '
'list' % nm)
return True
except ValueError:
pass
if icccm.get_wm_transient_for(client).reply() is not None:
debug('Ignoring %s because it is transient' % nm)
ignore.append(client)
return True
wtype = ewmh.get_wm_window_type(client).reply()
if wtype:
for atom in wtype:
aname = util.get_atom_name(atom)
if aname in ('_NET_WM_WINDOW_TYPE_DESKTOP',
'_NET_WM_WINDOW_TYPE_DOCK',
'_NET_WM_WINDOW_TYPE_TOOLBAR',
'_NET_WM_WINDOW_TYPE_MENU',
'_NET_WM_WINDOW_TYPE_UTILITY',
'_NET_WM_WINDOW_TYPE_SPLASH',
'_NET_WM_WINDOW_TYPE_DIALOG',
'_NET_WM_WINDOW_TYPE_DROPDOWN_MENU',
'_NET_WM_WINDOW_TYPE_POPUP_MENU',
'_NET_WM_WINDOW_TYPE_TOOLTIP',
'_NET_WM_WINDOW_TYPE_NOTIFICATION',
'_NET_WM_WINDOW_TYPE_COMBO',
'_NET_WM_WINDOW_TYPE_DND'):
debug('Ignoring %s because it has type %s' % (nm, aname))
ignore.append(client)
return True
wstate = ewmh.get_wm_state(client).reply()
if wstate is None:
debug('Ignoring %s because it does not have a state' % nm)
return True
for atom in wstate:
aname = util.get_atom_name(atom)
# For now, while I decide how to handle these guys
if aname == '_NET_WM_STATE_STICKY':
debug('Ignoring %s because it is sticky and they are weird' % nm)
return True
if aname in ('_NET_WM_STATE_SHADED', '_NET_WM_STATE_HIDDEN',
'_NET_WM_STATE_FULLSCREEN', '_NET_WM_STATE_MODAL'):
debug('Ignoring %s because it has state %s' % (nm, aname))
return True
d = ewmh.get_wm_desktop(client).reply()
if d == 0xffffffff:
debug('Ignoring %s because it\'s on all desktops' \
'(not implemented)' % nm)
return True
return False
def cb_property_notify(e):
aname = util.get_atom_name(e.atom)
if aname == '_NET_CLIENT_LIST_STACKING':
update_clients()
event.connect('PropertyNotify', xpybutil.root, cb_property_notify)
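The proportional monitor-to-monitor conversion inside Client.restore() above, pulled out as a standalone sketch with hypothetical monitor rectangles (x, y, w, h).

def remap(geom, mold, mnow):
    # same ratio-based conversion as in Client.restore()
    x, y, w, h = geom
    oldx, oldy, oldw, oldh = mold
    nowx, nowy, noww, nowh = mnow
    xrat, yrat = float(noww) / float(oldw), float(nowh) / float(oldh)
    return (nowx + (x - oldx) * xrat, nowy + (y - oldy) * yrat, w * xrat, h * yrat)

# a 400x300 window at (100, 100) on a 1920x1080 monitor, restored onto a 1280x720 monitor
print(remap((100, 100, 400, 300), (0, 0, 1920, 1080), (1920, 0, 1280, 720)))
# -> approximately (1986.67, 66.67, 266.67, 200.0)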
| 9,523
| 0
| 488
|
aba168a92af45bb1cc54c1d9fa128f27dfac8b46
| 411
|
py
|
Python
|
movies/details/models.py
|
tehran4e/workspace
|
1a479458ae113c02e6597578f289e5f9283a69f2
|
[
"MIT"
] | null | null | null |
movies/details/models.py
|
tehran4e/workspace
|
1a479458ae113c02e6597578f289e5f9283a69f2
|
[
"MIT"
] | null | null | null |
movies/details/models.py
|
tehran4e/workspace
|
1a479458ae113c02e6597578f289e5f9283a69f2
|
[
"MIT"
] | null | null | null |
from django.db import models
| 27.4
| 61
| 0.693431
|
from django.db import models
class Artist(models.Model):
name = models.CharField(max_length=200)
def __str__(self):
return self.name
class Movie(models.Model):
title = models.CharField(max_length=100)
year = models.IntegerField()
director = models.CharField(max_length=100)
story = models.TextField()
artistsName = models.ManyToManyField(Artist, blank=True)
| 23
| 307
| 49
|
1782765336d1c920b25f3e04b8d6dd09f0344112
| 905
|
py
|
Python
|
index_cli/core/json_type.py
|
lishnih/index_cli
|
57f23d5df5168bcc73e23e0eeabbb8317014585b
|
[
"MIT"
] | null | null | null |
index_cli/core/json_type.py
|
lishnih/index_cli
|
57f23d5df5168bcc73e23e0eeabbb8317014585b
|
[
"MIT"
] | null | null | null |
index_cli/core/json_type.py
|
lishnih/index_cli
|
57f23d5df5168bcc73e23e0eeabbb8317014585b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
# Stan 2018-09-27
from __future__ import (division, absolute_import,
print_function, unicode_literals)
import json
from sqlalchemy.types import UserDefinedType, TypeDecorator, Text
# class JsonType(UserDefinedType):
# def get_col_spec(self, **kw):
# return "JSON"
#
# def bind_processor(self, dialect):
# def process(value):
# return json.dumps(value, ensure_ascii=False).encode('utf8')
#
# return process
#
# def result_processor(self, dialect, coltype):
# def process(value):
# return json.loads(value)
#
# return process
| 23.815789
| 73
| 0.653039
|
#!/usr/bin/env python
# coding=utf-8
# Stan 2018-09-27
from __future__ import (division, absolute_import,
print_function, unicode_literals)
import json
from sqlalchemy.types import UserDefinedType, TypeDecorator, Text
class JsonType(TypeDecorator):
impl = Text
def process_bind_param(self, value, dialect):
return json.dumps(value, ensure_ascii=False)
def process_result_value(self, value, dialect):
return json.loads(value)
# class JsonType(UserDefinedType):
# def get_col_spec(self, **kw):
# return "JSON"
#
# def bind_processor(self, dialect):
# def process(value):
# return json.dumps(value, ensure_ascii=False).encode('utf8')
#
# return process
#
# def result_processor(self, dialect, coltype):
# def process(value):
# return json.loads(value)
#
# return process
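A minimal usage sketch, assuming the JsonType decorator defined above is importable; the table and column names here are made up.

from sqlalchemy import Column, Integer, MetaData, Table
# from index_cli.core.json_type import JsonType   # hypothetical import path for this repo

metadata = MetaData()
records = Table(
    'records', metadata,                      # hypothetical table
    Column('id', Integer, primary_key=True),
    Column('payload', JsonType),              # stored as TEXT, exposed as dict/list
)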
| 136
| 79
| 23
|
ed5e6c0f6c69ec6fdd90183710bf386418d25c66
| 1,563
|
py
|
Python
|
tests/test_settings.py
|
sneJ-/chaostoolkit-lib
|
07b00c8bffe8cda7494b049f9640cdbba3bad8bc
|
[
"Apache-2.0"
] | 1
|
2019-11-18T19:57:42.000Z
|
2019-11-18T19:57:42.000Z
|
tests/test_settings.py
|
sneJ-/chaostoolkit-lib
|
07b00c8bffe8cda7494b049f9640cdbba3bad8bc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_settings.py
|
sneJ-/chaostoolkit-lib
|
07b00c8bffe8cda7494b049f9640cdbba3bad8bc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os.path
from chaoslib.settings import get_loaded_settings, load_settings, save_settings
settings_dir = os.path.join(os.path.dirname(__file__), "fixtures")
| 28.944444
| 79
| 0.715931
|
# -*- coding: utf-8 -*-
import os.path
from chaoslib.settings import get_loaded_settings, load_settings, save_settings
settings_dir = os.path.join(os.path.dirname(__file__), "fixtures")
def test_do_not_fail_when_settings_do_not_exist():
assert load_settings(
os.path.join(settings_dir, "no_settings.yaml")) is None
def test_load_settings():
settings = load_settings(os.path.join(settings_dir, "settings.yaml"))
assert "notifications" in settings
def test_save_settings():
settings = load_settings(os.path.join(settings_dir, "settings.yaml"))
new_settings_location = os.path.join(settings_dir, "new_settings.yaml")
try:
os.remove(new_settings_location)
except OSError:
pass
save_settings(settings, new_settings_location)
saved_settings = load_settings(new_settings_location)
assert "notifications" in saved_settings
os.remove(new_settings_location)
def test_load_unsafe_settings():
settings = load_settings(
os.path.join(settings_dir, "unsafe-settings.yaml"))
assert settings is None
def test_create_settings_file_on_save():
ghost = os.path.abspath(os.path.join(settings_dir, "bah", "ghost.yaml"))
assert not os.path.exists(ghost)
try:
save_settings({}, ghost)
assert os.path.exists(ghost)
finally:
try:
os.remove(ghost)
except OSError:
pass
def test_get_loaded_settings():
settings = load_settings(os.path.join(settings_dir, "settings.yaml"))
assert get_loaded_settings() is settings
| 1,231
| 0
| 138
|
0da55faa65c939131e74dd60e3f512e40b9acbf0
| 49
|
py
|
Python
|
instance/config.py
|
davideguidobene/cinema-web-app
|
1a83576a1e37ea69bec2b2a80f584912cfc9b264
|
[
"MIT"
] | null | null | null |
instance/config.py
|
davideguidobene/cinema-web-app
|
1a83576a1e37ea69bec2b2a80f584912cfc9b264
|
[
"MIT"
] | null | null | null |
instance/config.py
|
davideguidobene/cinema-web-app
|
1a83576a1e37ea69bec2b2a80f584912cfc9b264
|
[
"MIT"
] | null | null | null |
import os
SECRET_KEY = os.getenv("SECRET_KEY")
| 9.8
| 36
| 0.734694
|
import os
SECRET_KEY = os.getenv("SECRET_KEY")
| 0
| 0
| 0
|
7a777dd89c577420d917a03e50e383d90d26f239
| 652
|
py
|
Python
|
cit_vipnet/inventory/migrations/0002_auto_20210906_1138.py
|
mr-Marshanskiy/cit-vipnet
|
6a0e56a13cae57252957c82af3d4e98da5d9d6a4
|
[
"BSD-3-Clause"
] | null | null | null |
cit_vipnet/inventory/migrations/0002_auto_20210906_1138.py
|
mr-Marshanskiy/cit-vipnet
|
6a0e56a13cae57252957c82af3d4e98da5d9d6a4
|
[
"BSD-3-Clause"
] | null | null | null |
cit_vipnet/inventory/migrations/0002_auto_20210906_1138.py
|
mr-Marshanskiy/cit-vipnet
|
6a0e56a13cae57252957c82af3d4e98da5d9d6a4
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.2 on 2021-09-06 08:38
from django.db import migrations
| 29.636364
| 137
| 0.627301
|
# Generated by Django 2.2 on 2021-09-06 08:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('inventory', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='hardwareplatform',
options={'ordering': ['-name'], 'verbose_name': 'Аппаратная платформа', 'verbose_name_plural': 'Аппаратные платформы'},
),
migrations.AlterModelOptions(
name='modification',
options={'ordering': ['-name'], 'verbose_name': 'Модификация исполненеия', 'verbose_name_plural': 'Модификации исполненеий'},
),
]
| 0
| 630
| 23
|
a218e268b041cea723c99b9e20c6c99c665876db
| 88
|
py
|
Python
|
main.py
|
hailleenvarela/data-2022-1
|
8f92e1325b6fcbf727b426c50ddf32d10e38db89
|
[
"MIT"
] | null | null | null |
main.py
|
hailleenvarela/data-2022-1
|
8f92e1325b6fcbf727b426c50ddf32d10e38db89
|
[
"MIT"
] | 1
|
2022-02-27T23:23:50.000Z
|
2022-02-27T23:23:50.000Z
|
main.py
|
hailleenvarela/data-2022-1
|
8f92e1325b6fcbf727b426c50ddf32d10e38db89
|
[
"MIT"
] | 3
|
2022-02-27T23:14:24.000Z
|
2022-03-02T00:47:12.000Z
|
from source.etl import ETL
x = ETL()
df = x.extract(True)
x.transform(df)
#x.load(df)
| 11
| 26
| 0.670455
|
from source.etl import ETL
x = ETL()
df = x.extract(True)
x.transform(df)
#x.load(df)
| 0
| 0
| 0
|
97d4c4d8955d3f56c8e11f52c3ceebef2f337f77
| 2,533
|
py
|
Python
|
2021/advent2021_9.py
|
aatango/Advent-of-Code
|
f229abc7acaaa0a2316839bf11fa7e2fdf9caf2c
|
[
"MIT"
] | null | null | null |
2021/advent2021_9.py
|
aatango/Advent-of-Code
|
f229abc7acaaa0a2316839bf11fa7e2fdf9caf2c
|
[
"MIT"
] | null | null | null |
2021/advent2021_9.py
|
aatango/Advent-of-Code
|
f229abc7acaaa0a2316839bf11fa7e2fdf9caf2c
|
[
"MIT"
] | null | null | null |
"""Advent of Code 2021, day 9: Smoke Basin"""
def main(input_matrix: tuple[str]) -> int:
"""
Find all of the low points on your heightmap.
What is the sum of the risk levels of all low points on your heightmap?
"""
# It's a brute force approach that does not scale to part two,
# but it's what I could think of with very little time.
# Transform string input into usable int values.
for line in input_matrix:
int_line: list[int] = []
for num in line:
int_line.append(int(num))
DEPTH_MAP.append(int_line)
# Find local minima.
low_points: list[int] = []
for line_index, line in enumerate(DEPTH_MAP):
for point_index, point in enumerate(line):
neighbours: list[int] = []
if point_index - 1 in range(0, len(line)):
neighbours.append(DEPTH_MAP[line_index][point_index - 1])
if point_index + 1 in range(0, len(line)):
neighbours.append(DEPTH_MAP[line_index][point_index + 1])
if line_index - 1 in range(0, len(DEPTH_MAP)):
neighbours.append(DEPTH_MAP[line_index - 1][point_index])
if line_index + 1 in range(0, len(DEPTH_MAP)):
neighbours.append(DEPTH_MAP[line_index + 1][point_index])
if point < min(neighbours):
low_points.append(point + 1)
return sum(low_points)
def part_two():
"""What do you get if you multiply together the sizes of the three largest basins?
Unlike most other days, this part_two() is dependent on main(),
as it's there that the global DEPTH_MAP is generated.
"""
basins_sizes: list[int] = []
# This loop is to initiate recursive calls, whenever it finds a new basin.
for line_index, line in enumerate(DEPTH_MAP):
for point_index, point in enumerate(line):
if point < 9:
basins_sizes.append(map_basin((line_index, point_index)))
basins_sizes.sort()
return basins_sizes[-3] * basins_sizes[-2] * basins_sizes[-1]
if __name__ == "__main__":
with open("../input", "r") as file:
INPUT_FILE = tuple(file.read().splitlines())
# Global so that it doesn't have to be remade for part two.
DEPTH_MAP: list[list[int]] = []
print(main(INPUT_FILE))
print(part_two())
| 28.784091
| 83
| 0.684959
|
"""Advent of Code 2021, day 9: Smoke Basin"""
def main(input_matrix: tuple[str]) -> int:
"""
Find all of the low points on your heightmap.
What is the sum of the risk levels of all low points on your heightmap?
"""
# It's a brute force approach that does not scale to part two,
# but it's what I could think of with very little time.
# Transform string input into usable int values.
for line in input_matrix:
int_line: list[int] = []
for num in line:
int_line.append(int(num))
DEPTH_MAP.append(int_line)
# Find local minima.
low_points: list[int] = []
for line_index, line in enumerate(DEPTH_MAP):
for point_index, point in enumerate(line):
neighbours: list[int] = []
if point_index - 1 in range(0, len(line)):
neighbours.append(DEPTH_MAP[line_index][point_index - 1])
if point_index + 1 in range(0, len(line)):
neighbours.append(DEPTH_MAP[line_index][point_index + 1])
if line_index - 1 in range(0, len(DEPTH_MAP)):
neighbours.append(DEPTH_MAP[line_index - 1][point_index])
if line_index + 1 in range(0, len(DEPTH_MAP)):
neighbours.append(DEPTH_MAP[line_index + 1][point_index])
if point < min(neighbours):
low_points.append(point + 1)
return sum(low_points)
def part_two():
"""What do you get if you multiply together the sizes of the three largest basins?
Unlike most other days, this part_two() is dependent on main(),
as it's there that the global DEPTH_MAP is generated.
"""
def map_basin(pos: tuple[int], basin_size: int = 0) -> int:
if DEPTH_MAP[pos[0]][pos[1]] >= 9:
return basin_size
basin_size += 1
DEPTH_MAP[pos[0]][pos[1]] = 9
basin_size += map_basin((max(pos[0] - 1, 0), pos[1]))
basin_size += map_basin((min(pos[0] + 1, len(DEPTH_MAP) - 1), pos[1]))
basin_size += map_basin((pos[0], max(pos[1] - 1, 0)))
basin_size += map_basin((pos[0], min(pos[1] + 1, len(DEPTH_MAP[0]) - 1)))
return basin_size
basins_sizes: list[int] = []
# This loop is to initiate recursive calls, whenever it finds a new basin.
for line_index, line in enumerate(DEPTH_MAP):
for point_index, point in enumerate(line):
if point < 9:
basins_sizes.append(map_basin((line_index, point_index)))
basins_sizes.sort()
return basins_sizes[-3] * basins_sizes[-2] * basins_sizes[-1]
if __name__ == "__main__":
with open("../input", "r") as file:
INPUT_FILE = tuple(file.read().splitlines())
# Global so that it doesn't have to be remade for part two.
DEPTH_MAP: list[list[int]] = []
print(main(INPUT_FILE))
print(part_two())
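A tiny self-contained illustration of the flood-fill idea behind map_basin() above, on a made-up 3x5 heightmap where values of 9 act as basin walls; it uses explicit bounds checks instead of the index clamping done in the original.

grid = [
    [2, 1, 9, 3, 4],
    [3, 9, 9, 2, 1],
    [9, 8, 5, 6, 9],
]

def basin_size(grid, r, c):
    if not (0 <= r < len(grid) and 0 <= c < len(grid[0])) or grid[r][c] >= 9:
        return 0
    grid[r][c] = 9        # mark visited, just like map_basin() overwriting with 9
    return 1 + sum(basin_size(grid, r + dr, c + dc)
                   for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)))

print(basin_size(grid, 0, 0))   # -> 3 (the 2-1/3 pocket in the top-left corner)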
| 430
| 0
| 24
|
8485ba5f72fd09655120694f54a0ea9e297a8fe8
| 545
|
py
|
Python
|
pythondata_cpu_blackparrot/system_verilog/black-parrot/external/basejump_stl/testing/bsg_test/dramsim3_bandwidth2/const_random.py
|
litex-hub/pythondata-cpu-blackparrot
|
ba50883f12d33e1d834640640c84ddc9329bb68a
|
[
"BSD-3-Clause"
] | 3
|
2021-05-12T21:57:55.000Z
|
2021-07-29T19:56:04.000Z
|
pythondata_cpu_blackparrot/system_verilog/black-parrot/external/basejump_stl/testing/bsg_test/dramsim3_bandwidth2/const_random.py
|
litex-hub/litex-data-cpu-blackparrot
|
ba50883f12d33e1d834640640c84ddc9329bb68a
|
[
"BSD-3-Clause"
] | 1
|
2020-05-02T02:41:24.000Z
|
2020-05-02T02:44:25.000Z
|
pythondata_cpu_blackparrot/system_verilog/black-parrot/external/basejump_stl/testing/bsg_test/dramsim3_bandwidth2/const_random.py
|
litex-hub/litex-data-cpu-blackparrot
|
ba50883f12d33e1d834640640c84ddc9329bb68a
|
[
"BSD-3-Clause"
] | 2
|
2020-05-01T08:33:19.000Z
|
2021-07-29T19:56:12.000Z
|
import sys
import random
from trace_gen import *
if __name__ == "__main__":
random.seed(0)
num_cache_p = int(sys.argv[1])
block_size_in_words_p = int(sys.argv[2])
tg = TraceGen(block_size_in_words_p)
tg.clear_tags()
#words = (2**18)/num_cache_p # 1MB
words = (2**18)/num_cache_p # 1MB
max_range = (2**14)# 64KB
for i in range(words):
taddr = random.randint(0, max_range-1) << 2
write_not_read = random.randint(0,1)
if write_not_read:
tg.send_write(taddr)
else:
tg.send_read(taddr)
tg.done()
| 20.185185
| 47
| 0.66055
|
import sys
import random
from trace_gen import *
if __name__ == "__main__":
random.seed(0)
num_cache_p = int(sys.argv[1])
block_size_in_words_p = int(sys.argv[2])
tg = TraceGen(block_size_in_words_p)
tg.clear_tags()
#words = (2**18)/num_cache_p # 1MB
words = (2**18)/num_cache_p # 1MB
max_range = (2**14)# 64KB
for i in range(words):
taddr = random.randint(0, max_range-1) << 2
write_not_read = random.randint(0,1)
if write_not_read:
tg.send_write(taddr)
else:
tg.send_read(taddr)
tg.done()
| 0
| 0
| 0
|
74ee5adaad45c0809358f0e7260945651ef42945
| 5,699
|
py
|
Python
|
touchdown/tests/test_aws_vpc_subnet.py
|
yaybu/touchdown
|
70ecda5191ce2d095bc074dcb23bfa1584464814
|
[
"Apache-2.0"
] | 14
|
2015-01-05T18:18:04.000Z
|
2022-02-07T19:35:12.000Z
|
touchdown/tests/test_aws_vpc_subnet.py
|
yaybu/touchdown
|
70ecda5191ce2d095bc074dcb23bfa1584464814
|
[
"Apache-2.0"
] | 106
|
2015-01-06T00:17:13.000Z
|
2019-09-07T00:35:32.000Z
|
touchdown/tests/test_aws_vpc_subnet.py
|
yaybu/touchdown
|
70ecda5191ce2d095bc074dcb23bfa1584464814
|
[
"Apache-2.0"
] | 5
|
2015-01-30T10:18:24.000Z
|
2022-02-07T19:35:13.000Z
|
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.tests.aws import StubberTestCase
from touchdown.tests.fixtures.aws import (
NetworkAclFixture,
RouteTableFixture,
VpcFixture,
)
from touchdown.tests.stubs.aws import SubnetStubber
| 32.565714
| 87
| 0.589402
|
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.tests.aws import StubberTestCase
from touchdown.tests.fixtures.aws import (
NetworkAclFixture,
RouteTableFixture,
VpcFixture,
)
from touchdown.tests.stubs.aws import SubnetStubber
class TestSubnetCreation(StubberTestCase):
def test_create_subnet(self):
goal = self.create_goal("apply")
vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
subnet = self.fixtures.enter_context(
SubnetStubber(
goal.get_service(
vpcf.vpc.add_subnet(
name="test-subnet", cidr_block="192.168.0.0/25"
),
"apply",
)
)
)
subnet.add_describe_subnets_empty_response()
subnet.add_create_subnet()
subnet.add_create_tags(Name="test-subnet")
# Wait for the subnet to exist
subnet.add_describe_subnets_empty_response()
subnet.add_describe_subnets_empty_response()
subnet.add_describe_subnets_one_response()
# Call describe_object again to make sure remote state is correctly cached
subnet.add_describe_subnets_one_response()
subnet.add_describe_network_acls()
subnet.add_describe_route_tables()
goal.execute()
def test_adding_route_table_to_subnet(self):
goal = self.create_goal("apply")
vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
route_table = self.fixtures.enter_context(
RouteTableFixture(goal, self.aws, vpcf.vpc)
)
subnet = self.fixtures.enter_context(
SubnetStubber(
goal.get_service(
vpcf.vpc.add_subnet(
name="test-subnet",
cidr_block="192.168.0.0/25",
route_table=route_table,
),
"apply",
)
)
)
subnet.add_describe_subnets_one_response()
subnet.add_describe_network_acls()
subnet.add_describe_route_tables()
subnet.add_associate_route_table("rt-52f2381b")
goal.execute()
def test_adding_nacl_table_to_subnet(self):
goal = self.create_goal("apply")
vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
nacl = self.fixtures.enter_context(NetworkAclFixture(goal, self.aws, vpcf.vpc))
subnet = self.fixtures.enter_context(
SubnetStubber(
goal.get_service(
vpcf.vpc.add_subnet(
name="test-subnet",
cidr_block="192.168.0.0/25",
network_acl=nacl,
),
"apply",
)
)
)
subnet.add_describe_subnets_one_response()
subnet.add_describe_network_acls()
subnet.add_describe_route_tables()
subnet.add_replace_network_acl_association()
goal.execute()
def test_create_subnet_idempotent(self):
goal = self.create_goal("apply")
vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
subnet = self.fixtures.enter_context(
SubnetStubber(
goal.get_service(
vpcf.vpc.add_subnet(
name="test-subnet", cidr_block="192.168.0.0/25"
),
"apply",
)
)
)
subnet.add_describe_subnets_one_response()
subnet.add_describe_network_acls()
subnet.add_describe_route_tables()
self.assertEqual(len(list(goal.plan())), 0)
self.assertEqual(len(goal.get_changes(subnet.resource)), 0)
class TestSubnetDestroy(StubberTestCase):
def test_destroy_subnet(self):
goal = self.create_goal("destroy")
vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
subnet = self.fixtures.enter_context(
SubnetStubber(
goal.get_service(
vpcf.vpc.add_subnet(
name="test-subnet", cidr_block="192.168.0.0/25"
),
"destroy",
)
)
)
subnet.add_describe_subnets_one_response()
subnet.add_describe_network_acls()
subnet.add_describe_route_tables()
subnet.add_delete_subnet()
goal.execute()
def test_destroy_subnet_idempotent(self):
goal = self.create_goal("destroy")
vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
subnet = self.fixtures.enter_context(
SubnetStubber(
goal.get_service(
vpcf.vpc.add_subnet(
name="test-subnet", cidr_block="192.168.0.0/25"
),
"destroy",
)
)
)
subnet.add_describe_subnets_empty_response()
self.assertEqual(len(list(goal.plan())), 0)
self.assertEqual(len(goal.get_changes(subnet.resource)), 0)
| 4,665
| 41
| 206
|
62b58d3a59f61b26ea27943ea666bc132820d76e
| 8,597
|
py
|
Python
|
pymbs/processing/loops/fourbar.py
|
brutzl/pymbs
|
fb7c91435f56b5c4d460f82f081d5d1960fea886
|
[
"MIT"
] | null | null | null |
pymbs/processing/loops/fourbar.py
|
brutzl/pymbs
|
fb7c91435f56b5c4d460f82f081d5d1960fea886
|
[
"MIT"
] | null | null | null |
pymbs/processing/loops/fourbar.py
|
brutzl/pymbs
|
fb7c91435f56b5c4d460f82f081d5d1960fea886
|
[
"MIT"
] | null | null | null |
from pymbs.processing.loops.loop import Loop
from pymbs.common.functions import sqrt
from pymbs.processing import Frame
from pymbs.processing.loads.constraint import Constraint
from numpy import pi
from pymbs.symbolics import Matrix, eye, cos, sin, atan, atan2, acos, zeros, transpose
AL = 'FB_%s_AL'
BE = 'FB_%s_BE'
GA = 'FB_%s_GA'
DE = 'FB_%s_DE'
L1 = 'FB_%s_L1'
L2 = 'FB_%s_L2'
L3 = 'FB_%s_L3'
L4 = 'FB_%s_L4'
PHI = 'FB_%s_PHI'
PSI = 'FB_%s_PSI'
THETA = 'FB_%s_THETA'
A = 'FB_%s_A'
B = 'FB_%s_B'
C = 'FB_%s_C'
D = 'FB_%s_D'
E = 'FB_%s_E'
F = 'FB_%s_F'
from pymbs.symbolics import Graph
class FourBar(Loop):
'''
Explicit Treatment of a Four Bar Linkage Mechanism
'''
'''
Sketch:
B--3--C
/ \
2 4
/ \
A-----1------D
'''
def __init__(self, name, csCa, csCb, posture):
'''
Constructor
@param setup: Four Bar Linkage has two setups: -1, 1
'''
# Assertions
assert ( isinstance(csCa, Frame) )
assert ( isinstance(csCb, Frame) )
assert ( isinstance(posture, int) )
assert ( (posture == 1) or (posture == -1 ))
# Check parents
if (csCa.parentBody.joint is None):
raise ValueError('Loop "%s": Coordinate System "%s" is directly connected to the world!'%(name,csCa.name))
if (csCb.parentBody.joint is None):
raise ValueError('Loop "%s": Coordinate System "%s" is directly connected to the world!'%(name,csCb.name))
# Build complete FourBarLinkage
jB = csCa.parentBody.joint
jD = csCb.parentBody.joint
if (jB.coordSys.parentBody.joint == None):
jB = csCb.parentBody.joint
jD = csCa.parentBody.joint
jA = jB.coordSys.parentBody.joint
csC3 = csCb
csC4 = csCa
else:
jA = jB.coordSys.parentBody.joint
csC3 = csCa
csC4 = csCb
# Do the Joints have the same axis of Rotation
if (jA.Phi == Matrix([1,0,0])):
self.sign = 1
self.pick = Matrix([[0,1,0],
[0,0,1]])
elif (jA.Phi == Matrix([0,1,0])):
self.sign = -1
self.pick = Matrix([[1,0,0],
[0,0,1]])
elif (jA.Phi == Matrix([0,0,1])):
self.sign = 1
self.pick = Matrix([[1,0,0],
[0,1,0]])
else:
raise ValueError('Loop "%s": Axis of Rotation must be either x,y or z!'%name)
assert( jA.Phi == jB.Phi ), 'jA.Phi(%s): %s, jB.Phi(%s): %s'%(jA.name,jA.Phi,jB.name,jB.Phi)
assert( jA.Phi == jD.Phi ), 'jA.Phi(%s): %s, jD.Phi(%s): %s'%(jA.name,jA.Phi,jD.name,jD.Phi)
assert( jA.Psi.norm() == 0 )
assert( jB.Psi.norm() == 0 )
assert( jD.Psi.norm() == 0 )
# Are All Coordinate Systems aligned like their parentBody?
assert( (jA.coordSys.R - eye(3)) == zeros(3) )
assert( (jB.coordSys.R - eye(3)) == zeros(3) )
assert( (jD.coordSys.R - eye(3)) == zeros(3) )
# Check that bodies between joints are the same
assert( jA.coordSys.parentBody == jD.coordSys.parentBody )
assert( jA.body == jB.coordSys.parentBody )
assert( jB.body == csC3.parentBody )
assert( jD.body == csC4.parentBody )
# Super Constructor
Loop.__init__(self, name)
# Save Parameters
self.jA = jA
self.jB = jB
self.jD = jD
self.csC3 = csC3
self.csC4 = csC4
self.posture = posture
# Independent Coordinates
self.u = [jA.q]
self.ud = [jA.qd]
self.udd = [jA.qdd]
# Dependent Coordinates
self.v = [jB.q, jD.q]
self.vd = [jB.qd, jD.qd]
self.vdd = [jB.qdd, jD.qdd]
def calc(self, graph):
'''
Returns precalculated v(u), Bvu and b_prime, s.t.
q = [u,v]', where
u: independent coordinates
v: dependent coordinates
Starting from the Constraint Equation: Phi(q) = 0,
One Obtains by Differentiation:
(d(Phi)/du)*u_dot + (d(Phi)/dv)*v_dot = 0
Ju*u_dot + Jv+v_dot = 0
Thus, v_dot = -(inv(Jv)*Ju)*u_dot = Bvu*u_dot, with Jv = d(Phi)/dv and Ju = d(Phi)/du
Differentiating once more, yields
Ju*u_ddot + Jv*v_ddot + [Ju_dot, Jv_dot]*[u_dot,v_dot]' = 0
Ju*u_ddot + Jv*v_ddot + J_dot*q_dot = 0
Using this relations, one may obtain an expression for v_ddot
v_ddot = -(inv(Jv)*Ju)*u_ddot - inv(Jv)*J_dot*q_dot
= Bvu*u_ddot + b_prime, with b_prime = -inv(Jv)*J_dot*q_dot
Finally one can transform the Equation of Motion
M*q_ddot + h = f + W'*mu
M*(J*u_ddot + b) + h = f + W'*mu with J = [1, Bvu']' and b = [0,b_prime']'
(J'*M*J)*u_ddot + J'*M*b + J'*h = J'*f, since J'*W' = 0
M_star*u_ddot + h_star = f_star
M_star = (J'*M*J)
h_star = J'*M*b + J'*h
f_star = J'*f
'''
assert isinstance(graph, Graph)
# Abbrevations
s = self.sign
# Generalised Coordinates
q1 = self.jA.q # u[0] # angle between x-axes
q1d = self.jA.qd
q2 = self.jB.q # v[0] # angle between x-axes
q2d = self.jB.qd
q3 = self.jD.q # v[1] # angle between x-axes
q3d = self.jD.qd
# Length of bars and angle between x-axis and bar
l1_vec = self.jD.coordSys.p - self.jA.coordSys.p
l1_vec2 = self.pick*l1_vec
l1 = graph.addEquation(L1%self.name, sqrt((transpose(l1_vec)*l1_vec)))
alpha = graph.addEquation(AL%self.name, s*atan2(l1_vec2[1],l1_vec2[0]))
l2_vec = self.jB.coordSys.p
l2_vec2 = self.pick*l2_vec
l2 = graph.addEquation(L2%self.name, sqrt((transpose(l2_vec)*l2_vec)))
beta = graph.addEquation(BE%self.name, s*atan2(l2_vec2[1],l2_vec2[0]))
l3_vec = self.csC3.p
l3_vec2 = self.pick*l3_vec
l3 = graph.addEquation(L3%self.name, sqrt((transpose(l3_vec)*l3_vec)))
gamma = graph.addEquation(GA%self.name, s*atan2(l3_vec2[1],l3_vec2[0]))
l4_vec = self.csC4.p
l4_vec2 = self.pick*l4_vec
l4 = graph.addEquation(L4%self.name, sqrt((transpose(l4_vec)*l4_vec)))
delta = graph.addEquation(DE%self.name, s*atan2(l4_vec2[1],l4_vec2[0]))
# angle between bars
phi_prime = graph.addEquation(PHI%self.name, q1 + beta - alpha)
# A = P1, B = P2, C = P3
#P1 = graph.addEquation(A%self.name, 2*l4*(l1-l2*cos(phi_prime)))
#P2 = graph.addEquation(B%self.name, -2*l2*l4*sin(phi_prime))
#P3 = graph.addEquation(C%self.name, l1**2+l2**2-l3**2+l4**2-2*l1*l2*cos(phi_prime))
# D = P1, E = P2, F = P3
P4 = graph.addEquation(D%self.name, 2*l3*(l2-l1*cos(-phi_prime)))
P5 = graph.addEquation(E%self.name, -2*l1*l3*sin(-phi_prime))
P6 = graph.addEquation(F%self.name, l2**2+l1**2-l4**2+l3**2-2*l2*l1*cos(-phi_prime))
# Calculate v
theta_prime = graph.addEquation(THETA%self.name, 2*atan((P5-self.posture*sqrt(P4**2+P5**2-P6**2))/(P4-P6)))
psi_prime = graph.addEquation(PSI%self.name, ((l2*sin(phi_prime)+l3*sin(phi_prime+theta_prime))/abs(l2*sin(phi_prime)+l3*sin(phi_prime+theta_prime)))*acos((l2*cos(phi_prime)+l3*cos(phi_prime+theta_prime)-l1)/l4))
v1 = (psi_prime + alpha - delta)
v0 = (theta_prime + beta - gamma)
Bvu = Matrix( [[-l2*sin(phi_prime-psi_prime)/(l3*sin(phi_prime+theta_prime-psi_prime))-1], [(l2*sin(theta_prime))/(l4*sin(phi_prime+theta_prime-psi_prime))]] )
b_prime = Matrix( [-(q1d**2*l2*cos(phi_prime-psi_prime)+l3*cos(phi_prime+theta_prime-psi_prime)*(q1d+q2d)**2-l4*q3d**2)/(l3*sin(phi_prime+theta_prime-psi_prime)) , -(q1d**2*l2*cos(theta_prime)+l3*(q1d+q2d)**2-l4*q3d**2*cos(phi_prime+theta_prime-psi_prime))/(l4*sin(phi_prime+theta_prime-psi_prime)) ] )
return ([v0,v1],Bvu,b_prime)
def applyConstraintLoads(self):
'''
apply Constraint Forces at the end of the cut
'''
# locking all directions perpendicular to axis of rotation
transLock = [0,0,0]
for i in [0,1,2]:
if (self.jA.Phi[i] == 0):
transLock[i] = 1
# apply Constraint
c = Constraint(name='Constraint_%s'%self.name, parent=self.csC3, child=self.csC4, \
transLock=transLock, rotLock=[0,0,0], active=False)
# return load object
return c
| 35.378601
| 310
| 0.562289
|
from pymbs.processing.loops.loop import Loop
from pymbs.common.functions import sqrt
from pymbs.processing import Frame
from pymbs.processing.loads.constraint import Constraint
from numpy import pi
from pymbs.symbolics import Matrix, eye, cos, sin, atan, atan2, acos, zeros, transpose
AL = 'FB_%s_AL'
BE = 'FB_%s_BE'
GA = 'FB_%s_GA'
DE = 'FB_%s_DE'
L1 = 'FB_%s_L1'
L2 = 'FB_%s_L2'
L3 = 'FB_%s_L3'
L4 = 'FB_%s_L4'
PHI = 'FB_%s_PHI'
PSI = 'FB_%s_PSI'
THETA = 'FB_%s_THETA'
A = 'FB_%s_A'
B = 'FB_%s_B'
C = 'FB_%s_C'
D = 'FB_%s_D'
E = 'FB_%s_E'
F = 'FB_%s_F'
from pymbs.symbolics import Graph
class FourBar(Loop):
'''
Explicit Treatment of a Four Bar Linkage Mechanism
'''
'''
Sketch:
B--3--C
/ \
2 4
/ \
A-----1------D
'''
def __init__(self, name, csCa, csCb, posture):
'''
Constructor
        @param posture: Four Bar Linkage has two postures (assembly configurations): -1, 1
'''
# Assertions
assert ( isinstance(csCa, Frame) )
assert ( isinstance(csCb, Frame) )
assert ( isinstance(posture, int) )
assert ( (posture == 1) or (posture == -1 ))
# Check parents
if (csCa.parentBody.joint is None):
raise ValueError('Loop "%s": Coordinate System "%s" is directly connected to the world!'%(name,csCa.name))
if (csCb.parentBody.joint is None):
raise ValueError('Loop "%s": Coordinate System "%s" is directly connected to the world!'%(name,csCb.name))
# Build complete FourBarLinkage
jB = csCa.parentBody.joint
jD = csCb.parentBody.joint
        if (jB.coordSys.parentBody.joint is None):
jB = csCb.parentBody.joint
jD = csCa.parentBody.joint
jA = jB.coordSys.parentBody.joint
csC3 = csCb
csC4 = csCa
else:
jA = jB.coordSys.parentBody.joint
csC3 = csCa
csC4 = csCb
# Do the Joints have the same axis of Rotation
if (jA.Phi == Matrix([1,0,0])):
self.sign = 1
self.pick = Matrix([[0,1,0],
[0,0,1]])
elif (jA.Phi == Matrix([0,1,0])):
self.sign = -1
self.pick = Matrix([[1,0,0],
[0,0,1]])
elif (jA.Phi == Matrix([0,0,1])):
self.sign = 1
self.pick = Matrix([[1,0,0],
[0,1,0]])
else:
raise ValueError('Loop "%s": Axis of Rotation must be either x,y or z!'%name)
assert( jA.Phi == jB.Phi ), 'jA.Phi(%s): %s, jB.Phi(%s): %s'%(jA.name,jA.Phi,jB.name,jB.Phi)
assert( jA.Phi == jD.Phi ), 'jA.Phi(%s): %s, jD.Phi(%s): %s'%(jA.name,jA.Phi,jD.name,jD.Phi)
assert( jA.Psi.norm() == 0 )
assert( jB.Psi.norm() == 0 )
assert( jD.Psi.norm() == 0 )
# Are All Coordinate Systems aligned like their parentBody?
assert( (jA.coordSys.R - eye(3)) == zeros(3) )
assert( (jB.coordSys.R - eye(3)) == zeros(3) )
assert( (jD.coordSys.R - eye(3)) == zeros(3) )
# Check that bodies between joints are the same
assert( jA.coordSys.parentBody == jD.coordSys.parentBody )
assert( jA.body == jB.coordSys.parentBody )
assert( jB.body == csC3.parentBody )
assert( jD.body == csC4.parentBody )
# Super Constructor
Loop.__init__(self, name)
# Save Parameters
self.jA = jA
self.jB = jB
self.jD = jD
self.csC3 = csC3
self.csC4 = csC4
self.posture = posture
# Independent Coordinates
self.u = [jA.q]
self.ud = [jA.qd]
self.udd = [jA.qdd]
# Dependent Coordinates
self.v = [jB.q, jD.q]
self.vd = [jB.qd, jD.qd]
self.vdd = [jB.qdd, jD.qdd]
def calc(self, graph):
'''
Returns precalculated v(u), Bvu and b_prime, s.t.
q = [u,v]', where
u: independent coordinates
v: dependent coordinates
Starting from the Constraint Equation: Phi(q) = 0,
        one obtains by differentiation:
(d(Phi)/du)*u_dot + (d(Phi)/dv)*v_dot = 0
        Ju*u_dot + Jv*v_dot = 0
Thus, v_dot = -(inv(Jv)*Ju)*u_dot = Bvu*u_dot, with Jv = d(Phi)/dv and Ju = d(Phi)/du
        Differentiating once more yields
Ju*u_ddot + Jv*v_ddot + [Ju_dot, Jv_dot]*[u_dot,v_dot]' = 0
Ju*u_ddot + Jv*v_ddot + J_dot*q_dot = 0
        Using these relations, one may obtain an expression for v_ddot
v_ddot = -(inv(Jv)*Ju)*u_ddot - inv(Jv)*J_dot*q_dot
= Bvu*u_ddot + b_prime, with b_prime = -inv(Jv)*J_dot*q_dot
Finally one can transform the Equation of Motion
M*q_ddot + h = f + W'*mu
M*(J*u_ddot + b) + h = f + W'*mu with J = [1, Bvu']' and b = [0,b_prime']'
(J'*M*J)*u_ddot + J'*M*b + J'*h = J'*f, since J'*W' = 0
M_star*u_ddot + h_star = f_star
M_star = (J'*M*J)
h_star = J'*M*b + J'*h
f_star = J'*f
'''
assert isinstance(graph, Graph)
        # Abbreviations
s = self.sign
# Generalised Coordinates
q1 = self.jA.q # u[0] # angle between x-axes
q1d = self.jA.qd
q2 = self.jB.q # v[0] # angle between x-axes
q2d = self.jB.qd
q3 = self.jD.q # v[1] # angle between x-axes
q3d = self.jD.qd
# Length of bars and angle between x-axis and bar
l1_vec = self.jD.coordSys.p - self.jA.coordSys.p
l1_vec2 = self.pick*l1_vec
l1 = graph.addEquation(L1%self.name, sqrt((transpose(l1_vec)*l1_vec)))
alpha = graph.addEquation(AL%self.name, s*atan2(l1_vec2[1],l1_vec2[0]))
l2_vec = self.jB.coordSys.p
l2_vec2 = self.pick*l2_vec
l2 = graph.addEquation(L2%self.name, sqrt((transpose(l2_vec)*l2_vec)))
beta = graph.addEquation(BE%self.name, s*atan2(l2_vec2[1],l2_vec2[0]))
l3_vec = self.csC3.p
l3_vec2 = self.pick*l3_vec
l3 = graph.addEquation(L3%self.name, sqrt((transpose(l3_vec)*l3_vec)))
gamma = graph.addEquation(GA%self.name, s*atan2(l3_vec2[1],l3_vec2[0]))
l4_vec = self.csC4.p
l4_vec2 = self.pick*l4_vec
l4 = graph.addEquation(L4%self.name, sqrt((transpose(l4_vec)*l4_vec)))
delta = graph.addEquation(DE%self.name, s*atan2(l4_vec2[1],l4_vec2[0]))
# angle between bars
phi_prime = graph.addEquation(PHI%self.name, q1 + beta - alpha)
# A = P1, B = P2, C = P3
#P1 = graph.addEquation(A%self.name, 2*l4*(l1-l2*cos(phi_prime)))
#P2 = graph.addEquation(B%self.name, -2*l2*l4*sin(phi_prime))
#P3 = graph.addEquation(C%self.name, l1**2+l2**2-l3**2+l4**2-2*l1*l2*cos(phi_prime))
# D = P1, E = P2, F = P3
P4 = graph.addEquation(D%self.name, 2*l3*(l2-l1*cos(-phi_prime)))
P5 = graph.addEquation(E%self.name, -2*l1*l3*sin(-phi_prime))
P6 = graph.addEquation(F%self.name, l2**2+l1**2-l4**2+l3**2-2*l2*l1*cos(-phi_prime))
# Calculate v
theta_prime = graph.addEquation(THETA%self.name, 2*atan((P5-self.posture*sqrt(P4**2+P5**2-P6**2))/(P4-P6)))
psi_prime = graph.addEquation(PSI%self.name, ((l2*sin(phi_prime)+l3*sin(phi_prime+theta_prime))/abs(l2*sin(phi_prime)+l3*sin(phi_prime+theta_prime)))*acos((l2*cos(phi_prime)+l3*cos(phi_prime+theta_prime)-l1)/l4))
v1 = (psi_prime + alpha - delta)
v0 = (theta_prime + beta - gamma)
Bvu = Matrix( [[-l2*sin(phi_prime-psi_prime)/(l3*sin(phi_prime+theta_prime-psi_prime))-1], [(l2*sin(theta_prime))/(l4*sin(phi_prime+theta_prime-psi_prime))]] )
b_prime = Matrix( [-(q1d**2*l2*cos(phi_prime-psi_prime)+l3*cos(phi_prime+theta_prime-psi_prime)*(q1d+q2d)**2-l4*q3d**2)/(l3*sin(phi_prime+theta_prime-psi_prime)) , -(q1d**2*l2*cos(theta_prime)+l3*(q1d+q2d)**2-l4*q3d**2*cos(phi_prime+theta_prime-psi_prime))/(l4*sin(phi_prime+theta_prime-psi_prime)) ] )
return ([v0,v1],Bvu,b_prime)
def applyConstraintLoads(self):
'''
apply Constraint Forces at the end of the cut
'''
# locking all directions perpendicular to axis of rotation
transLock = [0,0,0]
for i in [0,1,2]:
if (self.jA.Phi[i] == 0):
transLock[i] = 1
# apply Constraint
c = Constraint(name='Constraint_%s'%self.name, parent=self.csC3, child=self.csC4, \
transLock=transLock, rotLock=[0,0,0], active=False)
# return load object
return c
| 0
| 0
| 0
|
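The calc() docstring in the FourBar record above spells out the coordinate-partitioning derivation in ASCII. The same relations, restated as display math using only the docstring's own symbols (u: independent, v: dependent joint coordinates; nothing is added beyond notation):
$$\Phi(q) = 0, \qquad q = [\,u,\ v\,]^{T}$$
$$J_u\,\dot{u} + J_v\,\dot{v} = 0 \;\Rightarrow\; \dot{v} = -J_v^{-1} J_u\,\dot{u} = B_{vu}\,\dot{u}, \qquad J_u = \tfrac{\partial\Phi}{\partial u},\; J_v = \tfrac{\partial\Phi}{\partial v}$$
$$\ddot{v} = B_{vu}\,\ddot{u} + b', \qquad b' = -J_v^{-1}\,\dot{J}\,\dot{q}$$
$$M^{*}\ddot{u} + h^{*} = f^{*}, \qquad M^{*} = J^{T} M J, \quad h^{*} = J^{T} M b + J^{T} h, \quad f^{*} = J^{T} f, \qquad J = \begin{bmatrix} 1 \\ B_{vu} \end{bmatrix}, \quad b = \begin{bmatrix} 0 \\ b' \end{bmatrix}$$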
127d839e1bbc55e99f4f321f7c332ef610cb53d8
| 1,812
|
py
|
Python
|
earth_enterprise/src/server/wsgi/wms/ogc/common/image_specs.py
|
ezeeyahoo/earthenterprise
|
b6cac9e6228946f2f17d1edb75e118aeb3e8e8c9
|
[
"Apache-2.0"
] | 2,661
|
2017-03-20T22:12:50.000Z
|
2022-03-30T09:43:19.000Z
|
earth_enterprise/src/server/wsgi/wms/ogc/common/image_specs.py
|
ezeeyahoo/earthenterprise
|
b6cac9e6228946f2f17d1edb75e118aeb3e8e8c9
|
[
"Apache-2.0"
] | 1,531
|
2017-03-24T17:20:32.000Z
|
2022-03-16T18:11:14.000Z
|
earth_enterprise/src/server/wsgi/wms/ogc/common/image_specs.py
|
ezeeyahoo/earthenterprise
|
b6cac9e6228946f2f17d1edb75e118aeb3e8e8c9
|
[
"Apache-2.0"
] | 990
|
2017-03-24T11:54:28.000Z
|
2022-03-22T11:51:47.000Z
|
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Holds meta-information about the image formats we support."""
import collections
ImageSpec = collections.namedtuple(
"ImageSpec", "content_type file_extension pil_format")
IMAGE_SPECS = {"jpg": ImageSpec("image/jpeg", "jpg", "JPEG"),
"png": ImageSpec("image/png", "png", "PNG")
}
def IsKnownFormat(fmt):
"""Checks if the format is supported.
Args:
fmt: Format of the image.
Returns:
boolean: If the format is supported.
"""
for spec in IMAGE_SPECS.values():
if spec.content_type == fmt:
return True
return False
def GetImageSpec(fmt):
"""Get the Imagespec.
Args:
fmt: Format of the image.
Returns:
image_spec: image spec.
"""
for spec in IMAGE_SPECS.values():
if spec.content_type == fmt:
return spec
return None
def FormatIsPng(fmt):
"""Checks if the format is of type png.
Args:
fmt: Format of the image.
Returns:
boolean: If the format is png or not.
"""
for typ, spec in IMAGE_SPECS.iteritems():
if spec.content_type == fmt:
return typ == "png"
return False
if __name__ == "__main__":
main()
| 22.65
| 74
| 0.679912
|
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Holds meta-information about the image formats we support."""
import collections
ImageSpec = collections.namedtuple(
"ImageSpec", "content_type file_extension pil_format")
IMAGE_SPECS = {"jpg": ImageSpec("image/jpeg", "jpg", "JPEG"),
"png": ImageSpec("image/png", "png", "PNG")
}
def IsKnownFormat(fmt):
"""Checks if the format is supported.
Args:
fmt: Format of the image.
Returns:
boolean: If the format is supported.
"""
for spec in IMAGE_SPECS.values():
if spec.content_type == fmt:
return True
return False
def GetImageSpec(fmt):
"""Get the Imagespec.
Args:
fmt: Format of the image.
Returns:
image_spec: image spec.
"""
for spec in IMAGE_SPECS.values():
if spec.content_type == fmt:
return spec
return None
def FormatIsPng(fmt):
"""Checks if the format is of type png.
Args:
fmt: Format of the image.
Returns:
boolean: If the format is png or not.
"""
for typ, spec in IMAGE_SPECS.iteritems():
if spec.content_type == fmt:
return typ == "png"
return False
def main():
is_format = IsKnownFormat("jpeg")
print is_format
if __name__ == "__main__":
main()
| 44
| 0
| 23
|
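The image_specs record above is a small lookup module; a minimal usage sketch of its three helpers follows. Only the function names, the MIME strings and the ImageSpec fields come from the module itself; the flat import path is an assumption and should be adjusted to the real package layout.
# Usage sketch for the helpers defined in image_specs.py; import path assumed.
from image_specs import IsKnownFormat, GetImageSpec, FormatIsPng

print(IsKnownFormat("image/png"))     # True  -- matches a known content_type
print(IsKnownFormat("image/tiff"))    # False -- not present in IMAGE_SPECS
spec = GetImageSpec("image/jpeg")     # ImageSpec namedtuple for JPEG
print(spec.file_extension)            # jpg
print(spec.pil_format)                # JPEG
print(FormatIsPng("image/png"))       # True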
9b3205aefcc2508985db4f069099edf5e7dbfa1b
| 662
|
py
|
Python
|
ClassFromQueryGenerator/CRUDPyMacros/Update.py
|
UnstableMutex/ClassFromQueryGenerator
|
5de03f61059d2c61783a9b66ab4e11060343e803
|
[
"MIT"
] | null | null | null |
ClassFromQueryGenerator/CRUDPyMacros/Update.py
|
UnstableMutex/ClassFromQueryGenerator
|
5de03f61059d2c61783a9b66ab4e11060343e803
|
[
"MIT"
] | null | null | null |
ClassFromQueryGenerator/CRUDPyMacros/Update.py
|
UnstableMutex/ClassFromQueryGenerator
|
5de03f61059d2c61783a9b66ab4e11060343e803
|
[
"MIT"
] | null | null | null |
comma=","
result="SET ANSI_NULLS ON\n"
result+="GO\n"
result+="SET QUOTED_IDENTIFIER ON\n"
result+="GO\n"
result+="CREATE PROCEDURE "+Model.TableName+"_Update\n"
result+=mapcols(pars)
result+="AS\n"
result+="BEGIN\n"
result+="SET NOCOUNT ON;\n"
result+="update [dbo].["+Model.TableName+"]\n"
result+=" set ("
result+=mapusual(sqf)
result+=")\n"
result+="WHERE " +Model.PK.Name+"=@"+Model.PK.Name+"\n"
result+="END\n"
result+="GO\n"
| 24.518519
| 55
| 0.669184
|
def sqf(col):
return "["+col.Name+"] = @"+col.Name
def pars(col):
return "@"+col.Name+" "+col.SQLType+"\n"
comma=","
def mapcols(f):
return comma.join(map(f,Model.Columns))
def mapusual(f):
return comma.join(map(f,Model.UsualColumns))
result="SET ANSI_NULLS ON\n"
result+="GO\n"
result+="SET QUOTED_IDENTIFIER ON\n"
result+="GO\n"
result+="CREATE PROCEDURE "+Model.TableName+"_Update\n"
result+=mapcols(pars)
result+="AS\n"
result+="BEGIN\n"
result+="SET NOCOUNT ON;\n"
result+="update [dbo].["+Model.TableName+"]\n"
result+=" set ("
result+=mapusual(sqf)
result+=")\n"
result+="WHERE " +Model.PK.Name+"=@"+Model.PK.Name+"\n"
result+="END\n"
result+="GO\n"
| 141
| 0
| 89
|
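The Update.py template above pulls everything from a Model object that the surrounding generator injects. The sketch below adds a hypothetical stand-in (Model, Column and the sample table are invented; only the attribute names mirror what the template references) so the assembled statement can be inspected in isolation.
# Hypothetical stand-in for the Model object the generator normally injects.
import collections
Column = collections.namedtuple("Column", "Name SQLType")

class Model(object):
    TableName = "Person"                                   # invented sample table
    PK = Column("Id", "int")
    UsualColumns = [Column("FirstName", "nvarchar(50)"),
                    Column("LastName", "nvarchar(50)")]
    Columns = [PK] + UsualColumns                          # key column plus the rest

def pars(col):
    # one stored-procedure parameter per column, e.g. "@Id int"
    return "@" + col.Name + " " + col.SQLType + "\n"

def sqf(col):
    # one SET assignment per non-key column, e.g. "[FirstName] = @FirstName"
    return "[" + col.Name + "] = @" + col.Name

comma = ","
result  = "CREATE PROCEDURE " + Model.TableName + "_Update\n"
result += comma.join(map(pars, Model.Columns))
result += "update [dbo].[" + Model.TableName + "]\n"
result += " set (" + comma.join(map(sqf, Model.UsualColumns)) + ")\n"
result += "WHERE " + Model.PK.Name + "=@" + Model.PK.Name + "\n"
print(result)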
e33365306faf8e05ad78b480b5ad8b2e0c36c04f
| 6,338
|
py
|
Python
|
tests/core/testRpg.py
|
rrpg/engine
|
989f701b82aa7c73ea98003eed13077e5d6f15f9
|
[
"MIT"
] | 2
|
2016-04-07T23:36:46.000Z
|
2016-12-20T15:35:17.000Z
|
tests/core/testRpg.py
|
rrpg/engine
|
989f701b82aa7c73ea98003eed13077e5d6f15f9
|
[
"MIT"
] | 5
|
2016-02-04T16:28:33.000Z
|
2016-03-18T17:02:07.000Z
|
tests/core/testRpg.py
|
rrpg/engine
|
989f701b82aa7c73ea98003eed13077e5d6f15f9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
import tests.common
import core
from core.localisation import _
from core import Rpg
import models.player
from models.saved_game import saved_game
import json
import sqlite3
| 37.502959
| 297
| 0.736668
|
# -*- coding: utf-8 -*-
import unittest
import tests.common
import core
from core.localisation import _
from core import Rpg
import models.player
from models.saved_game import saved_game
import json
import sqlite3
class rpgTests(tests.common.common):
idSavedGame = 1
idFaultySavedGame = 2
idEmptySavedGame = 3
incorrectIdSavedGame = 42
def test_unknown_world(self):
rpgEngine = Rpg.Rpg()
try:
rpgEngine.initWorld("some/unexisting/world")
except core.exception.exception as e:
self.assertEquals(str(e), _('ERROR_UNKNOWN_SELECTED_WORLD'))
def test_invalid_saved_game_id(self):
rpgEngine = Rpg.Rpg()
rpgEngine.initWorld(self.dbFile)
with self.assertRaises(core.exception.exception) as raised:
rpgEngine.initSavedGame(self.incorrectIdSavedGame)
self.assertEquals(str(raised.exception), _('ERROR_RRPG_INIT_INVALID_SAVED_GAME_ID'))
def test_load_player_with_no_save(self):
rpgEngine = Rpg.Rpg()
rpgEngine.initWorld(self.dbFile)
with self.assertRaises(core.exception.exception) as raised:
rpgEngine.initPlayer()
self.assertEquals(str(raised.exception), _('ERROR_SAVED_GAME_NEEDED_TO_INIT_PLAYER'))
def test_load_player_with_empty_save(self):
rpgEngine = Rpg.Rpg()
rpgEngine.initWorld(self.dbFile)
rpgEngine.initSavedGame(self.idEmptySavedGame)
with self.assertRaises(core.exception.exception) as raised:
rpgEngine.initPlayer()
self.assertEquals(str(raised.exception), _('ERROR_NON_EMPTY_SAVED_GAME_NEEDED_TO_INIT_PLAYER'))
def test_load_player_with_faulty_save(self):
rpgEngine = Rpg.Rpg()
rpgEngine.initWorld(self.dbFile)
rpgEngine.initSavedGame(self.idFaultySavedGame)
with self.assertRaises(core.exception.exception) as raised:
rpgEngine.initPlayer()
self.assertEquals(str(raised.exception), _('ERROR_CONNECT_INVALID_CREDENTIALS'))
def test_invalid_world(self):
rpgEngine = Rpg.Rpg()
rpgEngine.initWorld("tests/invalidDB")
rpgEngine.initSavedGame(self.idSavedGame)
self.assertRaises(sqlite3.OperationalError, rpgEngine.initPlayer)
def test_invalid_action_format(self):
with self.assertRaises(TypeError) as raised:
self.rpg.setAction("Not list action")
self.assertEquals(str(raised.exception), _('ERROR_INVALID_FORMAT_ACTION'))
def test_invalid_action_text(self):
self.rpg.setAction(["Unknown action"])
output = self.rpg._runAction()
self.assertEquals(output, _('ERROR_UNKNOWN_COMMAND'))
def test_invalid_action_json(self):
self.rpg.setAction(["Unknown action"])
output = self.rpg._runAction(True)
self.assertEquals(output, {'error': {'message': _('ERROR_UNKNOWN_COMMAND'), 'code': 1}})
def compareSavedGamesSaveOk(self):
saves = saved_game.loadAll()
expectedSaves = [
{
'id_saved_game': 1,
'snapshot_player': '{"id_gender": 1, "name": "TEST_PLAYER_SOME", "id_character": 4, "id_player": 3, "stat_defence": 2, "stat_attack": 4, "stat_max_hp": 20, "inventory": null, "id_area": 1, "stat_current_hp": 20, "login": "TEST_PLAYER_SOME", "stat_speed": 2, "id_species": 1, "stat_luck": 10}',
'id_player': 3,
'id_character': 4
},
{
'id_saved_game': 2,
'snapshot_player': '{"id_gender": 1, "name": "TEST_PLAYER2bis", "id_character": 3, "id_player": 2, "stat_defence": 2, "stat_attack": 4, "stat_max_hp": 20, "inventory": null, "id_area": 1, "stat_current_hp": 20, "login": "TEST_PLAYER2bis", "stat_speed": 2, "id_species": 1, "stat_luck": 10}',
'id_player': 2,
'id_character': 3
},
{
'id_saved_game': 3,
'snapshot_player': '',
'id_player': None,
'id_character': None
}
]
self.assertEquals(saves, expectedSaves)
def compareSavedGamesSaveKo(self):
saves = saved_game.loadAll()
expectedSaves = [
{
'id_saved_game': 1,
'snapshot_player': '{"id_gender": 1, "name": "TEST_PLAYER", "id_character": 2, "id_player": 1, "stat_defence": 2, "stat_attack": 4, "stat_max_hp": 20, "inventory": null, "id_area": 1, "stat_current_hp": 20, "login": "TEST_PLAYER", "stat_speed": 2, "id_species": 1, "stat_luck": 10}',
'id_player': 1,
'id_character': 2
},
{
'id_saved_game': 2,
'snapshot_player': '{"id_gender": 1, "name": "TEST_PLAYER2bis", "id_character": 3, "id_player": 2, "stat_defence": 2, "stat_attack": 4, "stat_max_hp": 20, "inventory": null, "id_area": 1, "stat_current_hp": 20, "login": "TEST_PLAYER2bis", "stat_speed": 2, "id_species": 1, "stat_luck": 10}',
'id_player': 2,
'id_character': 3
},
{
'id_saved_game': 3,
'snapshot_player': '',
'id_player': None,
'id_character': None
}
]
self.assertEquals(saves, expectedSaves)
def test_login_already_used(self):
with self.assertRaises(models.player.exception) as raised:
self.rpg.createPlayer('TEST_PLAYER', 1, 1)
self.assertEquals(str(raised.exception), _('ERROR_SIGNUP_LOGIN_ALREADY_USED'))
self.compareSavedGamesSaveKo()
def test_invalid_gender(self):
with self.assertRaises(models.player.exception) as raised:
self.rpg.createPlayer('TEST_PLAYER_SOME', 'some gender', 1)
self.assertEquals(str(raised.exception), _('ERROR_SIGNUP_INVALID_GENDER'))
self.compareSavedGamesSaveKo()
def test_invalid_species(self):
with self.assertRaises(models.player.exception) as raised:
self.rpg.createPlayer('TEST_PLAYER_SOME', 1, 'some species')
self.assertEquals(str(raised.exception), _('ERROR_SIGNUP_INVALID_SPECIES'))
self.compareSavedGamesSaveKo()
def test_ok(self):
self.rpg.createPlayer('TEST_PLAYER_SOME', '1', '1')
self.compareSavedGamesSaveOk()
def test_command_with_no_saved_game(self):
rpgEngine = Rpg.Rpg()
rpgEngine.setAction([_('LOOK_COMMAND')])
with self.assertRaises(core.exception.exception) as raised:
rpgEngine._runAction(True)
self.assertEquals(str(raised.exception), _('ERROR_SAVED_GAME_NEEDED_TO_RUN_ACTION'))
def test_command_with_no_player(self):
rpgEngine = Rpg.Rpg()
rpgEngine.initWorld(self.dbFile)
rpgEngine.initSavedGame(self.idEmptySavedGame)
rpgEngine.setAction([_('LOOK_COMMAND')])
with self.assertRaises(core.exception.exception) as raised:
rpgEngine._runAction(True)
self.assertEquals(str(raised.exception), _('ERROR_CONNECTED_PLAYER_NEEDED_FOR_COMMAND'))
def test_run_action_with_no_action(self):
with self.assertRaises(core.exception.exception) as raised:
self.rpg._runAction()
self.assertEquals(str(raised.exception), _('ERROR_NO_ACTION_SET'))
| 5,562
| 536
| 23
|
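The testRpg record exercises the Rpg engine only through its failure paths; taken together, the calls imply a happy-path sequence roughly like the sketch below. The world path and saved-game id are placeholders, and _runAction is invoked directly only because the tests themselves do so despite its leading underscore.
# Call sequence inferred from the tests above; path and id are placeholders.
from core import Rpg
from core.localisation import _

rpg = Rpg.Rpg()
rpg.initWorld("path/to/world.db")    # placeholder world database
rpg.initSavedGame(1)                 # placeholder saved-game id
rpg.initPlayer()                     # needs a non-empty, valid saved game
rpg.setAction([_('LOOK_COMMAND')])   # actions are passed as a list
output = rpg._runAction(True)        # True -> dict-style (JSON-ready) result
print(output)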