| max_stars_repo_path (string, lengths 3-269) | max_stars_repo_name (string, lengths 4-119) | max_stars_count (int64, 0-191k) | id (string, lengths 1-7) | content (string, lengths 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
atlas/foundations_rest_api/src/foundations_rest_api/versioning.py
|
DeepLearnI/atlas
| 296
|
12775651
|
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution('foundations_rest_api').version
except DistributionNotFound:
__version__ = None
| 1.507813
| 2
|
ImageAnalysis/ImageAnalysis/python/references/bead-designer-test/gui/designer.py
|
mikebourbeauart/PerlerPrinter
| 0
|
12775652
|
<reponame>mikebourbeauart/PerlerPrinter
"""
designer.py
"""
import warnings
warnings.filterwarnings("ignore")
import os
import sys
import wx
import beadgui
#Some constants
#BEAD_RADIUS = 1.75*mm
#BEAD_THICKNESS = 1*mm
#BOARD_SPACING = 4.85*mm
#BOARD_BORDER = 4*mm
#Some notes
#A4 60x43 = 2580
#A3 86x60 = 5160
#A2 86x120 = 10,320
#MARQUEE A4+A4 = 120x43
# Implementing Designer
class BeadDesignDesigner( beadgui.Designer ):
def __init__( self, parent ):
beadgui.Designer.__init__( self, parent )
self.addPDFWindow()
def addPDFWindow(self):
from wx.lib.pdfwin import PDFWindow
self.pdf = PDFWindow(self.m_panel2, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize)
self.previewPane.Add(self.pdf, proportion=1, flag=wx.EXPAND)
self.pdf.Raise()
self.SetSize((780,620))
self.loadPDF()
def loadPDF(self, pdf="C:\\code\\pil\\images\\title.pdf"):
self.pdf.LoadFile(pdf)
self.pdf.setView('FitB')
#self.pdf.Show()
#==========================================================================================
# Handlers for Designer events.
def onImageSelect(self, event):
pass
def onAbout(self, event):
info = wx.AboutDialogInfo()
info.SetIcon(wx.Icon('C:\\code\\pil\\images\\jon.png', wx.BITMAP_TYPE_PNG))
info.SetName('Bead Pattern Designer')
info.SetVersion('1.0')
info.SetDescription("A simple utility to generate patterns for HAMA beads")
info.SetCopyright('(C) 2011 <NAME>')
info.SetWebSite('https://sites.google.com/site/degenatrons/')
info.AddDeveloper('<NAME>')
wx.AboutBox(info)
def onGenerate( self, event ):
# TODO: Implement onGenerate
pass
def onImage( self, event ):
# TODO: Implement onImage
pass
def onView( self, event ):
# TODO: Implement onView
pass
def onLoadImage( self, event ):
# TODO: Implement onLoadImage
pass
def onExit( self, event ):
# TODO: Implement onExit
pass
def main():
app = wx.App(False)
BeadDesignDesigner(None).Show()
app.MainLoop()
app.Destroy()
if __name__ == "__main__":
main()
| 2.046875
| 2
|
proxystore/test/test_utils.py
|
gpauloski/ProxyStore
| 2
|
12775653
|
<reponame>gpauloski/ProxyStore
"""Utils Unit Tests"""
from proxystore import utils
from proxystore.factory import SimpleFactory
def test_create_key() -> None:
"""Test create_key()"""
assert isinstance(utils.create_key(42), str)
def test_fullname() -> None:
"""Test fullname()"""
assert utils.fullname(SimpleFactory) == 'proxystore.factory.SimpleFactory'
assert (
utils.fullname(SimpleFactory('string'))
== 'proxystore.factory.SimpleFactory'
)
assert utils.fullname('string') == 'str'
| 2.28125
| 2
|
tests/watson/dev/test_middleware.py
|
watsonpy/watson-dev
| 0
|
12775654
|
<filename>tests/watson/dev/test_middleware.py<gh_stars>0
# -*- coding: utf-8 -*-
import os
from watson.dev.middleware import StaticFileMiddleware
from tests.watson.dev.support import sample_app, sample_environ, sample_start_response
class TestStaticFileMiddleware(object):
def test_create(self):
mw = StaticFileMiddleware(sample_app, os.path.dirname(__file__))
assert mw.app == sample_app
assert mw.initial_dir == os.path.dirname(__file__)
def test_execute(self):
mw = StaticFileMiddleware(sample_app, os.path.dirname(__file__))
environ = sample_environ(PATH_INFO='/sample.css')
response = mw(environ, sample_start_response)
assert response == [b'html, body { background: red; }']
def test_run_app(self):
mw = StaticFileMiddleware(sample_app, os.path.dirname(__file__))
environ = sample_environ(PATH_INFO='/blah')
mw(environ, sample_start_response)
| 2.296875
| 2
|
arangodol/__init__.py
|
i2mint/arangodol
| 1
|
12775655
|
<reponame>i2mint/arangodol
"""
arango with a simple (dict-like or list-like) interface
"""
from dol.base import Persister
from pyArango.connection import Connection
from pyArango.theExceptions import DocumentNotFoundError
class ArangoDbPersister(Persister):
"""
A basic ArangoDB persister.
>>> from py2store.persisters.arangodb_w_pyarango import ArangoDbPersister
>>> s = ArangoDbPersister()
>>> k = {'key': '777'} # Each collection will happily accept user-defined _key values.
>>> v = {'val': 'bar'}
>>> for _key in s:
... del s[_key]
...
>>> k in s
False
>>> len(s)
0
>>> s[k] = v
>>> len(s)
1
>>> s[k]
{'val': 'bar'}
>>> s.get(k)
{'val': 'bar'}
>>> s.get({'not': 'a key'}, {'default': 'val'}) # testing s.get with default
{'default': 'val'}
>>> list(s.values())
[{'val': 'bar'}]
>>> k in s # testing __contains__ again
True
>>> del s[k]
>>> len(s)
0
>>> s = ArangoDbPersister(db_name='py2store', key_fields=('name',))
>>> for _key in s:
... del s[_key]
...
>>> s[{'name': 'guido'}] = {'yob': 1956, 'proj': 'python', 'bdfl': False}
>>> s[{'name': 'guido'}]
{'yob': 1956, 'proj': 'python', 'bdfl': False}
>>> s[{'name': 'vitalik'}] = {'yob': 1994, 'proj': 'ethereum', 'bdfl': True}
>>> s[{'name': 'vitalik'}]
{'yob': 1994, 'proj': 'ethereum', 'bdfl': True}
>>> for key, val in s.items():
... print(f"{key}: {val}")
{'name': 'guido'}: {'yob': 1956, 'proj': 'python', 'bdfl': False}
{'name': 'vitalik'}: {'yob': 1994, 'proj': 'ethereum', 'bdfl': True}
"""
# reserved by the database fields
_reserved = {"_key", "_id", "_rev"}
def __init__(
self,
user="root",
password="<PASSWORD>",
url="http://127.0.0.1:8529",
db_name="py2store",
collection_name="test",
key_fields=("key",), # _id, _key and _rev are reserved by db
key_fields_separator="::",
):
self._connection = Connection(
arangoURL=url, username=user, password=password,
)
self._db_name = db_name
self._collection_name = collection_name
# If DB not created:
if not self._connection.hasDatabase(self._db_name):
self._connection.createDatabase(self._db_name)
self._adb = self._connection[self._db_name]
# If collection not created:
if not self._adb.hasCollection(self._collection_name):
self._collection = self._adb.createCollection(
name=self._collection_name
)
self._collection = self._adb[self._collection_name]
if isinstance(key_fields, str):
key_fields = (key_fields,)
self._key_fields = key_fields
self._key_fields_separator = key_fields_separator
def _make_key(self, keys_dict):
"""
Convert a dict of keys into a real key-string by joining dict values in a predefined order.
DB requirements for the key:
The key must be a string value.
Keys are case-sensitive.
Numeric keys are not allowed.
The key must be from 1 byte to 254 bytes long.
It must consist of:
- letters a-z (lower or upper case),
- digits 0-9
- any of the following characters: _ - : . @ ( ) + , = ; $ ! * ' %
Any other characters cannot be used inside key values.
"""
key_values = [keys_dict[key_label] for key_label in self._key_fields]
key_str = self._key_fields_separator.join(key_values)
return key_str
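# Hypothetical example (not from the source): with key_fields=('name', 'proj') and the default
# '::' separator, _make_key({'name': 'guido', 'proj': 'python'}) returns 'guido::python', and
# _split_key('guido::python') recovers {'name': 'guido', 'proj': 'python'}.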
def _split_key(self, joined_key_str):
"""
Convert a key-string used by DB internally
into a user-friendly dict of key labels and values.
"""
key_values = joined_key_str.split(self._key_fields_separator)
keys_dict = dict(zip(self._key_fields, key_values))
return keys_dict
def __fetchitem__(self, keys_dict):
key = self._make_key(keys_dict)
try:
return self._collection[key]
except DocumentNotFoundError:
raise KeyError(f"No document found for query: {keys_dict}")
def __getitem__(self, keys_dict):
item = self.__fetchitem__(keys_dict)
doc = item.getStore()
# todo (Mike): maybe move this cleanup to a base Arango Store?
# exclude reserved keys and corresponded values
data = {
key: doc[key]
for key in doc
if key not in self._reserved and key not in self._key_fields
}
return data
def __setitem__(self, keys_dict, values_dict):
try:
doc = self.__fetchitem__(keys_dict)
except KeyError:
doc = self._collection.createDocument()
doc._key = self._make_key(keys_dict)
for k, v in values_dict.items():
doc[k] = v
doc.save()
def __delitem__(self, keys_dict):
doc = self.__fetchitem__(keys_dict)
doc.delete()
def __iter__(self):
docs = self._collection.fetchAll()
yield from (
{key_name: doc[key_name] for key_name in self._key_fields}
for doc in docs
)
def __len__(self):
return self._collection.count()
#######################################################################################################################
# Stores
from functools import wraps
from dol.base import Store
from py2store.util import lazyprop
class ArangoDbStore(Store):
@wraps(ArangoDbPersister.__init__)
def __init__(self, *args, **kwargs):
persister = ArangoDbPersister(*args, **kwargs)
super().__init__(persister)
class ArangoDbTupleKeyStore(ArangoDbStore):
"""
ArangoDbStore using tuple keys.
>>> from py2store.stores.arangodb_store import ArangoDbTupleKeyStore
>>> s = ArangoDbTupleKeyStore(collection_name='test', key_fields=('key', 'user'))
>>> k = (1234, 'user')
>>> v = {'name': 'bob', 'age': 42}
>>> if k in s: # deleting all docs in tmp
... del s[k]
>>> assert (k in s) == False # see that key is not in store (and testing __contains__)
>>> orig_length = len(s)
>>> s[k] = v
>>> assert len(s) == orig_length + 1
>>> assert k in list(s)
>>> assert s[k] == v
>>> assert s.get(k) == v
>>> assert v in list(s.values())
>>> assert (k in s) == True # testing __contains__ again
>>> del s[k]
>>> assert len(s) == orig_length
"""
@lazyprop
def _key_fields(self):
return self.store._key_fields
def _id_of_key(self, k):
return {
field: field_val for field, field_val in zip(self._key_fields, k)
}
def _key_of_id(self, _id):
return tuple(_id[x] for x in self._key_fields)
| 2.578125
| 3
|
aztk/spark/client/cluster/helpers/run.py
|
atg-abhishek/aztk
| 0
|
12775656
|
<gh_stars>0
import azure.batch.models.batch_error as batch_error
from aztk import error
from aztk.utils import helpers
def cluster_run(core_cluster_operations,
cluster_id: str,
command: str,
host=False,
internal: bool = False,
timeout=None):
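# Note: unless host=True, the command is executed inside the cluster's "spark" container
# (container_name="spark"); with host=True it runs directly on the node (container_name=None).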
try:
return core_cluster_operations.run(
cluster_id, command, internal, container_name="spark" if not host else None, timeout=timeout)
except batch_error.BatchErrorException as e:
raise error.AztkError(helpers.format_batch_exception(e))
| 2.296875
| 2
|
integration_tests/integrations/jinja/build.py
|
dxw/ukti_template
| 2
|
12775657
|
<gh_stars>1-10
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader([
'vendor/jinja_ukti_template/views/layouts',
'templates'
]))
template = env.get_template('test_template.html')
content = template.render({
"html_lang": "rb",
"skip_link_message": "Custom skip link text",
"logo_link_title": "Custom logo link title text",
})
with open("../../html_for_testing/jinja_integration_test_app.html", "w") as static_file:
static_file.write(content)
| 2.21875
| 2
|
allennlp_models/tagging/predictors/sentence_tagger.py
|
matt-peters/allennlp-models
| 402
|
12775658
|
from allennlp.predictors.sentence_tagger import SentenceTaggerPredictor # noqa: F401
# This component lives in the main repo because we need it there for tests.
| 1.210938
| 1
|
openstack/tests/unit/load_balancer/test_amphora.py
|
NeCTAR-RC/openstacksdk
| 0
|
12775659
|
# Copyright 2019 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.unit import base
import uuid
from openstack.load_balancer.v2 import amphora
IDENTIFIER = uuid.uuid4()
LB_ID = uuid.uuid4()
LISTENER_ID = uuid.uuid4()
COMPUTE_ID = uuid.uuid4()
VRRP_PORT_ID = uuid.uuid4()
HA_PORT_ID = uuid.uuid4()
IMAGE_ID = uuid.uuid4()
COMPUTE_FLAVOR = uuid.uuid4()
AMPHORA_ID = uuid.uuid4()
EXAMPLE = {
'id': IDENTIFIER,
'loadbalancer_id': LB_ID,
'compute_id': COMPUTE_ID,
'lb_network_ip': '192.168.1.2',
'vrrp_ip': '192.168.1.5',
'ha_ip': '192.168.1.10',
'vrrp_port_id': VRRP_PORT_ID,
'ha_port_id': HA_PORT_ID,
'cert_expiration': '2019-09-19 00:34:51',
'cert_busy': 0,
'role': 'MASTER',
'status': 'ALLOCATED',
'vrrp_interface': 'eth1',
'vrrp_id': 1,
'vrrp_priority': 100,
'cached_zone': 'zone1',
'created_at': '2017-05-10T18:14:44',
'updated_at': '2017-05-10T23:08:12',
'image_id': IMAGE_ID,
'compute_flavor': COMPUTE_FLAVOR
}
class TestAmphora(base.TestCase):
def test_basic(self):
test_amphora = amphora.Amphora()
self.assertEqual('amphora', test_amphora.resource_key)
self.assertEqual('amphorae', test_amphora.resources_key)
self.assertEqual('/octavia/amphorae', test_amphora.base_path)
self.assertFalse(test_amphora.allow_create)
self.assertTrue(test_amphora.allow_fetch)
self.assertFalse(test_amphora.allow_commit)
self.assertFalse(test_amphora.allow_delete)
self.assertTrue(test_amphora.allow_list)
def test_make_it(self):
test_amphora = amphora.Amphora(**EXAMPLE)
self.assertEqual(IDENTIFIER, test_amphora.id)
self.assertEqual(LB_ID, test_amphora.loadbalancer_id)
self.assertEqual(COMPUTE_ID, test_amphora.compute_id)
self.assertEqual(EXAMPLE['lb_network_ip'], test_amphora.lb_network_ip)
self.assertEqual(EXAMPLE['vrrp_ip'], test_amphora.vrrp_ip)
self.assertEqual(EXAMPLE['ha_ip'], test_amphora.ha_ip)
self.assertEqual(VRRP_PORT_ID, test_amphora.vrrp_port_id)
self.assertEqual(HA_PORT_ID, test_amphora.ha_port_id)
self.assertEqual(EXAMPLE['cert_expiration'],
test_amphora.cert_expiration)
self.assertEqual(EXAMPLE['cert_busy'], test_amphora.cert_busy)
self.assertEqual(EXAMPLE['role'], test_amphora.role)
self.assertEqual(EXAMPLE['status'], test_amphora.status)
self.assertEqual(EXAMPLE['vrrp_interface'],
test_amphora.vrrp_interface)
self.assertEqual(EXAMPLE['vrrp_id'], test_amphora.vrrp_id)
self.assertEqual(EXAMPLE['vrrp_priority'], test_amphora.vrrp_priority)
self.assertEqual(EXAMPLE['cached_zone'], test_amphora.cached_zone)
self.assertEqual(EXAMPLE['created_at'], test_amphora.created_at)
self.assertEqual(EXAMPLE['updated_at'], test_amphora.updated_at)
self.assertEqual(IMAGE_ID, test_amphora.image_id)
self.assertEqual(COMPUTE_FLAVOR, test_amphora.compute_flavor)
self.assertDictEqual(
{'limit': 'limit',
'marker': 'marker',
'id': 'id',
'loadbalancer_id': 'loadbalancer_id',
'compute_id': 'compute_id',
'lb_network_ip': 'lb_network_ip',
'vrrp_ip': 'vrrp_ip',
'ha_ip': 'ha_ip',
'vrrp_port_id': 'vrrp_port_id',
'ha_port_id': 'ha_port_id',
'cert_expiration': 'cert_expiration',
'cert_busy': 'cert_busy',
'role': 'role',
'status': 'status',
'vrrp_interface': 'vrrp_interface',
'vrrp_id': 'vrrp_id',
'vrrp_priority': 'vrrp_priority',
'cached_zone': 'cached_zone',
'created_at': 'created_at',
'updated_at': 'updated_at',
'image_id': 'image_id',
'compute_flavor': 'compute_flavor'
},
test_amphora._query_mapping._mapping)
class TestAmphoraConfig(base.TestCase):
def test_basic(self):
test_amp_config = amphora.AmphoraConfig()
self.assertEqual('/octavia/amphorae/%(amphora_id)s/config',
test_amp_config.base_path)
self.assertFalse(test_amp_config.allow_create)
self.assertFalse(test_amp_config.allow_fetch)
self.assertTrue(test_amp_config.allow_commit)
self.assertFalse(test_amp_config.allow_delete)
self.assertFalse(test_amp_config.allow_list)
class TestAmphoraFailover(base.TestCase):
def test_basic(self):
test_amp_failover = amphora.AmphoraFailover()
self.assertEqual('/octavia/amphorae/%(amphora_id)s/failover',
test_amp_failover.base_path)
self.assertFalse(test_amp_failover.allow_create)
self.assertFalse(test_amp_failover.allow_fetch)
self.assertTrue(test_amp_failover.allow_commit)
self.assertFalse(test_amp_failover.allow_delete)
self.assertFalse(test_amp_failover.allow_list)
| 1.546875
| 2
|
run_fetcher.py
|
pogestudio/coinmarketcap-to-mongodb
| 0
|
12775660
|
# coding: utf-8
from lxml import html
import requests
import pandas as pd
import config
# DATABASE THINGS
# this one is used by the database
import pymongo as pm
# connect to the database choosing the correct collection
mongo_db_url = config.mongo_db_url
mongo_client = pm.MongoClient(mongo_db_url)
db = mongo_client[config.database_name] # the selected database on your mongo server
collection = db[config.collection_name] # the collection to which to write the data
baseURL = 'https://coinmarketcap.com/historical/';
snapshotDates = ['20130505', '20130512', '20130519', '20130526', '20130602', '20130609', '20130616', '20130623', '20130630', '20130707', '20130714', '20130721', '20130728', '20130804', '20130811',
'20130818', '20130825', '20130901', '20130908', '20130915', '20130922', '20130929', '20131006', '20131013', '20131020', '20131027', '20131103', '20131110', '20131117', '20131124', '20131201',
'20131208', '20131215', '20131222', '20131229', '20140105', '20140112', '20140119', '20140126', '20140202', '20140209', '20140216', '20140223', '20140302', '20140309', '20140316', '20140323',
'20140330', '20140406', '20140413', '20140420', '20140427', '20140504', '20140511', '20140518', '20140525', '20140601', '20140608', '20140615', '20140622', '20140629', '20140706', '20140713',
'20140720', '20140727', '20140803', '20140810', '20140817', '20140824', '20140831', '20140907', '20140914', '20140921', '20140928', '20141005', '20141012', '20141019', '20141026', '20141102',
'20141109', '20141116', '20141123', '20141130', '20141207', '20141214', '20141221', '20141228', '20150104', '20150111', '20150118', '20150125', '20150201', '20150208', '20150215', '20150222',
'20150301', '20150308', '20150315', '20150322', '20150329', '20150405', '20150412', '20150419', '20150426', '20150503', '20150510', '20150517', '20150524', '20150531', '20150607', '20150614',
'20150621', '20150628', '20150705', '20150712', '20150719', '20150726', '20150802', '20150809', '20150816', '20150823', '20150830', '20150906', '20150913', '20150920', '20150927', '20151004',
'20151011', '20151018', '20151025', '20151101', '20151108', '20151115', '20151122', '20151129', '20151206', '20151213', '20151220', '20151227', '20160103', '20160110', '20160117', '20160124',
'20160131', '20160207', '20160214', '20160221', '20160228', '20160306', '20160313', '20160320', '20160327', '20160403', '20160410', '20160417', '20160424', '20160501', '20160508', '20160515',
'20160522', '20160529', '20160605', '20160612', '20160619', '20160626', '20160703', '20160710', '20160717', '20160724', '20160731', '20160807', '20160814', '20160821', '20160828', '20160904',
'20160911', '20160918', '20160925', '20161002', '20161009', '20161016', '20161023', '20161030', '20161106', '20161113', '20161120', '20161127', '20161204', '20161211', '20161218', '20161225',
'20170101', '20170108', '20170115', '20170122', '20170129', '20170205', '20170212', '20170219', '20170226', '20170305', '20170312', '20170319', '20170326', '20170402', '20170409', '20170416',
'20170423', '20170430', '20170507', '20170514', '20170521', '20170528', '20170604', '20170611', '20170618', '20170625', '20170702', '20170709', '20170716', '20170723', '20170730', '20170806',
'20170813', '20170820', '20170827', '20170903', '20170910', '20170917', '20170924', '20171001', '20171008', '20171015', '20171022', '20171029', '20171105', '20171112', '20171119', '20171126',
'20171203', '20171210', '20171217', '20171224', '20171231', '20180107', '20180114', '20180121', '20180128', '20180204', '20180211', '20180218', '20180225', '20180304', '20180311', '20180318',
'20180325', '20180401', '20180408', '20180415', '20180422', '20180429', '20180506', '20180513', '20180520', '20180527', '20180603', '20180610', '20180617', '20180624', '20180701', '20180708',
'20180715', '20180722', '20180729', '20180805', '20180812', '20180819', '20180826', '20180902', '20180909', '20180916', '20180923', '20180930', '20181007']
start_amount = len(snapshotDates)
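# Best-effort conversion of scraped text (or a one-element pandas Series) to float; returns 0 if parsing fails.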
def maybe_float( text ):
try:
if isinstance(text, pd.Series):
return float(text.tolist()[0])
return float(text)
except (ValueError, IndexError):
return 0
def parse_snapshot( date ):
fullURL = baseURL + date + '/';
if config.DEBUG:
print("starting URL parsing snapshot for: " + date);
resp = requests.get(fullURL)
h = html.fromstring(resp.content)
names = h.xpath('//a[@class="currency-name-container link-secondary"]/text()')
symbols = h.xpath('//td[@class="text-left col-symbol"]/text()')
symbols = [replaceSymbolCharacters(symbol) for symbol in symbols];
market_caps = [maybe_float(row) for row in h.xpath('//td[@class="no-wrap market-cap text-right"]/@data-usd')]
oneday_volumes = [maybe_float(row) for row in h.xpath('//a[@class="volume"]/@data-usd')]
prices_usd = [maybe_float(row) for row in h.xpath('//a[@class="price"]/@data-usd')]
prices_btc = [maybe_float(row) for row in h.xpath('//a[@class="price"]/@data-btc')]
formattedForReturn = {};
for x in range(0, len(symbols)):
formattedForReturn[symbols[x]] = {'name': names[x], 'symbol': symbols[x], 'market_cap': market_caps[x], 'oneday_volume': oneday_volumes[x], 'price_usd': prices_usd[x],
'price_btc': prices_btc[x]};
if config.DEBUG:
print("Finished parsing " + date);
return formattedForReturn
def write_snapshotresults_to_database( datesAndData ):
result = collection.insert_many(datesAndData)
#print("wrote " + str(len(datesAndData)) + " to db!");
result.inserted_ids
def replaceSymbolCharacters( stringToFix ):
symbols_that_does_not_work_in_mongo_and_their_replacements = {'$': 'SSS'};
for symbol, replacement in symbols_that_does_not_work_in_mongo_and_their_replacements.items():
# print("want to replace" + symbol + " in string " + stringToFix + " with " + replacement);
stringToFix = stringToFix.replace(symbol, replacement);
return stringToFix;
def parse_and_save_data( snapshotDatesToParse ):
while len(snapshotDatesToParse) > 0:
# first parse
parsedSnapshots = [];
limit = 2;
counter = 0;
while counter < limit and len(snapshotDatesToParse) > 0:
snapshotDate = snapshotDatesToParse.pop();
entry = {};
entry['date'] = snapshotDate;
entry['marketData'] = parse_snapshot(snapshotDate);
parsedSnapshots.append(entry);
# print(parsedSnapshots);
counter += 1;
# then save
write_snapshotresults_to_database(parsedSnapshots)
progress_number = float(start_amount - len(snapshotDatesToParse)) / float( start_amount) * 100
progress_string = "{:.1f}".format(progress_number) + "%"
print("wrote to database, progress: " + progress_string)
parse_and_save_data(snapshotDates); # write_snapshotresults_to_database(allRecordedSnapshots);
| 2.703125
| 3
|
patterns/behavioral/iterator.py
|
smartlegionlab/python-patterns
| 2
|
12775661
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Licensed under the terms of the BSD 3-Clause License
# (see LICENSE for details).
# Copyright © 2018-2021, <NAME>
# All rights reserved.
# --------------------------------------------------------
class IteratorBase:
"""Base iterator class"""
def first(self):
"""
Returns the first item in the collection.
If the element does not exist, an IndexError exception is thrown.
"""
raise NotImplementedError()
def last(self):
"""
Returns the last item in the collection.
If the element does not exist, an IndexError exception is thrown.
"""
raise NotImplementedError()
def next(self):
"""Returns the next item in the collection"""
raise NotImplementedError()
def prev(self):
"""Returns the previous item in the collection"""
raise NotImplementedError()
def current_item(self):
"""Returns the current item in the collection"""
raise NotImplementedError()
def is_done(self, index):
"""Returns true if the element at the specified index exists, false otherwise"""
raise NotImplementedError()
def get_item(self, index):
"""Returns the collection item at the specified index,
otherwise throws an IndexError exception"""
raise NotImplementedError()
class Iterator(IteratorBase):
def __init__(self, list_=None):
self._list = list_ or []
self._current = 0
def first(self):
return self._list[0]
def last(self):
return self._list[-1]
def current_item(self):
return self._list[self._current]
def is_done(self, index):
last_index = len(self._list) - 1
return 0 <= index <= last_index
def next(self):
self._current += 1
if not self.is_done(self._current):
self._current = 0
return self.current_item()
def prev(self):
self._current -= 1
if not self.is_done(self._current):
self._current = len(self._list) - 1
return self.current_item()
def get_item(self, index):
if not self.is_done(index):
raise IndexError('No item with index: %d' % index)
return self._list[index]
def main():
it = Iterator(['one', 'two', 'three', 'four', 'five'])
print([it.prev() for _ in range(5)])
print([it.next() for _ in range(5)])
if __name__ == '__main__':
# Output:
# ['five', 'four', 'three', 'two', 'one']
# ['two', 'three', 'four', 'five', 'one']
main()
| 3.734375
| 4
|
data/studio21_generated/introductory/3662/starter_code.py
|
vijaykumawat256/Prompt-Summarization
| 0
|
12775662
|
def xor(a,b):
| 1.09375
| 1
|
bin/syncwbern52.py
|
DSOlab/autobern
| 5
|
12775663
|
<gh_stars>1-10
#! /usr/bin/python
#-*- coding: utf-8 -*-
from __future__ import print_function
import sys
import os
import subprocess
import argparse
import pybern.products.bernparsers.bloadvar as blvar
## If only the formatter_class could be:
##+ argparse.RawTextHelpFormatter|ArgumentDefaultsHelpFormatter ....
## Seems to work with multiple inheritance!
class myFormatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawTextHelpFormatter):
pass
parser = argparse.ArgumentParser(
formatter_class=myFormatter,
description=
'Synchronize a folder with AIUB\'s remote GEN directory',
epilog=('''National Technical University of Athens,
Dionysos Satellite Observatory\n
Send bug reports to:
<NAME>, <EMAIL>
<NAME>,<EMAIL>
January, 2021'''))
parser.add_argument(
'-t',
'--target',
metavar='TARGET_DIR',
dest='target',
required=False,
help='Local, target directory to synchronize')
parser.add_argument(
'-l',
'--log',
metavar='LOG_FILE',
dest='log_file',
required=False,
help='Log file to hold mirroring status/records')
parser.add_argument(
'-b',
'--bernese-loadvar',
metavar='BERN_LOADVAR',
dest='bern_loadvar',
required=False,
help='Specify a Bernese source file (i.e. the file BERN52/GPS/EXE/LOADGPS.setvar) which can be sourced; if such a file is set, then the local target directory is defined by the variable $X\GEN')
parser.add_argument('--verbose',
dest='verbose',
action='store_true',
help='Trigger verbose run (prints debug messages).')
if __name__ == '__main__':
args = parser.parse_args()
## verbose print
verboseprint = print if args.verbose else lambda *a, **k: None
## we must at least have either a target (local) directory or a loadvar
##+ file
if not args.target and not args.bern_loadvar:
print('[ERROR] Must at least specify either a target dir or a LOADVAR file', file=sys.stderr)
sys.exit(1)
## get the local, target dir
if args.bern_loadvar:
if not os.path.isfile(args.bern_loadvar):
print('[ERROR] Failed to find LOADVAR file {:}; exiting'.format(args.bern_loadvar), file=sys.stderr)
sys.exit(1)
target_path = blvar.parse_loadvar(args.bern_loadvar)['X']
target_dir = os.path.join(target_path, 'GEN')
else:
target_dir = args.target
if not os.path.isdir(target_dir):
print('[ERROR] Local GEN path does not exist: {:}'.format(target_dir), file=sys.stderr)
sys.exit(1)
## mirror one-liner that uses lftp
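## --only-newer transfers only files newer than the local copies, --parallel=3 runs three
## transfers at a time, and --exclude-glob *.EPH skips the .EPH (ephemeris) files.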
lcmd = "mirror --only-newer --parallel=3 --verbose --exclude-glob *.EPH {remote_dir} {local_dir}; bye".format(remote_dir='BSWUSER52/GEN', local_dir=target_dir)
#result = subprocess.run(
# ['lftp', '-u', '{:},{:}'.format('anonymous', '<EMAIL>'), '-e', '{:}'.format(lcmd), '{:}'.format('ftp.aiub.unibe.ch')], shell=False, check=True)
result = subprocess.run(
['lftp', '-u', '{:},{:}'.format('anonymous', '<EMAIL>'), '-e', '{:}'.format(lcmd), '{:}'.format('ftp.aiub.unibe.ch')], shell=False, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result.returncode:
print('[ERROR] Mirroring failed with errorcode: {:}'.format(result.returncode), file=sys.stderr)
print('[ERROR] lftp/shell return lines: {:}'.format(result.stderr))
else:
if args.log_file:
with open(args.log_file, 'w') as log:
print(result.stdout, file=log)
sys.exit(0)
| 2.28125
| 2
|
infra/asynchttp/requester.py
|
kokokuo/crawler-TW-hotels
| 4
|
12775664
|
from urllib import parse
from typing import Optional
from logging import Logger
from lxml import etree
from aiohttp import ClientSession
from tenacity import retry
from tenacity import retry_if_exception_type, stop_after_attempt, wait_fixed
from infra.excepts.types import ReqSysAbnoramlError
from infra.asynchttp.resp import SyncHttpResponse
from settings.config import Config
class RetryableRequester(object):
def __init__(self, logger: Logger, abnormal_url: str) -> None:
self._logger = logger
self._abnormal_url = abnormal_url
@retry(stop=stop_after_attempt(3), wait=wait_fixed(5), retry=retry_if_exception_type(ReqSysAbnoramlError))
async def get(self,
url: str,
params: dict,
headers: Optional[dict] = None,
cookies: Optional[dict] = None) -> SyncHttpResponse:
try:
encoded_params = parse.urlencode(params)
async with ClientSession() as session:
async with session.get(url, params=encoded_params, headers=headers, cookies=cookies) as resp:
sync_resp = SyncHttpResponse(await resp.read(),
await resp.text(),
resp.status,
resp.headers,
resp.cookies,
resp.url.human_repr())
self._logger.debug(f"Response Cookies: {sync_resp.cookies}")
await self._check_does_normal_resp(sync_resp)
return sync_resp
except ReqSysAbnoramlError as rse:
self._logger.warning(f" [ Warning ] 請求網址的回應異常 ! ")
self._logger.warning(f" 請求網址 : {url} | params: {params} | headers: {headers} | cookies: {cookies}")
self._logger.warning(f" 回應網址 : {rse.url} | 頁面狀態碼: {rse.http_code}\n" + rse.content)
raise rse
async def _check_does_normal_resp(self, resp: SyncHttpResponse) -> bool:
if resp.url == self._abnormal_url:
lxmltree = etree.HTML(resp.raw_content)
content = etree.tostring(lxmltree, method='html', pretty_print=True).decode('utf-8')
raise ReqSysAbnoramlError(resp.status_code, "Failed to parse hotel data: every field is None", resp.url, content)
return True
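# Hypothetical usage sketch (URLs and logger are illustrative, not from the source):
# import asyncio, logging
# requester = RetryableRequester(logging.getLogger(__name__), abnormal_url="https://example.com/error")
# resp = asyncio.run(requester.get("https://example.com/hotels", params={"page": 1}))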
| 2.390625
| 2
|
train/2_mnist_2.py
|
asysbang/tensorflow
| 0
|
12775665
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import tensorflow as tf
from tensorflow import keras
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_labels = train_labels[:1000]
test_labels = test_labels[:1000]
train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0  # note: the input is flattened to 28*28, not shaped (28, 28)
test_images = test_images[:1000].reshape(-1, 28 * 28) / 255.0
def create_model():
model = keras.models.Sequential([
keras.layers.Dense(512, activation='relu', input_shape=(784,)),
keras.layers.Dropout(0.2),
keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
return model
model = create_model()
model.summary()
checkoutpoint_path = 'checkout/cp.ckpt'
checkoutpoint_dir = os.path.dirname(checkoutpoint_path)
cp_callback = keras.callbacks.ModelCheckpoint(filepath=checkoutpoint_path,
save_weights_only=True,
verbose=1)
model.fit(train_images, train_labels, epochs=10, validation_data=(test_images, test_labels), callbacks=[cp_callback])
print(os.listdir(checkoutpoint_dir))  # list the saved checkpoint files
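# A minimal sketch (illustrative, not part of the original script): restore the saved weights
# into a fresh model and evaluate it.
# restored = create_model()
# restored.load_weights(checkoutpoint_path)
# loss, acc = restored.evaluate(test_images, test_labels, verbose=2)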
| 2.53125
| 3
|
tests/test_bvi_utils_paths.py
|
brainvisa/brainvisa-installer
| 0
|
12775666
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import pytest
import os
from os.path import relpath
from brainvisa.installer.bvi_utils.paths import Paths
FULLPATH = os.path.dirname(os.path.abspath(__file__))
ROOTPATH = "/" + relpath(FULLPATH + "/../.", "/")
def test_bvi_utils_Paths():
assert Paths.BV == ROOTPATH
assert Paths.BV_PYTHON == ROOTPATH + '/python'
assert Paths.BV_SHARE == ROOTPATH + '/share'
assert Paths.BVI_SHARE == ROOTPATH + '/share/brainvisa/installer'
assert Paths.BVI_SHARE_XML == ROOTPATH + '/share/brainvisa/installer/xml'
assert Paths.BVI_SHARE_IMAGES == ROOTPATH + '/share/brainvisa/installer/images'
assert Paths.BVI_SHARE_LICENSES == ROOTPATH + \
'/share/brainvisa/installer/licenses'
assert Paths.BVI_CONFIGURATION == ROOTPATH + \
'/share/brainvisa/installer/xml/configuration.xml'
@pytest.mark.win32
def test_bvi_utils_Paths_Binary_win():
assert Paths.BV_ENV == 'bv_env.exe'
assert Paths.BV_PACKAGING == 'bv_packaging'
assert Paths.IFW_BINARYCREATOR == 'binarycreator.exe'
assert Paths.IFW_REPOGEN == 'repogen.exe'
assert Paths.IFW_ARCHIVEGEN == 'archivegen.exe'
@pytest.mark.linux
def test_bvi_utils_Paths_Binary_linux():
assert Paths.BV_ENV == 'bv_env'
assert Paths.BV_PACKAGING == 'bv_packaging'
assert Paths.IFW_BINARYCREATOR == 'binarycreator'
assert Paths.IFW_REPOGEN == 'repogen'
assert Paths.IFW_ARCHIVEGEN == 'archivegen'
@pytest.mark.osx
def test_bvi_utils_Paths_Binary_osx():
assert Paths.BV_ENV == 'bv_env'
assert Paths.BV_PACKAGING == 'bv_packaging'
assert Paths.IFW_BINARYCREATOR == 'binarycreator'
assert Paths.IFW_REPOGEN == 'repogen'
assert Paths.IFW_ARCHIVEGEN == 'archivegen'
| 2.015625
| 2
|
_cloud/installer.py
|
pythonista-cloud/cloud
| 4
|
12775667
|
"""Handles the installation of downloaded modules."""
import os
import shutil
import tempfile
import zipfile
from _cloud import utils
def install(zip_buffer, metadata):
"""Install a module once it has been downloaded locally.
Takes the GitHub repo zipped up in a BytesIO, as well as all the metadata
about the package.
"""
# Initial extraction (to a temporary directory)
archive = zipfile.ZipFile(zip_buffer)
extract_to = tempfile.gettempdir()
archive.extractall(extract_to)
# Moving of the main module to a site-packages dir
extracted = os.path.join(extract_to, archive.namelist()[0])
source = os.path.join(extracted, metadata["entry_point"])
destination = os.path.join(
utils.pick_site_dir(metadata["py_versions"]),
os.path.basename(metadata["entry_point"])
)
shutil.move(source, destination)
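# Hypothetical usage (URL and metadata values are illustrative, not from the source):
# import io, requests
# data = requests.get("https://github.com/user/repo/archive/master.zip").content
# install(io.BytesIO(data), {"entry_point": "mymodule.py", "py_versions": [3]})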
| 2.421875
| 2
|
misc/joystick_servo_control.py
|
HuiminHe/rl-swing
| 0
|
12775668
|
from __future__ import division
from __future__ import print_function
import sys
sys.path.append('..')
from joystick import JoyStick
from servo import ServoController
if __name__ == '__main__':
sc = ServoController(angle_range=80)
js = JoyStick()
while True:
# read serial
v = js.read()
# convert to servo angle
angle = sc.analog2deg(v)
print(angle)
for i in range(len(angle)):
sc.control([i, angle[i]])
| 2.65625
| 3
|
datumaro/plugins/sly_pointcloud_format/format.py
|
certiware/posemaro
| 0
|
12775669
|
# Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
class PointCloudPath:
BASE_DIR = 'ds0'
ANNNOTATION_DIR = 'ann'
DEFAULT_IMAGE_EXT = '.jpg'
POINT_CLOUD_DIR = 'pointcloud'
RELATED_IMAGES_DIR = 'related_images'
KEY_ID_FILE = 'key_id_map.json'
META_FILE = 'meta.json'
SPECIAL_ATTRS = {'description', 'track_id',
'labelerLogin', 'createdAt', 'updatedAt', 'frame'}
| 1.203125
| 1
|
setup.py
|
pmaris/pynextbus
| 1
|
12775670
|
<reponame>pmaris/pynextbus
from setuptools import setup, find_packages
with open('README.md', 'r') as readme:
long_description = readme.read()
setup(
name='py_nextbus',
version='0.1.2',
author='<NAME>',
description='Minimalistic Python client for the NextBus public API for real-time transit ' \
'arrival data',
long_description=long_description,
long_description_content_type="text/markdown",
test_suite='tests.py',
url='https://github.com/pmaris/py_nextbus',
packages=find_packages(),
python_requires='>=3',
classifiers=(
"Programming Language :: Python :: 3.6 ",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
)
| 1.609375
| 2
|
dgn/invertible_layers.py
|
matt-graham/differentiable-generator-networks
| 1
|
12775671
|
<reponame>matt-graham/differentiable-generator-networks<filename>dgn/invertible_layers.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""Invertible density network layer definitions."""
__authors__ = '<NAME>'
__license__ = 'MIT'
import numpy as np
import theano as th
import theano.tensor as tt
import theano.tensor.slinalg as slinalg
from theano_cpu_ops import (
log_det, lower_triangular_solve, upper_triangular_solve)
class DensityNetworkLayer(object):
""" Base class for invertible density network layers. """
def __init__(self, params):
self.params = params
def param_log_prior(self):
return tt.constant(0.)
def forward_map(self, x):
raise NotImplementedError()
def inverse_map(self, y):
raise NotImplementedError()
def forward_jacobian_log_det(self, x):
raise NotImplementedError()
def compile_theano_functions(self):
""" Compile functions from symbolic theano methods defined in class.
Intended only to be used for unit testing of methods therefore not
called by default during construction of object as generally a
whole symbolic computational graph should be compiled from the
composition of multiple layers rather than compiling functions for
each layer separately.
"""
x_batch = tt.matrix('x_batch')
x_point = tt.vector('x_point')
y_batch = tt.matrix('y_batch')
y_point = tt.vector('y_point')
self.forward_map_batch = th.function(
inputs=[x_batch],
outputs=self.forward_map(x_batch)
)
self.forward_map_point = th.function(
inputs=[x_point],
outputs=self.forward_map(x_point)
)
self.inverse_map_batch = th.function(
inputs=[y_batch],
outputs=self.inverse_map(y_batch)
)
self.inverse_map_point = th.function(
inputs=[y_point],
outputs=self.inverse_map(y_point)
)
self.forward_jacobian_log_det_batch = th.function(
inputs=[x_batch],
outputs=self.forward_jacobian_log_det(x_batch),
on_unused_input='ignore'
)
self.forward_jacobian_log_det_point = th.function(
inputs=[x_point],
outputs=self.forward_jacobian_log_det(x_point),
on_unused_input='ignore'
)
class LeapfrogLayer(DensityNetworkLayer):
"""
Layer applying invertible iterated leapfrog type transformation.
"""
def __init__(self, map_1, map_2, split, n_iter=1):
self.map_1 = map_1
self.map_2 = map_2
self.split = split
self.n_iter = n_iter
super(LeapfrogLayer, self).__init__(
self.map_1.params + self.map_2.params)
def param_log_prior(self):
return self.map_1.param_log_prior() + self.map_2.param_log_prior()
def forward_map(self, x):
if x.ndim == 1:
x = x.reshape((1, -1))
n_dim_orig = 1
elif x.ndim == 2:
n_dim_orig = 2
else:
raise ValueError('x must be one or two dimensional.')
x1, x2 = x[:, :self.split], x[:, self.split:]
for s in range(self.n_iter):
y1 = x1 + self.map_1(x2)
y2 = x2 + self.map_2(y1)
x1, x2 = y1, y2
y = tt.join(1, y1, y2)
if n_dim_orig == 1:
y = y.flatten()
return y
def inverse_map(self, y):
if y.ndim == 1:
y = y.reshape((1, -1))
n_dim_orig = 1
elif y.ndim == 2:
n_dim_orig = 2
else:
raise ValueError('y must be one or two dimensional.')
y1, y2 = y[:, :self.split], y[:, self.split:]
for s in range(self.n_iter):
x2 = y2 - self.map_2(y1)
x1 = y1 - self.map_1(x2)
y1, y2 = x1, x2
x = tt.join(1, x1, x2)
if n_dim_orig == 1:
x = x.flatten()
return x
def forward_jacobian_log_det(self, x):
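# Each additive (leapfrog) update y1 = x1 + f(x2), y2 = x2 + g(y1) has a unit-triangular
# Jacobian, so the overall determinant is 1 and its log is 0.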
return tt.constant(0.)
class AffineLayer(DensityNetworkLayer):
"""
Layer applying general affine transformation.
Forward map: x -> W.dot(x) + b
"""
def __init__(self, weights_init, biases_init, weights_prec=0.,
biases_prec=0., weights_mean=None, biases_mean=None):
assert weights_init.ndim == 2, 'weights_init must be 2D array.'
assert biases_init.ndim == 1, 'biases_init must be 1D array.'
assert weights_init.shape[0] == biases_init.shape[0], \
'Dimensions of weights_init and biases_init must be consistent.'
self.weights = th.shared(weights_init, name='W')
self.biases = th.shared(biases_init, name='b')
self.weights_prec = weights_prec
self.biases_prec = biases_prec
if weights_mean is None:
weights_mean = np.identity(weights_init.shape[0])
if biases_mean is None:
biases_mean = np.zeros_like(biases_init)
self.weights_mean = weights_mean
self.biases_mean = biases_mean
super(AffineLayer, self).__init__([self.weights, self.biases])
def param_log_prior(self):
return -(0.5 * self.weights_prec *
((self.weights - self.weights_mean)**2).sum() +
0.5 * self.biases_prec *
((self.biases - self.biases_mean)**2).sum())
def forward_map(self, x):
return x.dot(self.weights.T) + self.biases
def inverse_map(self, y):
return slinalg.solve(self.weights, (y - self.biases).T).T
def forward_jacobian_log_det(self, x):
if x.ndim == 1:
return log_det(self.weights)
elif x.ndim == 2:
return x.shape[0] * log_det(self.weights)
else:
raise ValueError('x must be one or two dimensional.')
class DiagonalAffineLayer(DensityNetworkLayer):
"""
Layer applying restricted affine transformation.
Matrix restricted to diagonal transformation.
Forward map: x -> diag(d).dot(x) + b
"""
def __init__(self, diag_weights_init, biases_init,
diag_weights_prec=0., biases_prec=0.,
diag_weights_mean=None, biases_mean=None):
assert diag_weights_init.ndim == 1, (
'diag_weights_init must be 1D array.')
assert biases_init.ndim == 1, 'biases_init must be 1D array.'
assert diag_weights_init.size == biases_init.size, (
'Dimensions of diag_weights_init and biases_init inconsistent.')
self.diag_weights = th.shared(diag_weights_init, name='d')
self.biases = th.shared(biases_init, name='b')
self.diag_weights_prec = diag_weights_prec
self.biases_prec = biases_prec
if diag_weights_mean is None:
diag_weights_mean = np.ones_like(diag_weights_init)
if biases_mean is None:
biases_mean = np.zeros_like(biases_init)
self.diag_weights_mean = diag_weights_mean
self.biases_mean = biases_mean
super(DiagonalAffineLayer, self).__init__(
[self.diag_weights, self.biases])
def param_log_prior(self):
return -(0.5 * self.diag_weights_prec *
((self.diag_weights - self.diag_weights_mean)**2).sum() +
0.5 * self.biases_prec *
((self.biases - self.biases_mean)**2).sum())
def forward_map(self, x):
if x.ndim == 1:
return x * self.diag_weights + self.biases
elif x.ndim == 2:
return x * self.diag_weights + self.biases
else:
raise ValueError('x must be one or two dimensional.')
def inverse_map(self, y):
return (y - self.biases) / self.diag_weights
def forward_jacobian_log_det(self, x):
if x.ndim == 1:
return tt.log(tt.abs_(self.diag_weights)).sum()
elif x.ndim == 2:
return x.shape[0] * tt.log(tt.abs_(self.diag_weights)).sum()
else:
raise ValueError('x must be one or two dimensional.')
class DiagPlusRank1AffineLayer(DensityNetworkLayer):
"""
Layer applying restricted affine transformation.
Matrix restricted to diagonal plus rank-1 transformation.
Forward map: x -> (diag(d) + outer(u, v)).dot(x) + b
"""
def __init__(self, diag_weights_init, u_vect_init, v_vect_init,
biases_init, diag_weights_prec=0., u_vect_prec=0.,
v_vect_prec=0., biases_prec=0., diag_weights_mean=None,
u_vect_mean=None, v_vect_mean=None, biases_mean=None):
assert diag_weights_init.ndim == 1, (
'diag_weights_init must be 1D array.')
assert u_vect_init.ndim == 1, 'u_vect_init must be 1D array.'
assert v_vect_init.ndim == 1, 'v_vect_init must be 1D array.'
assert biases_init.ndim == 1, 'biases_init must be 1D array.'
assert (diag_weights_init.size == u_vect_init.size and
diag_weights_init.size == v_vect_init.size and
diag_weights_init.size == biases_init.size), (
'Dimensions of diag_weights_init, u_vect_unit,'
' v_vect_init and biases_init inconsistent.')
self.diag_weights = th.shared(diag_weights_init, name='d')
self.u_vect = th.shared(u_vect_init, name='u')
self.v_vect = th.shared(v_vect_init, name='v')
self.biases = th.shared(biases_init, name='b')
self.diag_weights_prec = diag_weights_prec
self.u_vect_prec = u_vect_prec
self.v_vect_prec = v_vect_prec
self.biases_prec = biases_prec
if diag_weights_mean is None:
diag_weights_mean = np.ones_like(diag_weights_init)
if u_vect_mean is None:
u_vect_mean = np.zeros_like(u_vect_init)
if v_vect_mean is None:
v_vect_mean = np.zeros_like(v_vect_init)
if biases_mean is None:
biases_mean = np.zeros_like(biases_init)
self.diag_weights_mean = diag_weights_mean
self.u_vect_mean = u_vect_mean
self.v_vect_mean = v_vect_mean
self.biases_mean = biases_mean
super(DiagPlusRank1AffineLayer, self).__init__(
[self.diag_weights, self.u_vect, self.v_vect, self.biases])
def param_log_prior(self):
return -(0.5 * self.diag_weights_prec *
((self.diag_weights - self.diag_weights_mean)**2).sum() +
0.5 * self.u_vect_prec *
((self.u_vect - self.u_vect_mean)**2).sum() +
0.5 * self.v_vect_prec *
((self.v_vect - self.v_vect_mean)**2).sum() +
0.5 * self.biases_prec *
((self.biases - self.biases_mean)**2).sum())
def forward_map(self, x):
if x.ndim == 1:
return (x * self.diag_weights + self.u_vect * x.dot(self.v_vect)
+ self.biases)
elif x.ndim == 2:
return (x * self.diag_weights +
self.u_vect[None, :] * (x.dot(self.v_vect)[:, None]) +
self.biases)
else:
raise ValueError('x must be one or two dimensional.')
def inverse_map(self, y):
z = (y - self.biases) / self.diag_weights
u_vect_over_diag_weights = (self.u_vect / self.diag_weights)
if y.ndim == 1:
return (z - u_vect_over_diag_weights *
(z.dot(self.v_vect)) /
(1 + self.v_vect.dot(u_vect_over_diag_weights)))
elif y.ndim == 2:
return (z - u_vect_over_diag_weights[None, :] *
(z.dot(self.v_vect))[:, None] /
(1 + self.v_vect.dot(u_vect_over_diag_weights)))
else:
raise ValueError('y must be one or two dimensional.')
def forward_jacobian_log_det(self, x):
if x.ndim == 1:
return (tt.log(tt.abs_(1 + self.v_vect.dot(self.u_vect /
self.diag_weights))) +
tt.log(tt.abs_(self.diag_weights)).sum())
elif x.ndim == 2:
return x.shape[0] * (
tt.log(tt.abs_(1 + self.v_vect.dot(self.u_vect /
self.diag_weights))) +
tt.log(tt.abs_(self.diag_weights)).sum()
)
else:
raise ValueError('x must be one or two dimensional.')
class TriangularAffineLayer(DensityNetworkLayer):
"""
Layer applying restricted affine transformation.
Matrix restricted to be triangular.
Forward map:
if lower:
x -> tril(W).dot(x) + b
else:
x -> triu(W).dot(x) + b
"""
def __init__(self, weights_init, biases_init, lower=False,
weights_prec=0., biases_prec=0., weights_mean=None,
biases_mean=None):
assert weights_init.ndim == 2, 'weights_init must be 2D array.'
assert biases_init.ndim == 1, 'biases_init must be 1D array.'
assert weights_init.shape[0] == biases_init.shape[0], \
'Dimensions of weights_init and biases_init must be consistent.'
self.lower = lower
self.weights = th.shared(weights_init, name='W')
self.weights_tri = (tt.tril(self.weights)
if lower else tt.triu(self.weights))
self.biases = th.shared(biases_init, name='b')
self.weights_prec = weights_prec
self.biases_prec = biases_prec
if weights_mean is None:
weights_mean = np.eye(weights_init.shape[0])
if biases_mean is None:
biases_mean = np.zeros_like(biases_init)
self.weights_mean = (np.tril(weights_mean)
if lower else np.triu(weights_mean))
self.biases_mean = biases_mean
super(TriangularAffineLayer, self).__init__(
[self.weights, self.biases])
def param_log_prior(self):
return -(0.5 * self.weights_prec *
((self.weights_tri - self.weights_mean)**2).sum()
+ 0.5 * self.biases_prec *
((self.biases - self.biases_mean)**2).sum())
def forward_map(self, x):
return x.dot(self.weights_tri.T) + self.biases
def inverse_map(self, y):
if self.lower:
return lower_triangular_solve(self.weights_tri,
(y - self.biases).T).T
else:
return upper_triangular_solve(self.weights_tri,
(y - self.biases).T).T
def forward_jacobian_log_det(self, x):
if x.ndim == 1:
return tt.log(tt.abs_(tt.nlinalg.diag(self.weights))).sum()
elif x.ndim == 2:
return (x.shape[0] *
tt.log(tt.abs_(tt.nlinalg.diag(self.weights))).sum())
else:
raise ValueError('x must be one or two dimensional.')
class ElementwiseLayer(DensityNetworkLayer):
"""
Layer applying bijective elementwise transformation.
Forward map: x -> f(x)
"""
def __init__(self, forward_func, inverse_func, fudge=0.):
self.forward_func = forward_func
self.inverse_func = inverse_func
self.fudge = fudge
super(ElementwiseLayer, self).__init__([])
def forward_map(self, x):
return self.forward_func(x)
def inverse_map(self, y):
return self.inverse_func(y)
def forward_jacobian_log_det(self, x):
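# For an elementwise map the Jacobian is diagonal, so the log-determinant is the sum over
# elements of the log derivative; the scan below evaluates f' at each element of x (no
# absolute value is taken, so the forward function is assumed to be increasing).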
dy_dx, _ = th.scan(lambda x_i: th.grad(self.forward_func(x_i), x_i),
sequences=[x.flatten()])
if self.fudge != 0.:
return tt.log(dy_dx + self.fudge).sum()
else:
return tt.log(dy_dx).sum()
class FwdDiagInvElementwiseLayer(DiagonalAffineLayer):
"""
Layer applying forward elementwise map, diagonal scaling then inverse map.
Forward map: x -> f(d * g(x)) where g(f(x)) = f(g(x)) = x
"""
def __init__(self, forward_func, inverse_func,
diag_weights_init, biases_init,
diag_weights_prec=0., biases_prec=0.,
diag_weights_mean=None, biases_mean=None,
fudge=0.):
self.forward_func = forward_func
self.inverse_func = inverse_func
self.fudge = fudge
super(FwdDiagInvElementwiseLayer, self).__init__(
diag_weights_init, biases_init, diag_weights_prec,
biases_prec, diag_weights_mean, biases_mean)
def forward_map(self, x):
return self.forward_func(self.diag_weights * self.inverse_func(x) +
self.biases)
def inverse_map(self, y):
return self.forward_func((self.inverse_func(y) - self.biases) /
self.diag_weights)
def forward_jacobian_log_det(self, x):
y_sum = self.forward_map(x).sum()
dy_dx = th.grad(y_sum, x)
if self.fudge != 0.:
return tt.log(dy_dx + self.fudge).sum()
else:
return tt.log(dy_dx).sum()
class PermuteDimensionsLayer(DensityNetworkLayer):
"""
Layer applying permutation of dimensions.
Forward map: x -> x[perm]
"""
def __init__(self, perm):
self.perm = th.shared(perm, name='perm')
super(PermuteDimensionsLayer, self).__init__([])
def forward_map(self, x):
return tt.permute_row_elements(x, self.perm)
def inverse_map(self, y):
return tt.permute_row_elements(y, self.perm, inverse=True)
def forward_jacobian_log_det(self, x):
return tt.constant(0.)
| 2.59375
| 3
|
linux/lib/python2.7/dist-packages/samba/provision/common.py
|
nmercier/linux-cross-gcc
| 3
|
12775672
|
# Unix SMB/CIFS implementation.
# utility functions for provisioning a Samba4 server
# Copyright (C) <NAME> <<EMAIL>> 2007-2010
# Copyright (C) <NAME> <<EMAIL>> 2008-2009
# Copyright (C) <NAME> <<EMAIL>> 2008-2009
#
# Based on the original in EJS:
# Copyright (C) <NAME> <<EMAIL>> 2005
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Functions for setting up a Samba configuration."""
__docformat__ = "restructuredText"
import os
from samba import read_and_sub_file
from samba.param import setup_dir
FILL_FULL = "FULL"
FILL_SUBDOMAIN = "SUBDOMAIN"
FILL_NT4SYNC = "NT4SYNC"
FILL_DRS = "DRS"
def setup_path(file):
"""Return an absolute path to the provision tempate file specified by file"""
return os.path.join(setup_dir(), file)
def setup_add_ldif(ldb, ldif_path, subst_vars=None,controls=["relax:0"]):
"""Setup a ldb in the private dir.
:param ldb: LDB file to import data into
:param ldif_path: Path of the LDIF file to load
:param subst_vars: Optional variables to substitute in LDIF.
:param controls: Optional list of controls, can be None for no controls
"""
assert isinstance(ldif_path, str)
data = read_and_sub_file(ldif_path, subst_vars)
ldb.add_ldif(data, controls)
def setup_modify_ldif(ldb, ldif_path, subst_vars=None,controls=["relax:0"]):
"""Modify a ldb in the private dir.
:param ldb: LDB object.
:param ldif_path: LDIF file path.
:param subst_vars: Optional dictionary with substitution variables.
"""
data = read_and_sub_file(ldif_path, subst_vars)
ldb.modify_ldif(data, controls)
def setup_ldb(ldb, ldif_path, subst_vars):
"""Import a LDIF a file into a LDB handle, optionally substituting
variables.
:note: Either all LDIF data will be added or none (using transactions).
:param ldb: LDB file to import into.
:param ldif_path: Path to the LDIF file.
:param subst_vars: Dictionary with substitution variables.
"""
assert ldb is not None
ldb.transaction_start()
try:
setup_add_ldif(ldb, ldif_path, subst_vars)
except:
ldb.transaction_cancel()
raise
else:
ldb.transaction_commit()
| 2.40625
| 2
|
lib/file.py
|
encodingl/skadmin
| 0
|
12775673
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from subprocess import Popen, PIPE, STDOUT, call
import os
def new_file(file_name,file_content):
with open(file_name,"a+") as f:
f.write(file_content)
def get_ex_link(hosts,dir):
script_link_read = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))+ "/scripts/skdeploy/linkread.py"
cmd = "ansible %s -m script -a '%s -r %s'" % (hosts,script_link_read,dir)
pcmd = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
retcode_message=pcmd.communicate()
r1 = retcode_message[0]
print r1
r2 = r1.split(" => ")[1]
print r2
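# `true` is defined so that eval() of the ansible JSON-style output below does not fail on a bare `true`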
true = "true"
dic = eval(r2)
ex_link = dic["stdout_lines"][0]
return ex_link
if __name__ == "__main__":
print get_ex_link(hosts="yyappgw", dir="/opt/soft/tomcat/yyappgw/webapps/ROOT")
| 2.375
| 2
|
sql/admin.insert.py
|
Jasata/utu-schooner
| 0
|
12775674
|
#!/bin/env python3
#
# Schooner - Simple Course Management System
# admin.insert.py / Add script owner as an Admin
# University of Turku / Faculty of Technology / Department of Computing
# <NAME> <<EMAIL>>
#
# 2021-08-13 Initial version.
#
# NOTE: Using Psycopg 3 (dev2) 2021-08-13
#
import os
import pwd
import syslog
import psycopg
# Owner of this file - the user who pulled/cloned this repository
GITUSER = pwd.getpwuid(os.stat(__file__).st_uid).pw_name
# This syntax doesn't work with psycopg3.. ? To-be-investigated...
#with psycopg.connect(dbname="schooner" user="postgres") as conn:
with psycopg.connect("dbname=schooner user=postgres") as conn:
with conn.cursor() as cur:
try:
cur.execute(
"INSERT INTO admin (uid) VALUES (%(uid)s)",
{ "uid" : GITUSER }
)
except psycopg.Error as e:
syslog.syslog(
"Database error: " + e + ", SQL: " + cur.query
)
os._exit(1)
else:
syslog.syslog(
f"{GITUSER} added as an Admin"
)
| 2.234375
| 2
|
formalchemy/tests/test_fieldset.py
|
samuelchen/formalchemy
| 2
|
12775675
|
__doc__ = r"""
>>> from formalchemy.tests import *
>>> FieldSet.default_renderers = original_renderers.copy()
# some low-level testing first
>>> fs = FieldSet(order1)
>>> fs._raw_fields()
[AttributeField(id), AttributeField(user_id), AttributeField(quantity), AttributeField(user)]
>>> fs.user.name
'user_id'
>>> fs = FieldSet(bill)
>>> fs._raw_fields()
[AttributeField(id), AttributeField(email), AttributeField(password), AttributeField(name), AttributeField(orders)]
>>> fs.orders.name
'orders'
binding should not change attribute order:
>>> fs = FieldSet(User)
>>> fs_bound = fs.bind(User)
>>> fs_bound._fields.values()
[AttributeField(id), AttributeField(email), AttributeField(password), AttributeField(name), AttributeField(orders)]
>>> fs = FieldSet(User2)
>>> fs._raw_fields()
[AttributeField(user_id), AttributeField(address_id), AttributeField(name), AttributeField(address)]
>>> fs.render() #doctest: +ELLIPSIS
Traceback (most recent call last):
...
Exception: No session found...
>>> fs = FieldSet(One)
>>> fs.configure(pk=True, focus=None)
>>> fs.id.is_required()
True
>>> print fs.render()
<div>
<label class="field_req" for="One--id">
Id
</label>
<input id="One--id" name="One--id" type="text" />
</div>
>>> fs = FieldSet(Two)
>>> fs
<FieldSet with ['id', 'foo']>
>>> fs.configure(pk=True)
>>> fs
<FieldSet (configured) with ['id', 'foo']>
>>> print fs.render()
<div>
<label class="field_req" for="Two--id">
Id
</label>
<input id="Two--id" name="Two--id" type="text" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Two--id").focus();
//]]>
</script>
<div>
<label class="field_opt" for="Two--foo">
Foo
</label>
<input id="Two--foo" name="Two--foo" type="text" value="133" />
</div>
>>> fs = FieldSet(Two)
>>> print fs.render()
<div>
<label class="field_opt" for="Two--foo">
Foo
</label>
<input id="Two--foo" name="Two--foo" type="text" value="133" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Two--foo").focus();
//]]>
</script>
>>> fs = FieldSet(Two)
>>> fs.configure(options=[fs.foo.label('A custom label')])
>>> print fs.render()
<div>
<label class="field_opt" for="Two--foo">
A custom label
</label>
<input id="Two--foo" name="Two--foo" type="text" value="133" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Two--foo").focus();
//]]>
</script>
>>> fs.configure(options=[fs.foo.label('')])
>>> print fs.render()
<div>
<label class="field_opt" for="Two--foo">
</label>
<input id="Two--foo" name="Two--foo" type="text" value="133" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Two--foo").focus();
//]]>
</script>
>>> fs = FieldSet(Two)
>>> assert fs.render() == configure_and_render(fs, include=[fs.foo])
>>> assert fs.render() == configure_and_render(fs, exclude=[fs.id])
>>> fs = FieldSet(Two)
>>> fs.configure(include=[fs.foo.hidden()])
>>> print fs.render()
<input id="Two--foo" name="Two--foo" type="hidden" value="133" />
>>> fs = FieldSet(Two)
>>> fs.configure(include=[fs.foo.dropdown([('option1', 'value1'), ('option2', 'value2')])])
>>> print fs.render()
<div>
<label class="field_opt" for="Two--foo">
Foo
</label>
<select id="Two--foo" name="Two--foo">
<option value="value1">
option1
</option>
<option value="value2">
option2
</option>
</select>
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Two--foo").focus();
//]]>
</script>
>>> fs = FieldSet(Two)
>>> assert configure_and_render(fs, include=[fs.foo.dropdown([('option1', 'value1'), ('option2', 'value2')])]) == configure_and_render(fs, options=[fs.foo.dropdown([('option1', 'value1'), ('option2', 'value2')])])
>>> print pretty_html(fs.foo.with_html(onblur='test()').render())
<select id="Two--foo" name="Two--foo" onblur="test()">
<option value="value1">
option1
</option>
<option value="value2">
option2
</option>
</select>
>>> print fs.foo.reset().with_html(onblur='test').render()
<input id="Two--foo" name="Two--foo" onblur="test" type="text" value="133" />
# Test with_metadata()
>>> fs = FieldSet(Three)
>>> fs.configure(include=[fs.foo.with_metadata(instructions=u'Answer well')])
>>> print fs.render()
<div>
<label class="field_opt" for="Three--foo">
Foo
</label>
<input id="Three--foo" name="Three--foo" type="text" />
<span class="instructions">
Answer well
</span>
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Three--foo").focus();
//]]>
</script>
# test sync
>>> print session.query(One).count()
0
>>> fs_1 = FieldSet(One, data={}, session=session)
>>> fs_1.sync()
>>> session.flush()
>>> print session.query(One).count()
1
>>> session.rollback()
>>> twof = TwoFloat(id=1, foo=32.2)
>>> fs_twof = FieldSet(twof)
>>> print '%.1f' % fs_twof.foo.value
32.2
>>> print pretty_html(fs_twof.foo.render())
<input id="TwoFloat-1-foo" name="TwoFloat-1-foo" type="text" value="32.2" />
>>> import datetime
>>> twoi = TwoInterval(id=1, foo=datetime.timedelta(2.2))
>>> fs_twoi = FieldSet(twoi)
>>> fs_twoi.foo.renderer
<IntervalFieldRenderer for AttributeField(foo)>
>>> fs_twoi.foo.value
datetime.timedelta(2, 17280)
>>> print pretty_html(fs_twoi.foo.render())
<input id="TwoInterval-1-foo" name="TwoInterval-1-foo" type="text" value="2.17280" />
>>> fs_twoi.rebind(data={"TwoInterval-1-foo": "3.1"})
>>> fs_twoi.sync()
>>> new_twoi = fs_twoi.model
>>> new_twoi.foo == datetime.timedelta(3.1)
True
# test render and sync fatypes.Numeric
# http://code.google.com/p/formalchemy/issues/detail?id=41
>>> twon = TwoNumeric(id=1, foo=Decimal('2.3'))
>>> fs_twon = FieldSet(twon)
>>> print pretty_html(fs_twon.foo.render())
<input id="TwoNumeric-1-foo" name="TwoNumeric-1-foo" type="text" value="2.3" />
>>> fs_twon.rebind(data={"TwoNumeric-1-foo": "6.7"})
>>> fs_twon.sync()
>>> new_twon = fs_twon.model
>>> new_twon.foo == Decimal("6.7")
True
# test sync when TwoNumeric-1-foo is empty
>>> fs_twon.rebind(data={"TwoNumeric-1-foo": ""})
>>> fs_twon.sync()
>>> new_twon = fs_twon.model
>>> str(new_twon.foo)
'None'
>>> fs_cb = FieldSet(CheckBox)
>>> fs_cb.field.value is None
True
>>> print pretty_html(fs_cb.field.dropdown().render())
<select id="CheckBox--field" name="CheckBox--field">
<option value="True">
Yes
</option>
<option value="False">
No
</option>
</select>
# test no checkbox/radio submitted
>>> fs_cb.rebind(data={})
>>> fs_cb.field.raw_value is None
True
>>> fs_cb.field.value
False
>>> fs_cb.field.renderer.value is None
True
>>> print fs_cb.field.render()
<input id="CheckBox--field" name="CheckBox--field" type="checkbox" value="True" />
>>> fs_cb.field.renderer #doctest: +ELLIPSIS
<CheckBoxFieldRenderer for AttributeField(field)>
>>> fs_cb.field.renderer._serialized_value() is None
True
>>> print pretty_html(fs_cb.field.radio().render())
<input id="CheckBox--field_0" name="CheckBox--field" type="radio" value="True" />
<label for="CheckBox--field_0">
Yes
</label>
<br />
<input id="CheckBox--field_1" name="CheckBox--field" type="radio" value="False" />
<label for="CheckBox--field_1">
No
</label>
>>> fs_cb.validate()
True
>>> fs_cb.errors
{}
>>> fs_cb.sync()
>>> cb = fs_cb.model
>>> cb.field
False
>>> fs_cb.rebind(data={'CheckBox--field': 'True'})
>>> fs_cb.validate()
True
>>> fs_cb.sync()
>>> cb.field
True
>>> fs_cb.configure(options=[fs_cb.field.dropdown()])
>>> fs_cb.rebind(data={'CheckBox--field': 'False'})
>>> fs_cb.sync()
>>> cb.field
False
>>> fs = FieldSet(Two)
>>> print pretty_html(fs.foo.dropdown(options=['one', 'two']).radio().render())
<input id="Two--foo_0" name="Two--foo" type="radio" value="one" />
<label for="Two--foo_0">
one
</label>
<br />
<input id="Two--foo_1" name="Two--foo" type="radio" value="two" />
<label for="Two--foo_1">
two
</label>
>>> assert fs.foo.radio(options=['one', 'two']).render() == fs.foo.dropdown(options=['one', 'two']).radio().render()
>>> print fs.foo.radio(options=['one', 'two']).dropdown().render()
<select id="Two--foo" name="Two--foo">
<option value="one">one</option>
<option value="two">two</option>
</select>
>>> assert fs.foo.dropdown(options=['one', 'two']).render() == fs.foo.radio(options=['one', 'two']).dropdown().render()
>>> print pretty_html(fs.foo.dropdown(options=['one', 'two'], multiple=True).checkbox().render())
<input id="Two--foo_0" name="Two--foo" type="checkbox" value="one" />
<label for="Two--foo_0">
one
</label>
<br />
<input id="Two--foo_1" name="Two--foo" type="checkbox" value="two" />
<label for="Two--foo_1">
two
</label>
>>> fs = FieldSet(User, session=session)
>>> print fs.render()
<div>
<label class="field_req" for="User--email">
Email
</label>
<input id="User--email" maxlength="40" name="User--email" type="text" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("User--email").focus();
//]]>
</script>
<div>
<label class="field_req" for="User--password">
Password
</label>
<input id="User--password" maxlength="20" name="User--password" type="text" />
</div>
<div>
<label class="field_opt" for="User--name">
Name
</label>
<input id="User--name" maxlength="30" name="User--name" type="text" />
</div>
<div>
<label class="field_opt" for="User--orders">
Orders
</label>
<select id="User--orders" multiple="multiple" name="User--orders" size="5">
<option value="2">
Quantity: 5
</option>
<option value="3">
Quantity: 6
</option>
<option value="1">
Quantity: 10
</option>
</select>
</div>
>>> fs = FieldSet(bill)
>>> print pretty_html(fs.orders.render())
<select id="User-1-orders" multiple="multiple" name="User-1-orders" size="5">
<option value="2">
Quantity: 5
</option>
<option value="3">
Quantity: 6
</option>
<option selected="selected" value="1">
Quantity: 10
</option>
</select>
>>> print pretty_html(fs.orders.checkbox().render())
<input id="User-1-orders_0" name="User-1-orders" type="checkbox" value="2" />
<label for="User-1-orders_0">
Quantity: 5
</label>
<br />
<input id="User-1-orders_1" name="User-1-orders" type="checkbox" value="3" />
<label for="User-1-orders_1">
Quantity: 6
</label>
<br />
<input checked="checked" id="User-1-orders_2" name="User-1-orders" type="checkbox" value="1" />
<label for="User-1-orders_2">
Quantity: 10
</label>
>>> print fs.orders.checkbox(options=session.query(Order).filter_by(id=1)).render()
<input checked="checked" id="User-1-orders_0" name="User-1-orders" type="checkbox" value="1" /><label for="User-1-orders_0">Quantity: 10</label>
>>> fs = FieldSet(bill, data={})
>>> fs.configure(include=[fs.orders.checkbox()])
>>> fs.validate()
True
>>> fs = FieldSet(bill, data={'User-1-orders': ['2', '3']})
>>> print pretty_html(fs.orders.render())
<select id="User-1-orders" multiple="multiple" name="User-1-orders" size="5">
<option selected="selected" value="2">
Quantity: 5
</option>
<option selected="selected" value="3">
Quantity: 6
</option>
<option value="1">
Quantity: 10
</option>
</select>
>>> fs.orders.model_value
[1]
>>> fs.orders.raw_value
[<Order for user 1: 10>]
>>> fs = FieldSet(Two)
>>> print fs.foo.render()
<input id="Two--foo" name="Two--foo" type="text" value="133" />
>>> fs = FieldSet(Two)
>>> print fs.foo.dropdown([('option1', 'value1'), ('option2', 'value2')]).render()
<select id="Two--foo" name="Two--foo">
<option value="value1">option1</option>
<option value="value2">option2</option>
</select>
>>> fs = FieldSet(Order, session)
>>> print fs.render()
<div>
<label class="field_req" for="Order--quantity">
Quantity
</label>
<input id="Order--quantity" name="Order--quantity" type="text" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Order--quantity").focus();
//]]>
</script>
<div>
<label class="field_req" for="Order--user_id">
User
</label>
<select id="Order--user_id" name="Order--user_id">
<option value="1">
Bill
</option>
<option value="2">
John
</option>
</select>
</div>
# this seems particularly prone to errors; break it out in its own test
>>> fs = FieldSet(order1)
>>> fs.user.value
1
# test re-binding
>>> fs = FieldSet(Order)
>>> fs.configure(pk=True, options=[fs.quantity.hidden()])
>>> fs.rebind(order1)
>>> fs.quantity.value
10
>>> fs.session == object_session(order1)
True
>>> print fs.render()
<div>
<label class="field_req" for="Order-1-id">
Id
</label>
<input id="Order-1-id" name="Order-1-id" type="text" value="1" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Order-1-id").focus();
//]]>
</script>
<input id="Order-1-quantity" name="Order-1-quantity" type="hidden" value="10" />
<div>
<label class="field_req" for="Order-1-user_id">
User
</label>
<select id="Order-1-user_id" name="Order-1-user_id">
<option selected="selected" value="1">
Bill
</option>
<option value="2">
John
</option>
</select>
</div>
>>> fs = FieldSet(One)
>>> fs.configure(pk=True)
>>> print fs.render()
<div>
<label class="field_req" for="One--id">
Id
</label>
<input id="One--id" name="One--id" type="text" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("One--id").focus();
//]]>
</script>
>>> fs.configure(include=[])
>>> print fs.render()
<BLANKLINE>
>>> fs.configure(pk=True, focus=None)
>>> print fs.render()
<div>
<label class="field_req" for="One--id">
Id
</label>
<input id="One--id" name="One--id" type="text" />
</div>
>>> fs = FieldSet(One)
>>> fs.rebind(Two) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...
>>> fs = FieldSet(Two)
>>> fs.configure()
>>> fs2 = fs.bind(Two)
>>> [fs2 == field.parent for field in fs2._render_fields.itervalues()]
[True]
>>> fs = FieldSet(OTOParent, session)
>>> print fs.render()
<div>
<label class="field_req" for="OTOParent--oto_child_id">
Child
</label>
<select id="OTOParent--oto_child_id" name="OTOParent--oto_child_id">
<option value="1">
baz
</option>
</select>
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("OTOParent--oto_child_id").focus();
//]]>
</script>
>>> fs.rebind(parent)
>>> fs.child.raw_value
<OTOChild baz>
# validation + sync
>>> fs_2 = FieldSet(Two, session=session, data={'Two--foo': ''})
>>> fs_2.foo.value # '' is deserialized to None, so default of 133 is used
'133'
>>> fs_2.validate()
True
>>> fs_2.configure(options=[fs_2.foo.required()], focus=None)
>>> fs_2.validate()
False
>>> fs_2.errors
{AttributeField(foo): ['Please enter a value']}
>>> print fs_2.render()
<div>
<label class="field_req" for="Two--foo">
Foo
</label>
<input id="Two--foo" name="Two--foo" type="text" value="133" />
<span class="field_error">
Please enter a value
</span>
</div>
>>> fs_2.rebind(data={'Two--foo': 'asdf'})
>>> fs_2.data
SimpleMultiDict([('Two--foo', u'asdf')])
>>> fs_2.validate()
False
>>> fs_2.errors
{AttributeField(foo): ['Value is not an integer']}
>>> print fs_2.render()
<div>
<label class="field_req" for="Two--foo">
Foo
</label>
<input id="Two--foo" name="Two--foo" type="text" value="asdf" />
<span class="field_error">
Value is not an integer
</span>
</div>
>>> fs_2.rebind(data={'Two--foo': '2'})
>>> fs_2.data
SimpleMultiDict([('Two--foo', u'2')])
>>> fs_2.validate()
True
>>> fs_2.errors
{}
>>> fs_2.sync()
>>> fs_2.model.foo
2
>>> session.flush()
>>> print fs_2.render() #doctest: +ELLIPSIS
Traceback (most recent call last):
...
PkError: Primary key of model has changed since binding, probably due to sync()ing a new instance (from None to 1)...
>>> session.rollback()
>>> fs_1 = FieldSet(One, session=session, data={'One--id': '1'})
>>> fs_1.configure(pk=True)
>>> fs_1.validate()
True
>>> fs_1.sync()
>>> fs_1.model.id
1
>>> fs_1.rebind(data={'One--id': 'asdf'})
>>> fs_1.id.renderer.name
u'One--id'
>>> fs_1.validate()
False
>>> fs_1.errors
{AttributeField(id): ['Value is not an integer']}
# test updating _bound_pk copy
>>> one = One(id=1)
>>> fs_11 = FieldSet(one)
>>> fs_11.id.renderer.name
u'One-1-id'
>>> one.id = 2
>>> fs_11.rebind(one)
>>> fs_11.id.renderer.name
u'One-2-id'
>>> fs_u = FieldSet(User, session=session, data={})
>>> fs_u.configure(include=[fs_u.orders])
>>> fs_u.validate()
True
>>> fs_u.sync()
>>> fs_u.model.orders
[]
>>> fs_u.rebind(User, session, data={'User--orders': [str(order1.id), str(order2.id)]})
>>> fs_u.validate()
True
>>> fs_u.sync()
>>> fs_u.model.orders == [order1, order2]
True
>>> session.rollback()
>>> fs_3 = FieldSet(Three, data={'Three--foo': 'asdf', 'Three--bar': 'fdsa'})
>>> fs_3.foo.value
u'asdf'
>>> print fs_3.foo.textarea().render()
<textarea id="Three--foo" name="Three--foo">asdf</textarea>
>>> print fs_3.foo.textarea("3x4").render()
<textarea cols="3" id="Three--foo" name="Three--foo" rows="4">asdf</textarea>
>>> print fs_3.foo.textarea((3,4)).render()
<textarea cols="3" id="Three--foo" name="Three--foo" rows="4">asdf</textarea>
>>> fs_3.bar.value
u'fdsa'
>>> def custom_validator(fs):
... if fs.foo.value != fs.bar.value:
... fs.foo.errors.append('does not match bar')
... raise ValidationError('foo and bar do not match')
>>> fs_3.configure(global_validator=custom_validator, focus=None)
>>> fs_3.validate()
False
>>> sorted(fs_3.errors.items())
[(None, ('foo and bar do not match',)), (AttributeField(foo), ['does not match bar'])]
>>> print fs_3.render()
<div class="fieldset_error">
foo and bar do not match
</div>
<div>
<label class="field_opt" for="Three--foo">
Foo
</label>
<input id="Three--foo" name="Three--foo" type="text" value="asdf" />
<span class="field_error">
does not match bar
</span>
</div>
<div>
<label class="field_opt" for="Three--bar">
Bar
</label>
<input id="Three--bar" name="Three--bar" type="text" value="fdsa" />
</div>
# custom renderer
>>> fs_3 = FieldSet(Three, data={'Three--foo': 'http://example.com/image.png'})
>>> fs_3.configure(include=[fs_3.foo.with_renderer(ImgRenderer)])
>>> print fs_3.foo.render()
<img src="http://example.com/image.png">
# natural PKs
>>> fs_npk = FieldSet(NaturalOrder, session)
>>> print fs_npk.render()
<div>
<label class="field_req" for="NaturalOrder--quantity">
Quantity
</label>
<input id="NaturalOrder--quantity" name="NaturalOrder--quantity" type="text" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("NaturalOrder--quantity").focus();
//]]>
</script>
<div>
<label class="field_req" for="NaturalOrder--user_email">
User
</label>
<select id="NaturalOrder--user_email" name="NaturalOrder--user_email">
<option value="<EMAIL>">
Natural Bill
</option>
<option value="<EMAIL>">
<NAME>
</option>
</select>
</div>
>>> fs_npk.rebind(norder2, session, data={'NaturalOrder-2-user_email': nbill.email, 'NaturalOrder-2-quantity': str(norder2.quantity)})
>>> fs_npk.user_email.renderer.name
u'NaturalOrder-2-user_email'
>>> fs_npk.sync()
>>> fs_npk.model.user_email == nbill.email
True
>>> session.rollback()
# allow attaching custom attributes to wrappers
>>> fs = FieldSet(User)
>>> fs.name.baz = 'asdf'
>>> fs2 = fs.bind(bill)
>>> fs2.name.baz
'asdf'
# equality can tell a field bound to an instance is the same as one bound to a type
>>> fs.name == fs2.name
True
# Field
>>> fs = FieldSet(One)
>>> fs.add(Field('foo'))
>>> print configure_and_render(fs, focus=None)
<div>
<label class="field_opt" for="One--foo">
Foo
</label>
<input id="One--foo" name="One--foo" type="text" />
</div>
>>> fs = FieldSet(One)
>>> fs.add(Field('foo', types.Integer, value=2))
>>> fs.foo.value
2
>>> print configure_and_render(fs, focus=None)
<div>
<label class="field_opt" for="One--foo">
Foo
</label>
<input id="One--foo" name="One--foo" type="text" value="2" />
</div>
>>> fs.rebind(One, data={'One--foo': '4'})
>>> fs.sync()
>>> fs.foo.value
4
>>> fs = FieldSet(One)
>>> fs.add(Field('foo', types.Integer, value=2).dropdown(options=[('1', 1), ('2', 2)]))
>>> print configure_and_render(fs, focus=None)
<div>
<label class="field_opt" for="One--foo">
Foo
</label>
<select id="One--foo" name="One--foo">
<option value="1">
1
</option>
<option selected="selected" value="2">
2
</option>
</select>
</div>
# test Field __hash__, __eq__
>>> fs.foo == fs.foo.dropdown(options=[('1', 1), ('2', 2)])
True
>>> fs2 = FieldSet(One)
>>> fs2.add(Field('foo', types.Integer, value=2))
>>> fs2.configure(options=[fs2.foo.dropdown(options=[('1', 1), ('2', 2)])], focus=None)
>>> fs.render() == fs2.render()
True
>>> fs_1 = FieldSet(One)
>>> fs_1.add(Field('foo', types.Integer, value=[2, 3]).dropdown(options=[('1', 1), ('2', 2), ('3', 3)], multiple=True))
>>> print configure_and_render(fs_1, focus=None)
<div>
<label class="field_opt" for="One--foo">
Foo
</label>
<select id="One--foo" multiple="multiple" name="One--foo" size="5">
<option value="1">
1
</option>
<option selected="selected" value="2">
2
</option>
<option selected="selected" value="3">
3
</option>
</select>
</div>
>>> fs_1.rebind(One, data={'One--foo': ['1', '2']})
>>> fs_1.sync()
>>> fs_1.foo.value
[1, 2]
# test attribute names
>>> fs = FieldSet(One)
>>> fs.add(Field('foo'))
>>> fs.foo == fs['foo']
True
>>> fs.add(Field('add'))
>>> fs.add == fs['add']
False
# change default renderer
>>> class BooleanSelectRenderer(SelectFieldRenderer):
... def render(self, **kwargs):
... kwargs['options'] = [('Yes', True), ('No', False)]
... return SelectFieldRenderer.render(self, **kwargs)
>>> d = dict(FieldSet.default_renderers)
>>> d[types.Boolean] = BooleanSelectRenderer
>>> fs = FieldSet(CheckBox)
>>> fs.default_renderers = d
>>> print fs.field.render()
<select id="CheckBox--field" name="CheckBox--field">
<option value="True">Yes</option>
<option value="False">No</option>
</select>
# test setter rejection
>>> fs = FieldSet(One)
>>> fs.id = fs.id.required()
Traceback (most recent call last):
...
AttributeError: Do not set field attributes manually. Use append() or configure() instead
# join
>>> fs = FieldSet(Order__User)
>>> fs._fields.values()
[AttributeField(orders_id), AttributeField(orders_user_id), AttributeField(orders_quantity), AttributeField(users_id), AttributeField(users_email), AttributeField(users_password), AttributeField(users_name)]
>>> fs.rebind(session.query(Order__User).filter_by(orders_id=1).one())
>>> print configure_and_render(fs, focus=None)
<div>
<label class="field_req" for="Order__User-1_1-orders_quantity">
Orders quantity
</label>
<input id="Order__User-1_1-orders_quantity" name="Order__User-1_1-orders_quantity" type="text" value="10" />
</div>
<div>
<label class="field_req" for="Order__User-1_1-users_email">
Users email
</label>
<input id="Order__User-1_1-users_email" maxlength="40" name="Order__User-1_1-users_email" type="text" value="<EMAIL>" />
</div>
<div>
<label class="field_req" for="Order__User-1_1-users_password">
Users password
</label>
<input id="Order__User-1_1-users_password" maxlength="20" name="Order__User-1_1-users_password" type="text" value="<PASSWORD>" />
</div>
<div>
<label class="field_opt" for="Order__User-1_1-users_name">
Users name
</label>
<input id="Order__User-1_1-users_name" maxlength="30" name="Order__User-1_1-users_name" type="text" value="Bill" />
</div>
>>> fs.rebind(session.query(Order__User).filter_by(orders_id=1).one(), data={'Order__User-1_1-orders_quantity': '5', 'Order__User-1_1-users_email': bill.email, 'Order__User-1_1-users_password': '<PASSWORD>', 'Order__User-1_1-users_name': 'Bill'})
>>> fs.validate()
True
>>> fs.sync()
>>> session.flush()
>>> session.refresh(bill)
>>> bill.password == '<PASSWORD>'
True
>>> session.rollback()
>>> FieldSet.default_renderers[Point] = PointFieldRenderer
>>> fs = FieldSet(Vertex)
>>> print pretty_html(fs.start.render())
<input id="Vertex--start-x" name="Vertex--start-x" type="text" value="" />
<input id="Vertex--start-y" name="Vertex--start-y" type="text" value="" />
>>> fs.rebind(Vertex)
>>> v = fs.model
>>> v.start = Point(1,2)
>>> v.end = Point(3,4)
>>> print pretty_html(fs.start.render())
<input id="Vertex--start-x" name="Vertex--start-x" type="text" value="1" />
<input id="Vertex--start-y" name="Vertex--start-y" type="text" value="2" />
>>> fs.rebind(v)
>>> fs.rebind(data={'Vertex--start-x': '10', 'Vertex--start-y': '20', 'Vertex--end-x': '30', 'Vertex--end-y': '40'})
>>> fs.validate()
True
>>> fs.sync()
>>> session.add(v)
>>> session.flush()
>>> v.id
1
>>> session.refresh(v)
>>> v.start.x
10
>>> v.end.y
40
>>> session.rollback()
# readonly tests
>>> t = FieldSet(john)
>>> john.name = None
>>> t.configure(readonly=True)
>>> t.readonly
True
>>> print t.render()
<tbody>
<tr>
<td class="field_readonly">
Email:
</td>
<td>
<EMAIL>
</td>
</tr>
<tr>
<td class="field_readonly">
Password:
</td>
<td>
5678
</td>
</tr>
<tr>
<td class="field_readonly">
Name:
</td>
<td>
</td>
</tr>
<tr>
<td class="field_readonly">
Orders:
</td>
<td>
Quantity: 5, Quantity: 6
</td>
</tr>
</tbody>
>>> session.rollback()
>>> session.refresh(john)
>>> fs_or = FieldSet(order1)
>>> print fs_or.user.render_readonly()
<a href="mailto:<EMAIL>">Bill</a>
>>> out = FieldSet(OrderUserTag, session=session)
>>> list(sorted(out._fields))
['id', 'order_id', 'order_user', 'tag', 'user_id']
>>> print out.order_user.name
order_user
>>> out.order_user.is_raw_foreign_key
False
>>> out.order_user.is_composite_foreign_key
True
>>> list(sorted(out.render_fields))
['order_user', 'tag']
>>> print pretty_html(out.order_user.render())
<select id="OrderUserTag--order_user" name="OrderUserTag--order_user">
<option value="(1, 1)">
OrderUser(1, 1)
</option>
<option value="(1, 2)">
OrderUser(1, 2)
</option>
</select>
>>> out.rebind(data={'OrderUserTag--order_user': '(1, 2)', 'OrderUserTag--tag': 'asdf'})
>>> out.validate()
True
>>> out.sync()
>>> print out.model.order_user
OrderUser(1, 2)
>>> fs = FieldSet(Function)
>>> fs.configure(pk=True)
>>> fs.foo.render().startswith('<span')
True
>>> fs_r = FieldSet(Recursive)
>>> fs_r.parent_id.is_raw_foreign_key
True
>>> fs_r.rebind(data={'Recursive--foo': 'asdf'})
>>> fs_r.validate()
True
>>> fs_oo = FieldSet(OptionalOrder, session=session)
>>> fs_oo.configure(options=[fs_oo.user.with_null_as(('No user', ''))])
>>> fs_oo.user._null_option
('No user', '')
>>> print pretty_html(fs_oo.user.render())
<select id="OptionalOrder--user_id" name="OptionalOrder--user_id">
<option selected="selected" value="">
No user
</option>
<option value="1">
Bill
</option>
<option value="2">
John
</option>
</select>
>>> fs_oo = FieldSet(OptionalOrder)
>>> fs_oo.rebind(data={'OptionalOrder--user_id': fs_oo.user_id._null_option[1], 'OptionalOrder--quantity': ''})
>>> fs_oo.validate()
True
>>> fs_oo.user_id.value is None
True
>>> fs_bad = FieldSet(One)
>>> fs_bad.configure(include=[Field('invalid')])
Traceback (most recent call last):
...
ValueError: Unrecognized Field `AttributeField(invalid)` in `include` -- did you mean to call append() first?
>>> fs_s = FieldSet(Synonym)
>>> fs_s._fields
{'foo': AttributeField(foo), 'id': AttributeField(id)}
>>> fs_prefix = FieldSet(Two, prefix="myprefix")
>>> print(fs_prefix.render())
<div>
<label class="field_opt" for="myprefix-Two--foo">
Foo
</label>
<input id="myprefix-Two--foo" name="myprefix-Two--foo" type="text" value="133" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("myprefix-Two--foo").focus();
//]]>
</script>
>>> fs_prefix.rebind(data={"myprefix-Two--foo": "42"})
>>> fs_prefix.validate()
True
>>> fs_prefix.sync()
>>> fs_prefix.model.foo
42
>>> fs_two = FieldSet(Two)
>>> fs_two.configure(options=[fs_two.foo.label('1 < 2')])
>>> print fs_two.render()
<div>
<label class="field_opt" for="Two--foo">
1 < 2
</label>
<input id="Two--foo" name="Two--foo" type="text" value="133" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Two--foo").focus();
//]]>
</script>
>>> fs_prop = FieldSet(Property)
>>> fs_prop.foo.is_readonly()
True
>>> fs_conflict = FieldSet(ConflictNames)
>>> fs_conflict.rebind(conflict_names)
>>> print fs_conflict.render() #doctest: +ELLIPSIS
<div>
...
"""
if __name__ == '__main__':
import doctest
doctest.testmod()
| 2.125
| 2
|
MDPN.py
|
hadisna/Makov-Decision-Process-Neural-Network-For-Global-Optimization
| 1
|
12775676
|
<filename>MDPN.py
import numpy as np
import math
import argparse, random
import matplotlib.pyplot as plt
def ChangeDomain(temp_e, umax, umin, Size, Codel, n, Po, temp_e3, temp_e2):
v_max = math.floor((n * Codel - 1) / Codel)
umax1 = np.array([[0.0] for i in range(v_max + 1)])
umin1 = np.array([[0.0] for i in range(v_max + 1)])
for j in range(0, n * Codel, Codel):
xx = temp_e[:, j]
xx = xx.reshape((-1, 1))
xx = np.array(sorted(xx))
dd = np.diff(np.concatenate([xx, [max(xx) + 1]], axis=0), axis=0)
tmp = np.concatenate([[[1]], dd], axis=0)
tmp3 = np.where(tmp > 0)[0]
count = np.diff(tmp3, axis=0)
count = count.reshape((-1, 1))
yy = np.concatenate([xx[np.where(dd > 0)[0]], count], axis=1)
yy = yy.T
JJ = yy.shape
y0 = 0.0
y1 = 0.0
y2 = 0.0
y3 = 0.0
y4 = 0.0
for i in range(JJ[1]):
if yy[0, i] == 0:
y0 = yy[1, i] / Size
elif yy[0, i] == 1:
y1 = yy[1, i] / Size
elif yy[0, i] == 2:
y2 = yy[1, i] / Size
elif yy[0, i] == 3:
y3 = yy[1, i] / Size
elif yy[0, i] == 4:
y4 = yy[1, i] / Size
v = math.floor(j / Codel)
umax1[v] = umax[v]
umin1[v] = umin[v]
if y0 > Po and temp_e[0, j] == 0:
umax[v] = (umax1[v] - umin1[v]) / 5 + umin1[v]
umin[v] = umin1[v]
if y1 > Po and temp_e[0, j] == 1:
umax[v] = (umax1[v] - umin1[v]) * 2 / 5 + umin1[v]
umin[v] = (umax1[v] - umin1[v]) / 5 + umin1[v]
if y2 > Po and temp_e[0, j] == 2:
umax[v] = (umax1[v] - umin1[v]) * 3 / 5 + umin1[v]
umin[v] = (umax1[v] - umin1[v]) * 2 / 5 + umin1[v]
if y3 > Po and temp_e[0, j] == 3:
umax[v] = (umax1[v] - umin1[v]) * 4 / 5 + umin1[v]
umin[v] = (umax1[v] - umin1[v]) * 3 / 5 + umin1[v]
if y4 > Po and temp_e[0, j] == 4:
umin[v] = (umax1[v] - umin1[v]) * 4 / 5 + umin1[v]
umax[v] = umax1[v]
if umax[v] != umax1[v] or umin[v] != umin1[v]:
QBack = temp_e[:, (j + 1):(v + 1) * Codel]
temp_e[:, j:((v + 1) * Codel - 1)] = QBack
temp_e[:, (v + 1) * Codel - 1] = np.squeeze(np.round(4 * np.random.rand(Size, 1)), axis=(1,))
temp_e[0, (v + 1) * Codel - 1] = 0
TE1 = temp_e3
QBack1 = TE1[:, (j + 1):(v + 1) * Codel]
temp_e2[:, j:((v + 1) * Codel - 1)] = QBack1
temp_e2[:, (v + 1) * Codel - 1] = np.squeeze(np.round(4 * np.random.rand(Size, 1)), axis=(1,))
RR = [0 for i in range(Size)]
for i in range(Size):
for j in range(n * Codel):
if temp_e[i, j] == temp_e[0, j] and j % (Codel-1) != 0:
RR[i] = RR[i] + (math.pow(5, (Codel - ((j + 1) % Codel)))) / constant
elif temp_e[i, j] == temp_e[0, j] and j % (Codel-1) == 0:
RR[i] = RR[i] + 1 / constant
for i in range(Size):
RR[i] = RR[i] + 1 / (i + 1)
OderRR = sorted(RR, reverse=True)
IndexRR = sorted(range(len(RR)), key=lambda x: RR[x], reverse=True)
TER = np.array([temp_e[IndexRR[i], :] for i in range(Size)])
temp_e = TER.copy()
temp_e1 = temp_e.copy()
return temp_e, umax, umin, temp_e1, temp_e2
def ackley(fval, n):
obj = 0.0
obj1 = 0.0
obj2 = 0.0
for i in range(n):
obj1 = obj1+fval[i]**2
obj2 = obj2+math.cos(2*math.pi*fval[i])
obj = -20*math.exp(-0.2*math.sqrt(obj1/n))-math.exp(obj2/n)+20+math.e
return obj
def rosenbrock(fval, n):
obj = 0
for i in range(n-1):
obj = obj+100*pow(fval[i+1]-pow((fval[i]), 2), 2)+pow((1-fval[i]), 2)
return obj
def griewank(fval, n):
obj = 0.0
objt = 0.0
objd = 1.0
for i in range(n):
objt = fval[i]**2.0/2.0+objt
objd = math.cos(fval[i]/math.sqrt(i+1))*objd
obj = objt-objd+1
return obj
def rastrigin(fval, n):
obj = 0.0
for i in range(n):
obj = obj+fval[i]**2-10*math.cos(2*math.pi*fval[i])
obj=obj+10*n
return obj
def evaluation(m, n, Codel, uma, umi):
y = [0.0 for i in range(n)]
x = [0.0 for i in range(n)]
r = [0.0 for i in range(n)]
for v in range(n):
y[v] = 0.0
mm[v] = m[[i for i in range(Codel * v, (v + 1) * Codel)]]
for i in range(Codel):
y[v] = y[v] + mm[v][i] * pow(5, Codel - (i + 1))
x[v] = (uma[v] - umi[v]) * y[v] / (5 ** Codel) + umi[v]
r[v] = x[v]
Fi = eval(obj_function)(r, n)
return Fi
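# Worked example of the base-5 decoding done in evaluation() above
# (illustrative digits only, not taken from an actual run): with Codel = 4,
# the digit string [1, 0, 2, 3] for one variable gives
#   y = 1*5**3 + 0*5**2 + 2*5**1 + 3*5**0 = 138
# and the decoded value is x = (umax - umin) * 138 / 5**4 + umin.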
parser = argparse.ArgumentParser(description='Markov Decision Process Neural Network for Global Optimization')
parser.add_argument('--obj_function', type=str, default='griewank')
parser.add_argument('--Size', type=int, default=100)
parser.add_argument('--G', type=int, default=100)
parser.add_argument('--Codel', type=int, default=4)
parser.add_argument('--umaxo', type=int, default=1000000)
parser.add_argument('--umino', type=int, default=-1000000)
parser.add_argument('--n', type=int, default=2)
parser.add_argument('--Po', type=float, default=0.8)
args = parser.parse_args()
obj_function = args.obj_function
Size = args.Size
G = args.G
Codel = args.Codel
umaxo = args.umaxo
umino = args.umino
n = args.n
Po = args.Po
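# Example invocation (hypothetical values; any of the argparse options above
# can be overridden the same way):
#   python MDPN.py --obj_function ackley --n 2 --Size 100 --G 100 --Po 0.8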
mm = [[] for i in range(n)]
mmb = [[] for i in range(n)]
bfi = [None for _ in range(G)]
BS = [None for _ in range(G)]
Q = list([])
EX = list([])
EX1 = list([])
for i in range(5):
Q.append(np.round(4 * np.random.rand(Size, n * Codel)))
EX.append(np.round(4 * np.random.rand(Size, n * Codel)))
EX1.append(np.round(4 * np.random.rand(Size, n * Codel)))
umax = [umaxo for i in range(n)]
umin = [umino for i in range(n)]
constant = 0
for i in range(Codel):
    constant = constant + n * (5 ** i)  # base-5 place value; '^' would be bitwise XOR in Python
ax = []
ay = []
plt.ion()
for k in range(G):
F = [0 for s in range(15 * Size)]
for i in range(Size):
for j in range(Codel * n):
for b in range(4):
Q[b + 1][i, j] = (Q[b][i, j] + 1) % 5
EX[b + 1][i, j] = (EX[b][i, j] + 1) % 5
EX1[b + 1][i, j] = (EX1[b][i, j] + 1) % 5
E = np.concatenate((Q, EX, EX1), axis=0)
E = E.reshape(15 * Size, n * Codel)
for s in range(15 * Size):
m = E[s, :]
F[s] = evaluation(m, n, Codel, umax, umin)
fi = F
Oderfi = sorted(fi)
Indexfi = sorted(range(len(fi)), key=lambda x: fi[x])
Oderfi1 = sorted(fi, reverse=True)
Indexfi1 = sorted(range(len(fi)), key=lambda x: fi[x], reverse=True)
Bestfitness = Oderfi[0]
TempE = np.array([list(E[Indexfi[i]]) for i in range(Size)])
TempE2 = np.array([list(E[Indexfi1[i]]) for i in range(Size)])
TempE3 = np.array([list(E[Indexfi[i]]) for i in range(9 * Size, 10 * Size)])
TempE3 = np.flipud(TempE3)
BestS = TempE[0, :]
bfi[k] = Bestfitness
BS[k] = BestS
TempE, umax, umin, TempE1, TempE2 = ChangeDomain(TempE, umax, umin, Size, Codel, n, Po, TempE3, TempE2)
m = TempE[0, :]
F1 = evaluation(m, n, Codel, umax, umin)
for i in range(Size - 2):
for j in range(n * Codel):
if TempE[i, j] == TempE[i + 1, j] and TempE[i, j] != 4:
TempE[i + 1, j] = TempE[i + 1, j] + 1
elif TempE[i, j] == TempE[i + 1, j] and TempE[i, j] == 4:
TempE[i + 1, j] = 0
elif TempE[i, j] == TempE[i + 2, j] and TempE[i, j] != 4:
TempE[i + 2, j] = TempE[i + 2, j] + 1
elif TempE[i, j] == TempE[i + 2, j] and TempE[i, j] == 4:
TempE[i + 2, j] = 0
if TempE1[i, j] == TempE1[i + 1, j] and TempE1[i, j] != 0:
TempE1[i + 1, j] = TempE1[i + 1, j] - 1
elif TempE1[i, j] == TempE1[i + 1, j] and TempE1[i, j] == 0:
TempE1[i + 1, j] = 4
elif TempE1[i, j] == TempE1[i + 2, j] and TempE1[i, j] != 0:
TempE1[i + 2, j] = TempE1[i + 2, j] - 1
elif TempE1[i, j] == TempE1[i + 2, j] and TempE1[i, j] == 0:
TempE1[i + 2, j] = 4
if TempE2[i, j] == TempE2[i + 1, j] and TempE2[i, j] != 4:
TempE2[i + 1, j] = TempE2[i + 1, j] + 1
elif TempE2[i, j] == TempE2[i + 1, j] and TempE2[i, j] == 4:
TempE2[i + 1, j] = 0
elif TempE2[i, j] == TempE2[i + 2, j] and TempE2[i, j] != 4:
TempE2[i + 2, j] = TempE2[i + 2, j] + 1
elif TempE2[i, j] == TempE2[i + 2, j] and TempE2[i, j] == 4:
TempE2[i + 2, j] = 0
for i in range(Size - 1):
for j in range(n * Codel):
if TempE2[0, j] == TempE[i+1, j]:
                TempE[i+1, j] = TempE[0, j]
if TempE2[0, j] == TempE1[i+1, j]:
TempE1[i + 1, j] = TempE1[0, j]
if TempE[0, j] == TempE2[i + 1, j]:
TempE2[i + 1, j] = TempE2[0, j]
TempE1[0, :] = np.round(4 * np.random.rand(1, n * Codel))
for i in range(Size):
for j in range(Codel * n):
Q[0][i, j] = TempE[i, j]
EX[0][i, j] = TempE1[i, j]
EX1[0][i, j] = TempE2[i, j]
yb = [0 for i in range(n)]
variables = [0 for i in range(n)]
for v in range(n):
yb[v] = 0
mmb[v] = m[[i for i in range(Codel * v, (v + 1) * Codel)]]
for i in range(Codel):
yb[v] = yb[v] + mm[v][i] * pow(5, Codel - (i + 1))
variables[v] = (umax[v] - umin[v]) * yb[v] / (5 ** Codel) + umin[v]
Fist = eval(obj_function)(variables, n)
print('Bestfitness3', k, Fist, variables, m, umax, umin)
ax.append(k)
ay.append(Fist)
plt.clf()
if len(ax) > 20:
plt.plot(ax[-20:-1], ay[-20:-1])
else:
plt.plot(ax, ay)
plt.pause(0.1)
plt.ioff()
| 1.953125
| 2
|
setup.py
|
wowngasb/pykl
| 0
|
12775677
|
from setuptools import setup, find_packages
setup(name='pykl',
version='0.1.6',
packages=find_packages(),
author='pykl',
author_email='<EMAIL>',
description='kltool for python, toolset for web, http, cache, dht, xml, json and so on',
long_description=open('README.rst').read(),
keywords='kltool html graphql xml json',
url='http://github.com/wowngasb/pykl',
license='MIT',
install_requires=[
'graphql-core==1.1',
'graphene==1.4',
'flask-graphql>=1.2.0',
'pyquery==1.2.11',
'requests==2.9.1',
'SQLAlchemy==1.1.15',
'six',
'singledispatch'
],
tests_require=[
])
| 1.132813
| 1
|
main.py
|
Karma-design/PyFlickrMining
| 1
|
12775678
|
<filename>main.py<gh_stars>1-10
# -*- coding: utf-8 -*-
print(__doc__)
# Code source: <NAME>
# License: BSD 3 clause
#HOW TO USE :
# 1.Set the input and output filenames
input_filename = 'lyon_cleaned_url_100' ##possible values : 'lyon_cleaned','suburb_cleaned','all_cleaned'
output_filename = 'clustering_v2'
#Set the pictures ratio : 1/(pictures ratio) pictures are displayed
pictRatio = 15
# 2.Comment the unused code (MeanShift or Kmeans, with plot)
# 3.If you use KMeans, don't forget to set the number of clusters
# NB : Clustering of large amount of data may take some time to perform using MeanShift
import pandas as pd
import numpy as np
import os
input_path = os.path.dirname(os.path.realpath(__file__))+'/%s.csv'
path = input_path% input_filename
df = pd.read_csv(path)
Xa = df[['latitude', 'longitude_normalized', 'longitude', 'user','First*(id)','First*(url)']].values #longitude_normalized on 2pos when possible
latitudeIdx = 0
longitudeNrmIdx = 1
longitudeIdx = 2
userIdx = 3
idIdx = 4
urlIdx = 5
###############################################################################
# Compute clustering with MeanShift
from sklearn.cluster import MeanShift
# The bandwidth could be estimated automatically (e.g. with sklearn's estimate_bandwidth); a fixed value is used here
bandwidth = 0.0022
ms = MeanShift(bandwidth=bandwidth,bin_seeding=True, cluster_all=False, min_bin_freq=15)
X = Xa[:, 0:2]
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)-1
print("number of estimated clusters : %d" % n_clusters_)
##############################
# Plot result
import pylab as pl
from itertools import cycle
pl.figure(1)
pl.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
pl.plot(X[my_members, 0], X[my_members, 1], col + '.')
pl.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
pl.title('Estimated number of clusters: %d' % n_clusters_)
pl.show()
####CLUSTERS JSON
json="var msg = '["
color = ['red','blue','purple','yellow','green','lightblue','orange','pink']
for k in range(n_clusters_):
if k != 0:
json += ","
json += "{"
##Longitude
json +="\"longitude\":"
my_members = labels == k
cluster_center = cluster_centers[k]
json += str(round(cluster_center[1]/1.43,4))
#json += cluster_center[1].astype('|S6')
json += ", "
##Latitude
json +="\"latitude\":"
my_members = labels == k
cluster_center = cluster_centers[k]
json += cluster_center[0].astype('|S6')
json += ", "
##Color
json +="\"color\":\""
json += color[k%8]
json += "\""
##
json += "}"
json += "]'; \n\n "
####
###PICTURES JSON
json+="var donnees = '["
for k in range(n_clusters_):
my_members = labels == k
for l in range(X[my_members,0].size/pictRatio):
if l+k != 0:
json += ","
json += "{"
##Longitude
json +="\"longitude\":"
array = Xa[my_members, longitudeIdx]
#json += str(cluster_center[1]/1.43)
json += str(array[l])#.astype('|S6')
json += ", "
##Latitude
json +="\"latitude\":"
array = Xa[my_members, latitudeIdx]
json += str(array[l])#.astype('|S6')
json += ", "
##Color
json +="\"color\":\""
json += color[k%8]
json += "\""
json += ", "
##Id
json +="\"id\":"
array = Xa[my_members, idIdx]
json += str(array[l])#.astype('|S6')
json += ", "
##url
json +="\"url\":\""
array = Xa[my_members, urlIdx]
json += str(array[l])#.astype('|S6')
json += "\", "
##User
json +="\"user\":\""
array = Xa[my_members, userIdx]
json += array[l]
json += "\"}"
json += "]';"
#Writing to text file
with open(os.path.dirname(os.path.realpath(__file__))+'/res/begin.html', 'r') as text_file:
begin=text_file.read()
with open(os.path.dirname(os.path.realpath(__file__))+'/res/end.html', 'r') as text_file:
end=text_file.read()
with open(os.path.dirname(os.path.realpath(__file__))+'/'+output_filename+'.html', "w") as text_file:
#Static file start
text_file.write("{0}".format(begin))
#Writing generated content
text_file.write("{0}".format(json))
#Static file ending
text_file.write("{0}".format(end))
###END OTHER JSON
###############################################################################
'''
###############################################################################
#Compute clustering with Kmeans
kmeans_n_clusters = 50
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=kmeans_n_clusters, n_init=10)
kmeans.fit(X)
##############################
# Plot Kmeans result
labels = kmeans.labels_
centroids = kmeans.cluster_centers_
from matplotlib import pyplot
import numpy as np
for i in range(kmeans_n_clusters):
# select only data observations with cluster label == i
ds = X[np.where(labels==i)]
# plot the data observations
pyplot.plot(ds[:,0],ds[:,1],'o')
# plot the centroids
lines = pyplot.plot(centroids[i,0],centroids[i,1],'kx')
# make the centroid x's bigger
pyplot.setp(lines,ms=15.0)
pyplot.setp(lines,mew=2.0)
pyplot.title('KMeans with %d clusters' % kmeans_n_clusters)
pyplot.show()
###############################################################################
'''
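#An alternative sketch (not used above): the cluster payload could be built
#with json.dumps instead of manual string concatenation. The variable names
#come from the MeanShift section; everything else here is illustrative only.
#
# import json
# clusters = [{"longitude": float(round(cluster_centers[k][1] / 1.43, 4)),
#              "latitude": float(round(cluster_centers[k][0], 4)),
#              "color": color[k % 8]}
#             for k in range(n_clusters_)]
# json_msg = "var msg = '" + json.dumps(clusters) + "'; \n\n "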
| 3.09375
| 3
|
src/abaqus/AbaqusCAEDisplayPreferences/CaeGuiPrefs.py
|
Haiiliin/PyAbaqus
| 7
|
12775679
|
<gh_stars>1-10
from abaqusConstants import *
class CaeGuiPrefs:
"""The CaeGuiPrefs object contains the details of the graphical preferences in a
guiPreferences section of the abaqus_2021.gpr file.
Attributes
----------
fileName: str
A String specifying the path to the preferences file.
Notes
-----
This object can be accessed by:
.. code-block:: python
import caePrefsAccess
caePrefsAccess.openGuiPreferences(...)
"""
# A String specifying the path to the preferences file.
fileName: str = ''
def save(self, backupFile: Boolean = OFF):
"""This method saves the guiPreferences section specified in the current **fileName**.
Parameters
----------
backupFile: Boolean
A Boolean specifying whether Abaqus should save a numbered backup copy of the
preferences file, *fileName*. Default is True.
"""
pass
def saveAs(self, fileName: str):
"""This method saves the guiPreferences settings to the specified location.
Parameters
----------
fileName: str
A String specifying the path to the preferences file.
"""
pass
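# Illustrative usage sketch only: it assumes an Abaqus CAE Python session and
# the caePrefsAccess.openGuiPreferences() entry point quoted in the class
# docstring above; the file names are hypothetical placeholders.
#
#   import caePrefsAccess
#   guiPrefs = caePrefsAccess.openGuiPreferences('abaqus_2021.gpr')
#   guiPrefs.saveAs(fileName='custom_prefs.gpr')  # write the settings to a new file
#   guiPrefs.save(backupFile=OFF)                 # overwrite without a numbered backup copy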
| 2.46875
| 2
|
src/negotiating_agent/venv/lib/python3.8/site-packages/geniusweb/protocol/session/SessionState.py
|
HahaBill/CollaborativeAI
| 1
|
12775680
|
<reponame>HahaBill/CollaborativeAI
from abc import abstractmethod
from typing import List, Optional
from geniusweb.actions.Action import Action
from geniusweb.inform.Agreements import Agreements
from geniusweb.progress.Progress import Progress
from geniusweb.protocol.NegoState import NegoState
from geniusweb.protocol.session.SessionResult import SessionResult
from geniusweb.protocol.session.SessionSettings import SessionSettings
class SessionState(NegoState):
'''
The current state of the session. E.g. typically contains the actions so far
and the parties currently connected. <br>
The state checks if transitions (Actions from the party) are following the
    protocol, and thus implements most of the protocol. <br>
If protocol errors occur, these should be stored in the state and the state
should become {@link #isFinal(long)}. Throwing should happen only in case of
a bug.<br>
Implementations should be immutable (to ensure thread safety, testability
etc).
States must be serializable so that listeners can follow what is going on in
    the protocol. As usual, mark non-serializable fields as transient.
'''
@abstractmethod
def getSettings(self)->SessionSettings:
'''
@return the SessionSettings
'''
@abstractmethod
def getActions(self)->List[Action]:
'''
        @return unmodifiable list of actions done so far, in the order in which
they arrived. List of list allows implementations to add some
extra structure to the actions, eg one list per phase
'''
@abstractmethod
def getProgress(self) -> Optional[Progress]:
'''
@return the progress of the session. Can return null if the session did
not even start yet. Notice that the protocol determines when a
session officially starts (eg, in SAOP it starts after all
                parties were connected successfully).
'''
@abstractmethod
def getAgreements(self)->Agreements :
'''
@return the current standing agreements
An agreement does not necessarily mean {@link #isFinal(long)}.
'''
@abstractmethod
def getResult(self)->Optional[SessionResult]:
'''
@return the {@link SessionResult} which is a short report of the final
outcome. Assumes {@link #isFinal(long)}. result may be undefined
if not.
'''
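# Minimal illustrative implementation (not part of geniusweb itself): it just
# stores the values it is given and hands them back unchanged, as a sketch of
# the immutable-state contract described above. A real state would also
# implement NegoState's own members (e.g. isFinal) and the protocol
# transition checks before it could be used.
class SimpleSessionState(SessionState):
    def __init__(self, settings: SessionSettings, actions: List[Action],
            progress: Optional[Progress], agreements: Agreements,
            result: Optional[SessionResult] = None):
        self._settings = settings
        self._actions = list(actions)
        self._progress = progress
        self._agreements = agreements
        self._result = result

    def getSettings(self) -> SessionSettings:
        return self._settings

    def getActions(self) -> List[Action]:
        return list(self._actions)

    def getProgress(self) -> Optional[Progress]:
        return self._progress

    def getAgreements(self) -> Agreements:
        return self._agreements

    def getResult(self) -> Optional[SessionResult]:
        return self._result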
| 2.125
| 2
|
lambda_function.py
|
tsuyo/LambdaWithTwilio
| 0
|
12775681
|
import re
import config
from datetime import datetime
from urllib2 import urlopen
from twilio.rest import TwilioRestClient
def validateStatus(site):
'''Return False to trigger the canary'''
return urlopen(site).getcode() == 200
def validateString(site):
p = re.compile(config.CHECK_STR)
return p.match(urlopen(site).read())
def lambda_handler(event, context):
print('Checking {} at {}...'.format(config.SITE, event['time']))
try:
if not validateString(config.SITE):
raise Exception('Validation failed')
except:
print('Check failed!')
make_call()
raise
else:
print('Check passed!')
return event['time']
finally:
print('Check complete at {}'.format(str(datetime.now())))
def make_call():
client = TwilioRestClient(config.ACCOUNT_SID, config.AUTH_TOKEN)
call = client.calls.create(
to=config.PHONE_TO,
from_=config.PHONE_FROM,
url=config.CALL_URL,
method="GET",
fallback_method="GET",
status_callback_method="GET",
record="false"
)
return call.sid
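# Hedged sketch of the config module imported above - the attribute names all
# match references in this file, every value is a placeholder:
#
#   # config.py
#   SITE = 'https://example.com/'
#   CHECK_STR = r'.*Welcome.*'
#   ACCOUNT_SID = 'ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
#   AUTH_TOKEN = '<your twilio auth token>'
#   PHONE_TO = '+15005550001'
#   PHONE_FROM = '+15005550002'
#   CALL_URL = 'https://example.com/voice.xml'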
| 2.671875
| 3
|
webhook_relay.py
|
kav2k/friend_computer_webhook
| 0
|
12775682
|
#!/usr/bin/env python3
import time
from http.server import HTTPServer, BaseHTTPRequestHandler
import requests
import json
from functools import reduce
import re
class Server(BaseHTTPRequestHandler):
def do_HEAD(self):
self.send_json({
'success': False,
'message': 'Wrong method, POST expected'
}, 400)
def do_GET(self):
self.send_json({
'success': False,
'message': 'Wrong method, POST expected'
}, 400)
def do_POST(self):
def escaper(s):
s = s[1]
print("Replacing:\n{}".format(s))
s = s.replace("\\", "\\\\")
s = s.replace("\r\n", "\\n")
s = s.replace("\n", "\\n")
s = s.replace("\"", "\\\"")
return s
if self.headers.get("Content-type") != "application/json":
print(self.headers)
self.send_json({
'success': False,
'message': 'Wrong content type (\'application/json\' expected)'
}, 400)
else:
content_length = int(self.headers.get('Content-Length', 0))
post_body = self.rfile.read(content_length).decode('utf-8')
print("Decoded: {}".format(post_body))
post_body = re.sub(r"\<\<\<([\s\S]*)\>\>\>", escaper, post_body, flags=re.M)
print("Edited: {}".format(post_body))
data = json.loads(post_body)
if data.get('authKey') == config['authKey']:
error = self.relay(data)
if not error:
self.send_json({'success': True})
else:
self.send_json({'success': False, 'message': error})
else:
self.send_json({
'success': False,
'message': 'Auth key missing or incorrect'
}, 403)
def relay(self, data):
if "url" not in data:
return "No video URL received"
if "author" not in data:
return "No video author name received"
if "title" not in data:
return "No video title name received"
if "description" not in data:
return "No video description received"
post_data = {
"content":
"@here {author} uploaded **{title}** at {url}".format(
author=data["author"][:256],
title=data["title"][:256],
url=data["url"][:256]
)
}
print("POST head")
self.relay_json(post_data)
# requests.post(config["discordWebhookURL"], data=post_data)
descriptions = split_text(filter_text(data["description"]), 2048)
page = 1
for description in descriptions:
post_data = {
"embeds": [{
"type": "rich",
"description": description
}]
}
if page == 1:
post_data["embeds"][0]["title"] = data["title"][:256]
print("POST description {}".format(page))
page += 1
self.relay_json(post_data)
# requests.post(config["discordWebhookURL"], data=post_data)
return None
def relay_json(self, data):
requests.post(
config["discordWebhookURL"],
data=json.dumps(data).encode('utf-8'),
headers={
"Content-Type": "application/json"
}
)
def send_json(self, obj, status=200):
self.send_response(status)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(obj).encode())
def filter_text(text):
paragraphs = text.split("\n")
def try_match(line):
for regexp in patterns:
if regexp.match(line):
return False
return True
# Filter paragraphs according to config
paragraphs = [paragraph for paragraph in paragraphs if try_match(paragraph)]
return "\n".join(paragraphs)
def split_text(text, limit):
def paragraph_splitter(result, paragraph):
if len(paragraph) <= limit:
# If a paragraph can fit in one message, just add it
result.append(paragraph)
else:
# If a paragraph is too long, split it
while len(paragraph):
if len(paragraph) > limit:
# Remaining portion still too long
# Try to split at the last space possible
idx = paragraph.rfind(' ', 0, limit - 5) + 1
if idx < 1:
# If no space found, split as far as possible
idx = limit - 5
# Add the chopped-off portion, proceed with rest
result.append(paragraph[:idx])
paragraph = paragraph[idx:]
else:
# Remaining portion OK, just add it
result.append(paragraph)
paragraph = ""
if len(paragraph):
# If this was not the last portion, add continuation mark
result[-1] += "[...]"
return result
if limit < 6:
raise RuntimeError("Limit too narrow to split")
# Split text into paragraphs
paragraphs = text.split("\n")
# Split up paragraphs that are too long
paragraphs = reduce(paragraph_splitter, paragraphs, [])
# Each paragraph should already be small enough
for paragraph in paragraphs:
assert(len(paragraph) < limit)
# Assemble chunks as large as possible out of paragraphs
result = []
candidate = ""
quota = limit
for paragraph in paragraphs:
if len(paragraph) + 1 <= quota:
# We still have space for the paragraph + "\n"
if len(candidate) > 0:
candidate += "\n"
quota -= 1
candidate += paragraph
quota -= len(paragraph)
else:
# We can't add another paragraph, output current chunk
if len(candidate) > 0:
result.append(candidate)
candidate = ""
quota = limit
assert(len(paragraph) < quota)
# Start a new candidate chunk
candidate += paragraph
quota -= len(paragraph)
# Add last chunk, if non-empty
if len(candidate.strip()):
result.append(candidate)
# Strip extra "\n"
result = [part.strip() for part in result]
for part in result:
assert(len(part) < limit)
return result
if __name__ == '__main__':
global config, patterns
try:
with open("config.json") as config_file:
config = json.load(config_file)
except IOError:
print("Error reading config file")
exit(1)
patterns = []
for pattern in config.get("filters", []):
patterns.append(re.compile(pattern))
httpd = HTTPServer((config["host"], config["port"]), Server)
print(time.asctime(), 'Server UP - %s:%s' % (config["host"], config["port"]))
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print(time.asctime(), 'Server DOWN - %s:%s' % (config["host"], config["port"]))
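# Hedged example of the config.json this script reads at startup - key names
# are taken from the lookups above, all values are placeholders:
#
#   {
#     "host": "0.0.0.0",
#     "port": 8080,
#     "authKey": "some-shared-secret",
#     "discordWebhookURL": "https://discord.com/api/webhooks/<id>/<token>",
#     "filters": ["^Follow me on .*$"]
#   }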
| 2.59375
| 3
|
test.py
|
harshkothari410/snn-image-segmentation
| 7
|
12775683
|
<reponame>harshkothari410/snn-image-segmentation<filename>test.py
import numpy as np
import matplotlib.pyplot as plt
import nengo
from nengo.dists import Uniform
model = nengo.Network(label='A Single Neuron')
with model:
neuron = nengo.Ensemble(1, dimensions=1, # Represent a scalar
                         intercepts=Uniform(-.5, -.5), # Set intercept to -0.5
                         max_rates=[20], # Set the maximum firing rate of the neuron to 20 Hz
                         encoders=[[1]]) # Sets the neuron's firing rate to increase for positive input
with model:
my_node = nengo.Node(output=680)
with model:
nengo.Connection(my_node, neuron)
with model:
cos_probe = nengo.Probe(my_node, synapse=0.01) # The original input
spikes = nengo.Probe(neuron.neurons, synapse=0.01) # The raw spikes from the neuron
voltage = nengo.Probe(neuron.neurons, 'voltage', synapse=0.01) # Subthreshold soma voltage of the neuron
filtered = nengo.Probe(neuron, synapse=0.01) # Spikes filtered by a 10ms post-synaptic filter
sim = nengo.Simulator(model)
sim.run(0.01)
# print sim.data
plt.plot(sim.trange(), sim.data[filtered])
plt.plot(sim.trange(), sim.data[cos_probe])
plt.xlim(0, 0.01)
# Plot the spiking output of the ensemble
from nengo.utils.matplotlib import rasterplot
plt.figure(figsize=(10, 8))
plt.subplot(221)
rasterplot(sim.trange(), sim.data[spikes])
plt.ylabel("Neuron")
plt.xlim(0, 0.01)
import pylab
pylab.show()
| 2.765625
| 3
|
hogar/ResponseHandler.py
|
breyten/politwoops-hogar-bot
| 1
|
12775684
|
# The MIT License (MIT)
#
# Copyright (c) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from hogar.static import values as static_values
from hogar.Utils import PluginLoader
from hogar.Utils import Telegram
import ConfigParser
import traceback
import logging
logger = logging.getLogger(__name__)
class Response(object):
'''
The Hogar Response handler.
This class mediates the connection between actual
messages received by the API and the plugins
that are available for them.
'''
response = None
command_map = None
message_type = None
plugins = None
sender_information = {'id' : None, 'first_name' : None, 'last_name' : None, 'username' : None}
def __init__(self, response, command_map):
'''
Prepare a new Response() instance.
This __init__ will do a lot of the heavy lifting
when it comes to determining the message_type,
and which plugins are available for it.
--
@param response:dict The parsed Telegram response.
@param command_map:dict The command/type/plugin map.
@return None
'''
if not response['message']:
raise ValueError('No message payload in the response? WTF?')
logger.info('Processing message {message_id} from {first_name} {last_name} '.format(
message_id = response['message']['message_id'],
first_name = response['message']['from']['first_name'].encode('utf-8'),
last_name = response['message']['from']['last_name'].encode('utf-8') \
if 'last_name' in response['message']['from'] else ''
))
self.response = response['message']
self.command_map = command_map
self.message_type = self._get_message_type()
logger.info('Message {message_id} is a {type} message'.format(
message_id = response['message']['message_id'],
type = self.message_type
))
self.sender_information = self._get_sender_information()
self.plugins = self._find_applicable_plugins()
return
def _get_message_type(self):
'''
Get the message type.
--
@return str
'''
# Search for the message type
type_search = [message_type for message_type in static_values.possible_message_types \
if message_type in self.response]
# check that we only got 1 result back from the search
if len(type_search) > 1:
logger.warning('More than 1 message type found: ({res}). Selecting only the first entry'.format(
res = ', '.join(type_search)
))
return type_search[0]
def _get_sender_information(self):
'''
Get information about who sent a message.
--
@return dict
'''
sender = self.response
sender_information = {
'id' : self.response['chat']['id'],
'first_name' : self.response['from']['first_name'],
'last_name' : self.response['from']['last_name'] \
if 'last_name' in self.response['from'] else None,
'username' : '@{u}'.format(u = self.response['from']['username']) \
if 'username' in self.response['from'] else None
}
return sender_information
def _find_applicable_plugins(self):
'''
Find Applicable plugins based on message type.
--
@return dict
'''
# Text types are special for the fact that they can have
# command triggers too. We will only return a map
# of those that have the command.
#
# We will have to clean up the text and remove any '/' or
# '@' mentions so that the command may be triggered
if self.message_type == 'text':
text = self.response['text']
# Remove a mention. This could be the case
# if the bot was mentioned in a chat room
if text.startswith('@'):
text = text.split(' ', 1)[1].strip()
# Some bots will accept commands that started
# with a '/'
if text.startswith('/'):
# Remove the leading /
text = text.replace('/', '', 1).strip()
# If more than one bot is in a group chat, the
            # Telegram client will have commands autocompleted
# as /action@bot_name. We will remove the mention
# in order to extract the command
text = text.split('@')[0].strip()
# Return all of the plugins that have the command
# defined as applicable, or any plugins that use
# the wildcard command
return [x for x in self.command_map['text'] \
if text.split(' ')[0].lower() in x['commands'] or '*' in x['commands']]
return self.command_map[self.message_type]
def check_acl(self):
'''
Check Access Control List
        Check if the ACL features are enabled for this bot.
If it is, ensure that the message was received
from someone that is either the bot owner or
a user.
--
@return bool
'''
# Read the configuration file. We do this here as the
# acls may have changed since the last time this
# module was loaded
config = ConfigParser.ConfigParser()
config.read('settings.ini')
message_from_id = str(self.response['from']['id'])
# Check if ACL processing is enabled
if not config.getboolean('acl', 'enabled'):
return True
if message_from_id not in config.get('acl', 'owners').split(',') \
and message_from_id not in config.get('acl', 'users').split(','):
logger.error('{first_name} ({id}) is not allowed to use this bot'.format(
first_name = self.response['from']['first_name'],
id = message_from_id
))
return False
return True
def run_plugins(self):
'''
Run Plugins
This is the main function responsible for executing
the plugins that have been identified for this
message.
--
@return None
'''
if not self.plugins:
logger.warning('No plugins matched for this message.')
return
#TODO: Handle if no plugins are applicable!
for plugin in self.plugins:
try:
logger.info('Running plugin: {plugin} for message {message_id}'.format(
plugin = plugin['name'],
message_id = self.response['message_id']
))
# Run the plugins run() method
plugin_output = plugin['plugin'].run(self.response)
except Exception, e:
logger.error('Plugin {plugin} failed with: {error}: {trace}'.format(
plugin = plugin['name'],
error = str(e),
trace = traceback.print_exc()
))
continue
# If we should be replying to the message,
# do it.
if (plugin['plugin'].should_reply()):
# Check what the reply type should be. Plugins
# that don't specify one will default to text
reply_type = 'text'
if hasattr(plugin['plugin'], 'reply_type'):
reply_type = plugin['plugin'].reply_type()
Telegram.send_message(self.sender_information, reply_type, plugin_output)
return
| 1.898438
| 2
|
src/gen_embeddings.py
|
anirban-code-to-live/gat-baseline
| 0
|
12775685
|
import os
import argparse
parser = argparse.ArgumentParser(description="Run attention2vec generation embeddings suite.")
parser.add_argument('--dataset', nargs='?', default='cora',
help='Input graph name for saving files')
args = parser.parse_args()
dataset = args.dataset
gamma = [10]
R = [0.5]
T = [3, 4]
train = [20, 30]
for g in gamma:
for r in R:
for t in T:
for tr in train:
print("----------------------------")
print("Parameters : ", g, r, t, tr)
print("----------------------------")
cmd = "python main.py --dataset {} --attn2v_iter {} --r {} --t {} --train_per {}".format(dataset, g, r, t, tr)
print(cmd + "\n")
os.system(cmd)
print("Done!")
| 2.765625
| 3
|
finlab/finlab_old/data.py
|
DandyWei/finlab_course_ml
| 0
|
12775686
|
<gh_stars>0
import sqlite3
import pandas as pd
import os
import datetime
class Data():
def __init__(self):
# Open the database
self.conn = sqlite3.connect(os.path.join('data', "data.db"))
cursor = self.conn.execute('SELECT name FROM sqlite_master WHERE type = "table"')
# Collect all table names
table_names = [t[0] for t in list(cursor)]
# Map every column name to the table it belongs to
self.col2table = {}
for tname in table_names:
# Get all column names of this table
c = self.conn.execute('PRAGMA table_info(' + tname + ');')
for cname in [i[1] for i in list(c)]:
# Record the table that each column name belongs to in self.col2table
self.col2table[cname] = tname
# Initialize self.date (data.get only returns data dated on or before self.date, to avoid using future data)
self.date = datetime.datetime.now().date()
# If self.cache is True, data fetched through data.get is stored in self.data,
# so later calls to data.get can reuse the cached result instead of querying the database again
self.cache = False
self.data = {}
# Pre-fetch all dates stored in each table
self.dates = {}
# For every table, extract the dates of all its rows
for tname in table_names:
c = self.conn.execute('PRAGMA table_info(' + tname + ');')
cnames = [i[1] for i in list(c)]
if 'date' in cnames:
if tname == 'price':
# For the price table, checking the dates of these three tickers is enough (no need to scan every stock, which saves time)
s1 = ("""SELECT DISTINCT date FROM %s where stock_id='0050'"""%('price'))
s2 = ("""SELECT DISTINCT date FROM %s where stock_id='1101'"""%('price'))
s3 = ("""SELECT DISTINCT date FROM %s where stock_id='2330'"""%('price'))
# Fetch the dates, sort them, and store them in self.dates
df = (pd.read_sql(s1, self.conn)
.append(pd.read_sql(s2, self.conn))
.append(pd.read_sql(s3, self.conn))
.drop_duplicates('date').sort_values('date'))
df['date'] = pd.to_datetime(df['date'])
df = df.set_index('date')
self.dates[tname] = df
else:
# Fetch the dates, sort them, and store them in self.dates
s = ("""SELECT DISTINCT date FROM '%s'"""%(tname))
self.dates[tname] = pd.read_sql(s, self.conn, parse_dates=['date'], index_col=['date']).sort_index()
#print('Data: done')
def get(self, name, n):
# Make sure the column name exists in the database
if name not in self.col2table or n == 0:
print('Data: **ERROR: cannot find', name, 'in database')
return pd.DataFrame()
# Determine the date range to fetch (startdate, enddate)
df = self.dates[self.col2table[name]].loc[:self.date].iloc[-n:]
try:
startdate = df.index[-1]
enddate = df.index[0]
except:
print('Data: **WARN: data cannot be retrieved completely:', name)
enddate = df.iloc[0]
# If this date range is already cached in self.data, return it from the cache directly
if name in self.data and self.contain_date(name, enddate, startdate):
return self.data[name][enddate:startdate]
# Fetch the required data from the database
s = ("""SELECT stock_id, date, [%s] FROM %s WHERE date BETWEEN '%s' AND '%s'"""%(name,
self.col2table[name], str(enddate.strftime('%Y-%m-%d')),
str((self.date + datetime.timedelta(days=1)).strftime('%Y-%m-%d'))))
ret = pd.read_sql(s, self.conn, parse_dates=['date']).pivot(index='date', columns='stock_id')[name]
# Store the result in the cache so future calls do not need to hit the database again
if self.cache:
self.data[name] = ret
return ret
# Check whether this date range already exists in self.data
def contain_date(self, name, startdate, enddate):
if name not in self.data:
return False
if self.data[name].index[0] <= startdate <= enddate <= self.data[name].index[-1]:
return True
return False
# Currently unused; no need to call this
def get3(self, name):
s = ("""SELECT stock_id, %s FROM %s """%(name, self.col2table[name]))
return pd.read_sql(s, self.conn, index_col=['stock_id'])
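# Usage sketch (the column name below is hypothetical; any column stored in data.db works):
#   data = Data()
#   data.cache = True
#   close = data.get('close', 60)  # DataFrame indexed by date, one column per stock_id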
| 3.09375
| 3
|
src/pyfuzz/string_mutations.py
|
fabriceyhc/pyfuzz
| 0
|
12775687
|
import random
def delete_random_character(s):
"""Returns s with a random character deleted"""
if s == "":
return s
pos = random.randint(0, len(s) - 1)
# print("Deleting", repr(s[pos]), "at", pos)
return s[:pos] + s[pos + 1:]
def insert_random_character(s):
"""Returns s with a random character inserted"""
pos = random.randint(0, len(s))
random_character = chr(random.randrange(32, 127))
# print("Inserting", repr(random_character), "at", pos)
return s[:pos] + random_character + s[pos:]
def flip_random_character(s):
"""Returns s with a random bit flipped in a random position"""
if s == "":
return s
pos = random.randint(0, len(s) - 1)
c = s[pos]
bit = 1 << random.randint(0, 6)
new_c = chr(ord(c) ^ bit)
# print("Flipping", bit, "in", repr(c) + ", giving", repr(new_c))
return s[:pos] + new_c + s[pos + 1:]
def mutate_strings(s):
"""Return s with a random mutation applied"""
mutators = [
delete_random_character,
insert_random_character,
flip_random_character
]
mutator = random.choice(mutators)
# print(mutator)
return mutator(s)
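# Minimal usage sketch (illustrative seed string; output depends on the random seed)
if __name__ == "__main__":
    random.seed(0)  # deterministic output for demonstration only
    sample = "http://example.com/"
    for _ in range(5):
        sample = mutate_strings(sample)
        print(repr(sample))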
| 3.78125
| 4
|
Accountability Report/data_classes.py
|
ByrdOfAFeather/Catawba-County-ArcGis
| 0
|
12775688
|
<reponame>ByrdOfAFeather/Catawba-County-ArcGis
import xlrd
from data_transformation_functions import setup_nc_dataframe, setup_dicts, remove_section
from sklearn.preprocessing import LabelEncoder, PolynomialFeatures, StandardScaler
from sklearn.model_selection import train_test_split
class NCDatabase:
def __init__(self):
self.report = xlrd.open_workbook('Databases/acctsumm15.xlsx').sheet_by_index(0)
self.overall = setup_dicts()
self.overall_dataframe = self.overall[0]
self.overall_grades = self.overall[1]
self.database = setup_nc_dataframe(self.overall_grades, self.overall_dataframe)
def classification_setup(self, target_subject='Math', score_threshold=None):
"""Sets up the NC Database for classification based on input
:param target_subject: Target subject, valid options are "Math", "English", or "Biology"
:param score_threshold: Optional to split the database into two classes, below and above the threshold"""
# Given a score threshold: there are only two classes, one less than the score and one greater than the score
if score_threshold:
self.database.loc[self.database[target_subject] < score_threshold, target_subject] = 0
self.database.loc[self.database[target_subject] >= score_threshold, target_subject] = 1
else:
# Splits into 8 classes
self.database[target_subject][(self.database[target_subject] < 14)] = 0
self.database[target_subject][(self.database[target_subject] >= 14) & (self.database[target_subject] < 25)] = 1
self.database[target_subject][(self.database[target_subject] >= 25) & (self.database[target_subject] < 37)] = 2
self.database[target_subject][(self.database[target_subject] >= 37) & (self.database[target_subject] < 50)] = 3
self.database[target_subject][(self.database[target_subject] >= 50) & (self.database[target_subject] < 63)] = 4
self.database[target_subject][(self.database[target_subject] >= 63) & (self.database[target_subject] < 75)] = 5
self.database[target_subject][(self.database[target_subject] >= 75) & (self.database[target_subject] < 87.5)] = 6
self.database[target_subject][(self.database[target_subject] >= 87.5) & (self.database[target_subject] < 100)] = 7
# Sets up an encoder to encode school names
x_plot_encoder = LabelEncoder()
# Gets the full y-value vector
y = self.database[target_subject].values.astype(float)
# Removes the irrelevant sections of the original data set
x = remove_section(self.database, ['Biology', 'Math', 'English', 'StateNamePublicSchoolLatestavailableyear',
'LocationAddress1PublicSchool201415', 'LocationCityPublicSchool201415',
'LocationZIPPublicSchool201415', 'TitleISchoolStatusPublicSchool201415',
'LowestGradeOfferedPublicSchool201415',
'HighestGradeOfferedPublicSchool201415', 'District',
'Grades912StudentsPublicSchool201415',
'Grade12offeredPublicSchool201415',
'Grade11offeredPublicSchool201415',
'Grade10offeredPublicSchool201415',
'Grade9offeredPublicSchool201415'])
# Gets a dataset without the names of the schools
x_without_school_names = remove_section(x, ['SchoolNamePublicSchool201415'])
# Gets training and validation sets
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=.7, random_state=225530)
# Fits an encoder to the school names in the training set
x_train.SchoolNamePublicSchool201415 = x_plot_encoder.fit_transform(x_train.SchoolNamePublicSchool201415)
# gets the integer values of the school names as they are encoded
school_encoded_train = x_train.SchoolNamePublicSchool201415.astype(int)
# removes the school names from the training set
x_train = remove_section(x_train, ['SchoolNamePublicSchool201415'])
# creates a standard scaler and fits it to x_train
ka = StandardScaler().fit(x_train)
# scales x_train
x_train = ka.transform(x_train)
# Does the previous steps to the testing set
x_test.SchoolNamePublicSchool201415 = x_plot_encoder.fit_transform(x_test.SchoolNamePublicSchool201415)
school_encoded_test = x_test.SchoolNamePublicSchool201415
x_test = remove_section(x_test, ['SchoolNamePublicSchool201415',])
x_test = ka.transform(x_test)
# writes the database out to a csv
try:
x.to_csv('Databases/classification.csv')
except IOError:
print("Error writing database to file! Continuing...")
# Returns the segmented values for model building functions
return x_without_school_names, y, x_train, school_encoded_train, y_train, x_test, school_encoded_test, y_test
def regression_setup(self, target_subject='Math', degree=2):
"""Setups NC Database for regression
:param target_subject: Target subject, valid options are "Math", "English", or "Biology"
:param degree: Optional definition to declare the degree of polynomial features"""
# sets up target values
y = self.database[target_subject].astype(float).values
# Removes irrelevant values
x = remove_section(self.database, ['Biology', 'Math', 'English', 'StateNamePublicSchoolLatestavailableyear',
'LocationAddress1PublicSchool201415', 'LocationCityPublicSchool201415',
'LocationZIPPublicSchool201415', 'TitleISchoolStatusPublicSchool201415',
'LowestGradeOfferedPublicSchool201415',
'HighestGradeOfferedPublicSchool201415', 'District',
'Grades912StudentsPublicSchool201415',
'Grade12offeredPublicSchool201415',
'Grade11offeredPublicSchool201415',
'Grade10offeredPublicSchool201415',
'Grade9offeredPublicSchool201415'])
# Creates an encoder
x_plot_encoder = LabelEncoder()
# Gets rid of schools names and splits the data sets
x_without_school_names = remove_section(x, ['SchoolNamePublicSchool201415'])
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=.7, random_state=225)
# Fits the encoder to the school names and remove the sections
x_train.SchoolNamePublicSchool201415 = x_plot_encoder.fit_transform(x_train.SchoolNamePublicSchool201415)
school_encoded_train = x_train.SchoolNamePublicSchool201415.astype(int)
x_train = remove_section(x_train, ['SchoolNamePublicSchool201415'])
# Creates polynomial features
x_train = PolynomialFeatures(degree).fit_transform(x_train)
ka = StandardScaler().fit(x_train)
x_train = ka.transform(x_train)
# sets the same features up on the test set
x_test.SchoolNamePublicSchool201415 = x_plot_encoder.fit_transform(x_test.SchoolNamePublicSchool201415)
school_encoded_test = x_test.SchoolNamePublicSchool201415
x_test = remove_section(x_test, ['SchoolNamePublicSchool201415',])
x_test = PolynomialFeatures(degree).fit_transform(x_test)
x_test = ka.transform(x_test)
# Saves a copy of the current database
try:
x.to_csv('Databases/regression.csv')
except IOError:
print("Failed to save the database to file! Continuing....")
return x_without_school_names, y, x_train, school_encoded_train, y_train, x_test, school_encoded_test, y_test
| 3.1875
| 3
|
inbm/cloudadapter-agent/cloudadapter/cloud/client/handlers/echo_handler.py
|
ahameedx/intel-inb-manageability
| 5
|
12775689
|
<reponame>ahameedx/intel-inb-manageability
"""
Handler that echoes a message when a message is received.
"""
from typing import Callable, Optional
from ._handler import Handler
from ..connections.mqtt_connection import MQTTConnection
from ..utilities import Formatter
from ....utilities import make_threaded
import logging
logger = logging.getLogger(__name__)
class EchoHandler(Handler):
def __init__(self, topic_formatter: Formatter, payload_formatter: Formatter, subscribe_topic: Optional[str], connection: MQTTConnection) -> None:
"""Construct a generic handler
@param topic_formatter: (Formatter) Formatter for response publish topic
@param payload_formatter: (Formatter) Formatter for response payload
@param subscribe_topic: (str) Topic to subscribe for incoming messages
@param connection: (Connection) Connection to use
"""
self._topic_formatter = topic_formatter
self._payload_formatter = payload_formatter
self._connection = connection
self._connection.subscribe(subscribe_topic, make_threaded(self._on_message))
def bind(self, key: str, callback: Callable):
"""This is currently unused, but would be useful to allow sideffects on messages
@exception NotImplementedError: If called
"""
raise NotImplementedError("Callbacks for echo handler not implemented")
def _on_message(self, topic: str, payload: str):
"""Callback for subscribed cloud messages
@param topic: (str) Specific topic
@param payload: (str) Raw UTF-8 payload
"""
# Log the message
logger.info("Received message on %s: %s", topic, payload)
# Acknowledge the command
rid = self._connection.request_id
payload = self._payload_formatter.format(request_id=rid)
topic = self._topic_formatter.format(request_id=rid)
self._connection.publish(topic, payload)
| 2.546875
| 3
|
apps/fhirproxy/utils.py
|
thebureaugroup/sharemyhealth
| 0
|
12775690
|
<reponame>thebureaugroup/sharemyhealth
import requests
import urllib
from django.conf import settings
def fhir_get_access_token_with_client_credentials():
data = urllib.parse.urlencode({"client_id": settings.BACKEND_FHIR_CLIENT_ID,
"client_secret": settings.BACKEND_FHIR_CLIENT_SECRET,
"resource": settings.BACKEND_FHIR_RESOURCE,
"grant_type": "client_credentials"})
response = requests.post(
url=settings.BACKEND_FHIR_TOKEN_ENDPOINT, data=data)
reply = response.json()
# print(reply)
return reply['access_token']
def fhir_secured_request(fhir_endpoint, access_token, params={}):
print("Secure:", fhir_endpoint, params)
# accesstoken = FhirSecurity("https://nwt-staging.azurehealthcareapis.com")
header = {"Authorization": "Bearer " + access_token}
r = requests.get(fhir_endpoint, params=params, headers=header)
print(r.url)
return r
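# Usage sketch (the endpoint path and params below are hypothetical; real values come from Django settings):
#   token = fhir_get_access_token_with_client_credentials()
#   r = fhir_secured_request(settings.BACKEND_FHIR_RESOURCE + '/Patient', token, {'_count': 10})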
| 2.125
| 2
|
src/common/utils.py
|
edenbuaa/openpai-runtime
| 12
|
12775691
|
import logging
import re
import pystache
def init_logger():
logging.basicConfig(
format=
"%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s",
level=logging.INFO,
)
def _convert_to_dict(obj) -> dict:
converted_obj = {}
if isinstance(obj, list):
for i, value in enumerate(obj):
converted_obj[str(i)] = value
elif isinstance(obj, dict):
for key, value in obj.items():
converted_obj[key] = _convert_to_dict(value)
else:
converted_obj = obj
return converted_obj
def enable_request_debug_log(func):
def wrapper(*args, **kwargs):
requests_log = logging.getLogger("urllib3")
level = requests_log.level
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
try:
return func(*args, **kwargs)
finally:
requests_log.setLevel(level)
requests_log.propagate = False
return wrapper
def render_string_with_secrets(string, secrets) -> str:
if not secrets:
return string
secret_dict = _convert_to_dict(secrets)
parsed = pystache.parse(string, delimiters=("<%", "%>"))
for token in parsed._parse_tree: #pylint: disable=protected-access
if isinstance(token, pystache.parser._EscapeNode): #pylint: disable=protected-access
token.key = re.sub(
r"\[(\d+)\]", r".\1",
token.key) # make format such as $secrets.data[0] works
return pystache.Renderer().render(parsed, {"$secrets": secret_dict})
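# Usage sketch for render_string_with_secrets (hypothetical secret payload; delimiters are <% %> as parsed above):
#   render_string_with_secrets("token=<%$secrets.data[0]%>", {"data": ["abc"]})
#   returns "token=abc", because list indices like [0] are rewritten to dotted keys before rendering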
| 2.3125
| 2
|
2020/day-17/process.py
|
MatthieuMichon/advent-of-code
| 1
|
12775692
|
#!/usr/bin/env python
"""
Advent of Code 2020: Day 17
"""
import copy
from collections import defaultdict
import math
import itertools
import signal
import sys
from types import FrameType
from typing import List, Mapping
from pathlib import Path
DEBUG = False
ACTIVE = True
INACTIVE = False
# Common -----------------------------------------------------------------------
def decode(file: Path) -> dict[tuple[int, int], bool]:
"""
Decode file contents
:param file: file containing the input values
:return: 2d map of the initial slice
"""
fh = open(file)
decoded_map = dict()
for y, l in enumerate(fh):
for x, c in enumerate(l.strip()):
active = c == '#'
decoded_map[(x, y)] = active
return decoded_map
def list_indexes(map_: dict[tuple, any], axis: int) -> list:
"""
List the indexes of a given axis in a mapping
:param map_: mapping of a property (activation) per grid position
:param axis: selected grid axis
:return: set of indexes across the given axis
"""
axis_count: int = len(next(iter(map_.keys())))
if axis >= axis_count:
return [0]
indexes = set(position[axis] for position in map_.keys())
index_list = sorted(indexes)
return index_list
def visualize(map_: dict[tuple, any]) -> None:
"""
Visualize slices of a mapping
:param map_: mapping of a property (activation) per grid position
:return: nothing
"""
conv = lambda pos, axis_cnt: \
('X' if map_[pos[:axis_cnt]] else ".") if isinstance(
map_[pos[:axis_cnt]], bool) else str(map_[pos[:axis_cnt]])
axis_count: int = len(next(iter(map_.keys())))
for w in list_indexes(map_, 3):
for z in list_indexes(map_, 2):
if axis_count == 4:
print(f'z={z}, w={w}')
elif axis_count == 3:
print(f'z={z}')
for y in list_indexes(map_, 1):
print(f'{" ".join(conv((x, y, z, w), axis_count) for x in list_indexes(map_, 0))}')
def execute_cycle(state: dict[tuple, bool]) -> dict[tuple[int, int, int], bool]:
"""
Execute one single state update cycle
:param state: 3d mapping of the state
:return: 3d mapping of the state
"""
expanded_state = state
axis_count: int = len(next(iter(state.keys())))
for axis in range(axis_count):
state = copy.copy(expanded_state)
axis_values = list_indexes(map_=state, axis=axis)
for upper in [True, False]:
index = max(axis_values) if upper else min(axis_values)
state_slice = {pos: v for pos, v in state.items()
if pos[axis] == index}
slice_active = any(state_slice.values())
if slice_active:
new_index = index + (1 if upper else -1)
for pos, s in state_slice.items():
new_pos = tuple(new_index if i == axis else a
for i, a in enumerate(pos))
expanded_state[new_pos] = False
if DEBUG:
visualize(expanded_state)
state_dd = defaultdict(bool, expanded_state)
active_neighbors_map = dict()
moves = [[-1, 0, +1]] * axis_count
self = tuple([0] * axis_count)
directions = [m for m in list(itertools.product(*moves)) if m != self]
for pos in expanded_state.keys():
active_neighbors = 0
for dir_ in directions:
neighbor = tuple(pos[axis] + dir_[axis] for axis in range(axis_count))
if state_dd[neighbor]:
active_neighbors += 1
active_neighbors_map[pos] = active_neighbors
if DEBUG:
visualize(active_neighbors_map)
updated_state = expanded_state #copy.copy(expanded_state)
for pos, count in active_neighbors_map.items():
cube_active = expanded_state[pos] == ACTIVE
neighbors_active = active_neighbors_map[pos]
if cube_active and neighbors_active not in [2, 3]:
updated_state[pos] = INACTIVE
elif not cube_active and neighbors_active == 3:
updated_state[pos] = ACTIVE
if DEBUG:
visualize(updated_state)
return updated_state
# Part One ---------------------------------------------------------------------
def process(file: Path) -> int:
"""
Process input file yielding the submission value
:param file: file containing the input values
:return: value to submit
"""
initial_slice = decode(file=file)
initial_state = {pos + tuple([0]): state for pos, state in initial_slice.items()}
if DEBUG:
visualize(map_=initial_state)
state = initial_state
for cycle in range(6):
if DEBUG:
visualize(state)
new_state = execute_cycle(state=state)
state = new_state
active_cubes = sum(state.values())
submission = active_cubes
return submission
# Part Two ---------------------------------------------------------------------
def process_part2(file: Path) -> int:
"""
Process input file yielding the submission value
:param file: file containing the input values
:return: value to submit
"""
initial_slice = decode(file=file)
initial_state = {pos + tuple([0, 0]): state
for pos, state in initial_slice.items()}
if DEBUG:
visualize(map_=initial_state)
state = initial_state
for cycle in range(6):
if DEBUG:
visualize(state)
new_state = execute_cycle(state=state)
state = new_state
active_cubes = sum(state.values())
submission = active_cubes
return submission
# Main -------------------------------------------------------------------------
def main() -> int:
"""
Main function
:return: Shell exit code
"""
files = ['./example.txt', './input.txt']
#files = ['./example.txt']
#files = []
for f in files:
print(f'In file {f}:')
print(f'\tPart One: {process(file=Path(f))}')
files = ['./example.txt', './input.txt']
#files = ['./example.txt']
#files = []
for f in files:
print(f'In file {f}:')
print(f'\tPart Two: {process_part2(file=Path(f))}')
return 0
def handle_sigint(signal_value: signal.Signals, frame: FrameType) -> None:
"""
Interrupt signal call-back method
:param signal_value: signal (expected SIGINT)
:param frame: current stack frame at the time of signal
:return: nothing
"""
assert signal_value == signal.SIGINT
print(frame.f_locals)
sys.exit(1)
if __name__ == '__main__':
signal.signal(signal.SIGINT, handle_sigint)
sys.exit(main())
| 2.9375
| 3
|
live_test.py
|
alanwatts36/attractiveness
| 11
|
12775693
|
<filename>live_test.py
import numpy as np
import cv2
from tensorflow.keras.models import load_model
import helper
if __name__ == "__main__":
model_name = 'attractiveNet_mnv2'
model_path = 'models/' + model_name + '.h5'
model = load_model(model_path)
cap = cv2.VideoCapture(0)
while(True):
ret, frame = cap.read()
score = model.predict(np.expand_dims(helper.preprocess_image(frame,(350,350)), axis=0))
text1 = f'AttractiveNet Score: {str(round(score[0][0],1))}'
text2 = "press 'Q' to exit"
cv2.putText(frame,text1, (10,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,0), 2, cv2.LINE_AA)
cv2.putText(frame,text2, (10,100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,0), 2, cv2.LINE_AA)
cv2.imshow('AttractiveNet',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| 2.703125
| 3
|
python/oracle.py
|
aeternity/dev-tools
| 0
|
12775694
|
#!/usr/bin/python
"""
Test oracle client
Author: <NAME>
Copyright (c) 2018 aeternity developers
Permission to use, copy, modify, and/or distribute this software for
any purpose with or without fee is hereby granted, provided that the
above copyright notice and this permission notice appear in all
copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
"""
import asyncio
from epoch import Epoch
import json
import os
from websocket import create_connection
class Oracle:
def __init__(self):
self.pub_key = os.environ['AE_PUB_KEY']
self.url = "ws://localhost:" + os.environ['AE_WEBSOCKET'] + "/websocket"
self.websocket = None
self.local_port = os.environ['AE_LOCAL_PORT']
self.local_internal_port = os.environ['AE_LOCAL_INTERNAL_PORT']
self.epoch = Epoch()
def connect_websocket(self):
if not self.websocket:
self.websocket = create_connection(self.url)
def register(self, query_format, response_format, query_fee, ttl, fee):
self.connect_websocket()
query = { "target": "oracle",
"action": "register",
"payload": { "type": "OracleRegisterTxObject",
"vsn": 1,
"account": self.pub_key,
"query_format": query_format,
"response_format": response_format,
"query_fee": int(query_fee),
"ttl": {"type": "delta",
"value": int(ttl)},
"fee": int(fee) } }
j = json.dumps(query)
print(j)
self.epoch.update_top_block()
self.websocket.send(j)
response = json.loads(self.websocket.recv())
if not response['payload']['result'] == "ok":
raise RuntimeError(response)
oracle_id = response['payload']['oracle_id']
self.epoch.wait_for_block()
return oracle_id
def wait_for_block(self):
self.epoch.update_top_block()
self.epoch.wait_for_block()
def subscribe(self, oracle_id, callback = None):
self.connect_websocket()
query = {"target": "oracle",
"action": "subscribe",
"payload": {"type": "query",
"oracle_id": oracle_id }}
j = json.dumps(query)
self.websocket.send(j)
while True:
response = json.loads(self.websocket.recv())
print(response)
if response['action'] == 'mined_block':
continue
if not response['payload']['result'] == 'ok':
raise RuntimeError(response)
id = response['payload']['subscribed_to']['oracle_id']
break
mining_events = 0
while True:
data = self.websocket.recv()
j = json.loads(data)
print(j)
if j['action'] == 'mined_block':
mining_events += 1
continue
if j['action'] == 'new_oracle_query':
if callback:
callback(j)
else:
print("Unhandled")
if mining_events == 0:
self.epoch.wait_for_block()
def query(self, oracle_pubkey, query_fee, query_ttl, response_ttl,
fee, query):
self.connect_websocket()
request = {"target": "oracle",
"action": "query",
"payload": {"type": "OracleQueryTxObject",
"vsn": 1,
"oracle_pubkey": oracle_pubkey,
"query_fee": int(query_fee),
"query_ttl": {"type": "delta",
"value": int(query_ttl)},
"response_ttl": {"type": "delta",
"value": int(response_ttl)},
"fee": int(fee),
"query": query }}
j = json.dumps(request)
print(j)
self.websocket.send(j)
response = self.websocket.recv()
print(response)
response = json.loads(response)
if response['payload']['result'] == "ok":
return response['payload']['query_id']
self.epoch.wait_for_block()
return False
def subscribe_query(self, query_id, callback = None):
self.connect_websocket()
request = {"target": "oracle",
"action": "subscribe",
"payload": {"type": "response",
"query_id": query_id }}
j = json.dumps(request)
print(j)
self.websocket.send(j)
# check response, might have to consume a block mined message
while True:
blocks_mined = 0
response = self.websocket.recv()
response = json.loads(response)
print(response)
if response['action'] == 'mined_block':
blocks_mined += 1
continue
if response['action'] == 'new_oracle_response':
if callback:
callback(response['payload'])
else:
print(response['payload'])
break
# Should we get here?
if not response['payload']['result'] == 'ok':
raise RuntimeError(response)
def respond(self, query_id, fee, reply):
self.connect_websocket()
response = {"target": "oracle",
"action": "response",
"payload": {"type": "OracleResponseTxObject",
"vsn": 1,
"query_id": query_id,
"fee": int(fee),
"response": reply}}
response = json.dumps(response)
print(response)
self.websocket.send(response)
| 2.09375
| 2
|
setup.py
|
Lyrichu/sopt
| 40
|
12775695
|
#!/usr/bin/env python
# coding=utf-8
from setuptools import setup,find_packages
setup(
name="sopt",
version="0.0.6.1",
description="sopt:a simple python optimizer library",
long_description=
'''
sopt is a simple python optimizer library.Currentlly,it includes some stochastic optimization
algorithms,like Genetic Algorithm(GA),Particle Swarm Optimization(PSO),Simulated Anealing
(SA),Random Walk(and its improvement version),and some gradient based optimizers,like Gradient
Descent,Momentum,AdaGrad,RMSProp and Adam Optimizers.For the GA optimizers,it includes many
kinds of different selected methods,mutation methods etc,as well as for PSO and other optimizers,
so you can try many different kinds of optimizers with different settings,all the stochastic optimization
also supports the non-linear complex constraints by using penalty methods or dropout-bad-solution methods.
''',
author='lyrichu',
author_email='<EMAIL>',
url = "http://www.github.com/Lyrichu",
maintainer='lyrichu',
maintainer_email='<EMAIL>',
packages=['sopt','sopt/GA','sopt/SGA','sopt/PSO','sopt/test','sopt/util','sopt/Optimizers'],
package_dir={'sopt': 'sopt'},
install_requires=['numpy']
)
| 1.664063
| 2
|
app/code/data_cleaning.py
|
tanviwagh/Movies_Analysis
| 0
|
12775696
|
<reponame>tanviwagh/Movies_Analysis
import boto3
from pyspark.sql.functions import col, regexp_extract, regexp_replace, explode, split, udf, date_format, to_date
from pyspark.sql.types import DoubleType, DateType, IntegerType
def process(spark, config):
data_folder_name = config['data']['data_folder_name']
parquet_folder_name = config['data']['parquet_folder_name']
emr_path = config['paths']['emr_path']
movie_tbl_name = config['athena']['movie_tbl_name']
genre_tbl_name = config['athena']['genre_tbl_name']
artist_tbl_name = config['athena']['artist_tbl_name']
music_tbl_name = config['athena']['music_tbl_name']
director_tbl_name = config['athena']['director_tbl_name']
producer_tbl_name = config['athena']['producer_tbl_name']
writer_tbl_name = config['athena']['writer_tbl_name']
s3_bucket_path = config['s3_bucket_details']['s3_bucket_path']
bucket_name = config['s3_bucket_details']['s3_bucket_data_path']
bucket= bucket_name.split('/')[2:]
bucket_name= '/'.join(bucket)
# s3_client = connect_to_aws_service_client('s3')
s3_client = boto3.client('s3')
all_dirs = []
for obj_list in s3_client.list_objects(Bucket=bucket_name)['Contents']:
key = obj_list['Key']
key = key.split('/')[:2]
key = '/'.join(key)
all_dirs.append(key)
all_dirs = set(all_dirs)
all_dirs = list(all_dirs)
for key in all_dirs:
input_df = read_json(spark, data_folder_name, key, bucket_name)
non_null_df = fill_null_values(input_df)
formatted_df = clean_date_column(non_null_df)
formatted_df = convert_list_column(formatted_df)
base_df = input_df
cols_list = ['movie_cast','music_department', 'genre','director_name', 'writer_name', 'producer_name']
for col_name in cols_list:
d_type = dict(base_df.dtypes)[col_name]
if d_type == 'string':
base_df = convert_to_array_type(base_df, col_name)
converted_df = base_df
for col_name in cols_list:
d_type = dict(converted_df.dtypes)[col_name]
if d_type == 'array<string>':
converted_df = explode_array_columns(converted_df, col_name)
unique_movie_df = formatted_df.dropDuplicates()
unique_movie_df = remove_quotes(unique_movie_df, True)
converted_df = remove_quotes(converted_df, False)
idx = 0
movie_cast_df = converted_df.select(col('imdbID'), col(cols_list[idx])).dropDuplicates()
music_department_df = converted_df.select(col('imdbID'), col(cols_list[idx+1])).dropDuplicates()
genres_df = converted_df.select(col('imdbID'), col(cols_list[idx+2])).dropDuplicates()
directors_df = converted_df.select(col('imdbID'), col(cols_list[idx+3])).dropDuplicates()
writers_df = converted_df.select(col('imdbID'), col(cols_list[idx+4])).dropDuplicates()
producers_df = converted_df.select(col('imdbID'), col(cols_list[idx+5])).dropDuplicates()
output_path = 's3://' + bucket_name + '/' + parquet_folder_name + '/' + movie_tbl_name
save_to_parquet(unique_movie_df, output_path)
output_path = 's3://' + bucket_name + '/' + parquet_folder_name + '/' + artist_tbl_name
save_to_parquet(movie_cast_df, output_path)
output_path = 's3://' + bucket_name + '/' + parquet_folder_name + '/' + music_tbl_name
save_to_parquet(music_department_df, output_path)
output_path = 's3://' + bucket_name + '/' + parquet_folder_name + '/' + genre_tbl_name
save_to_parquet(genres_df, output_path)
output_path = 's3://' + bucket_name + '/' + parquet_folder_name + '/' + director_tbl_name
save_to_parquet(directors_df, output_path)
output_path = 's3://' + bucket_name + '/' + parquet_folder_name + '/' + producer_tbl_name
save_to_parquet(producers_df, output_path)
output_path = 's3://' + bucket_name + '/' + parquet_folder_name + '/' + writer_tbl_name
save_to_parquet(writers_df, output_path)
def read_json(spark, folder_name, key, bucket_name):
dataframe = spark.read.option("multiline","true").json('s3://'+ bucket_name + '/' + key + "/*.json")
cols = [ col('imdbID'), col('localized title'), col('languages'), col('runtimes'), col('original air date'),
col('plot'), col('cast'), col('music department'), col('genres'),
col('directors'), col('writers'), col('producers') ]
dataframe = dataframe.select(*cols)
dataframe = dataframe.withColumnRenamed("localized title", "movie_title")
dataframe = dataframe.withColumnRenamed("music department", "music_department")
dataframe = dataframe.withColumnRenamed("cast", "movie_cast")
dataframe = dataframe.withColumnRenamed("original air date", "original_air_date")
dataframe = dataframe.withColumnRenamed("runtimes", "runtime")
dataframe = dataframe.withColumnRenamed("languages", "language")
dataframe = dataframe.withColumnRenamed("genres", "genre")
dataframe = dataframe.withColumnRenamed("directors", "director_name")
dataframe = dataframe.withColumnRenamed("writers", "writer_name")
dataframe = dataframe.withColumnRenamed("producers", "producer_name")
dataframe = dataframe.withColumn("imdbID",dataframe.imdbID.cast(DoubleType()))
dataframe = dataframe.withColumn("runtime",dataframe.runtime.cast(IntegerType()))
return dataframe
def fill_null_values(dataframe):
null_dict = {'imdbID': 0, 'runtime': 0, 'original_air_date': '31 Dec 9999 (Unknown)', 'movie_title': 'unknown'}
dataframe = dataframe.na.fill(null_dict)
other_cols_list = [ 'language' , 'plot', 'movie_cast', 'music_department',
'genre', 'director_name', 'writer_name', 'producer_name']
for col_name in other_cols_list:
d_type = dict(dataframe.dtypes)[col_name]
if d_type == 'string':
dataframe = dataframe.na.fill(value="unknown", subset=[col_name])
return dataframe
def clean_date_column(dataframe):
cols = [col('imdbID'), col('movie_title'), col('language'), col('runtime'), col('original_air_date'),
col('original_air_date_country'), col('plot')]
temp_dataframe = dataframe.withColumn("original_air_date_country", regexp_extract('original_air_date', r'\([^)]*\)', 0))
result_dataframe = temp_dataframe.withColumn("original_air_date", regexp_replace('original_air_date', r'\([^)]*\)', "")).select(*cols)
final_dataframe = result_dataframe.withColumn("original_air_date_country", regexp_replace('original_air_date_country', r'\)|\(', "")).select(*cols)
final_dataframe = final_dataframe.withColumn("original_air_date", regexp_replace('original_air_date', ' ', "")).select(*cols)
final_dataframe = final_dataframe.withColumn('original_air_date', date_format(to_date('original_air_date', 'ddMMMyyyy'), 'yyyy-MM-dd')).select(*cols)
final_dataframe = final_dataframe.withColumn("original_air_date", final_dataframe.original_air_date.cast(DateType()))
return final_dataframe
def convert_list_column(dataframe):
split_udf = udf(lambda x: x.split(',')[0])
split_dataframe = dataframe.withColumn("language", split_udf(col("language")))
temp_dataframe = split_dataframe.withColumn("language", regexp_replace('language', r'\]|\[', ""))
if dict(temp_dataframe.dtypes)["plot"] != "string":
join_udf = udf(lambda x: ",".join(x))
final_dataframe = temp_dataframe.withColumn("plot", join_udf(col("plot")))
else:
final_dataframe = temp_dataframe
final_dataframe = final_dataframe.withColumn("plot", regexp_replace('plot', r'\]|\[', ""))
return final_dataframe
def save_to_parquet(dataframe, parquet_path):
dataframe.write.mode('append').parquet(parquet_path)
def convert_to_array_type(dataframe, col_name):
array_dataframe = dataframe.withColumn(col_name, split(dataframe[col_name],",")).\
select(col('imdbID'), col('movie_cast'), col('music_department'),
col('genre'), col('director_name'), col('writer_name'),
col('producer_name')).withColumnRenamed('col', col_name)
return array_dataframe
def explode_array_columns(dataframe, col_name):
exploded_dataframe = dataframe.withColumn(col_name, explode(dataframe[col_name])).\
select(col('imdbID'), col('movie_cast'), col('music_department'),
col('genre'), col('director_name'), col('writer_name'),
col('producer_name')).withColumnRenamed('col', col_name)
exploded_dataframe = exploded_dataframe.withColumn(col_name, regexp_replace(col_name, r'\]|\[', ""))
return exploded_dataframe
def remove_quotes(dataframe, movie_flag):
if movie_flag == True:
cols = [ col('imdbID'), col('movie_title'), col('language'), col('runtime'), col('original_air_date'),
col('original_air_date_country'), col('plot') ]
dataframe = dataframe.select(*cols)
final_dataframe = dataframe.withColumn('language', regexp_replace('language', '"', ''))
final_dataframe = final_dataframe.withColumn('plot', regexp_replace('plot', '"', ''))
else:
cols = [ col('imdbID'), col('movie_cast'), col('music_department'), col('genre'),
col('director_name'), col('writer_name'), col('producer_name') ]
dataframe = dataframe.select(*cols)
final_dataframe = dataframe.withColumn('movie_cast', regexp_replace('movie_cast', '"', ''))
final_dataframe = final_dataframe.withColumn('music_department', regexp_replace('music_department', '"', ''))
final_dataframe = final_dataframe.withColumn('genre', regexp_replace('genre', '"', ''))
final_dataframe = final_dataframe.withColumn('director_name', regexp_replace('director_name', '"', ''))
final_dataframe = final_dataframe.withColumn('writer_name', regexp_replace('writer_name', '"', ''))
final_dataframe = final_dataframe.withColumn('producer_name', regexp_replace('producer_name', '"', ''))
return final_dataframe
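# Expected config structure, reconstructed from the keys read in process() above
# (actual values are deployment-specific):
#   config = {
#       'data': {'data_folder_name': ..., 'parquet_folder_name': ...},
#       'paths': {'emr_path': ...},
#       'athena': {'movie_tbl_name': ..., 'genre_tbl_name': ..., 'artist_tbl_name': ...,
#                  'music_tbl_name': ..., 'director_tbl_name': ..., 'producer_tbl_name': ...,
#                  'writer_tbl_name': ...},
#       's3_bucket_details': {'s3_bucket_path': ..., 's3_bucket_data_path': ...},
#   }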
| 2.828125
| 3
|
pycoind/blockchain/transaction.py
|
peerchemist/pycoind
| 120
|
12775697
|
<gh_stars>100-1000
# The MIT License (MIT)
#
# Copyright (c) 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Transaction Database
#
# txck - transaction composite key (see below)
# txid_hint - hash integer, provides pruning to likely txid
# txn - the binary blob of the transaction
#
# The database is broken up into files about 1.75GB each (so file systems like
# FAT32 work). The database filename contains two numbers, a number of
# partitions (N) and an index (i) which is in the range [0, N). These files
# will be denoted as file(N, i)
#
# When inserting, we insert into the highest N. Given an id, we insert into
# file(N, get_q(txid) % N). The function get_q hashes bytes into an integer
#
# When searching, we must check each partition level, so to search for id, we
# start at the highest N, and check:
# 1. file(N, get_q(txid) % N)
# 2. file(N / 2, get_q(txid) % (N / 2))
# 3. file(N / 4, get_q(txid) % (N / 4))
# and so on, until we reach a k, such that (N / (2 ** k)) < 4.
#
# We can also, over time migrate values into higher levels. This is a future
# todo, if performance becomes an issue.
# Composite Keys
#
# We use composite keys so we can optimize space with the 8-byte rowid we get
# by default in a sqlite database as well as the speed gain as they are the
# keys in the B-Tree. (see: http://www.sqlite.org/lang_createtable.html#rowid)
#
# txck (transaction-composite-key: 43 bits)
# - (block-id:23 bits) (txn-index:20 bits)
#
# With these keys, we can support up to 8 million blocks, each block with up
# to 1 million transactions.
# Hints
#
# A hint (hash integer) is the integer value of a byte string, used to quickly prune
# any obviously non-matching elements. The remaining elements must then be
# compared against confirmed values, since the hash may yield false positives.
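# Illustrative sketch of the composite key layout described above (the actual packing
# lives in the `keys` helper module; the bit widths here are taken from the comments
# and are assumptions as far as this file is concerned):
#
#   txck      = (blockid << 20) | txn_index   # 23-bit block id, 20-bit txn index
#   blockid   = txck >> 20
#   txn_index = txck & ((1 << 20) - 1)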
import os
import random
import sqlite3
import struct
from . import database
from . import keys
from .. import coins
from .. import protocol
from .. import script
from .. import util
__all__ = ['Database']
def get_q(txid):
'Compute the index q from a txid.'
return struct.unpack('>I', txid[:4])[0]
_KEY_DUP = 'PRIMARY KEY must be unique'
_0 = chr(0) * 32
class Transaction(object):
def __init__(self, database, row, _transaction = None):
keys = [n for (n, t, i) in database.Columns]
self._database = database
self._data = dict(zip(keys, row))
# cache for previous outputs' transactions, since it hits the database
self._po_cache = dict()
self._transaction = _transaction
version = property(lambda s: s.txn.version)
inputs = property(lambda s: s.txn.tx_in)
outputs = property(lambda s: s.txn.tx_out)
lock_time = property(lambda s: s.txn.lock_time)
hash = property(lambda s: s.txn.hash)
index = property(lambda s: keys.get_txck_index(s._txck))
def __getstate__(self):
return (self._po_cache, dict(txn = str(self._data['txn']), txck = self._data['txck']))
def __setstate__(self, state):
self._database = None
(self._po_cache, self._data) = state
self._transaction = None
def cache_previous_outputs(self):
for i in xrange(0, len(self.inputs)):
self.previous_transaction(i)
def previous_transaction(self, index):
"Returns the previous output's transaction for the input at index."
# coinbase transaction
if self.index == 0 and index == 0:
return None
# look up the previous output's transaction and cache it
if index not in self._po_cache:
po_hash = self.inputs[index].previous_output.hash
previous_txn = self._database.get(po_hash)
if previous_txn is None:
raise KeyError('missing transaction: %s' % po_hash)
self._po_cache[index] = previous_txn
# return the cache value
return self._po_cache[index]
def previous_output(self, index):
'Returns the previous output for the input at index.'
previous_txn = self.previous_transaction(index)
if previous_txn is None: return None
po = self.inputs[index].previous_output
return previous_txn.outputs[po.index]
def __str__(self):
return "<Transaction hash=0x%s>" % self.hash.encode('hex')
# transaction composite key and database block id; internal use
_txck = property(lambda s: s._data['txck'])
_blockid = property(lambda s: keys.get_txck_blockid(s._txck))
def _previous_uock(self, index):
previous_txn = self.previous_transaction(index)
if previous_txn is None: return None
po = self.inputs[index].previous_output
return keys.get_uock(previous_txn._txck, po.index)
@property
def txn(self):
'The raw transaction object.'
if self._transaction is None:
(vl, self._transaction) = protocol.Txn.parse(self.txn_binary)
return self._transaction
txn_binary = property(lambda s: str(s._data['txn']))
class Database(database.Database):
MINIMUM_N = 4
TARGET_SIZE = (1 << 30) * 7 // 4 # 1.75GB
Columns = [
('txck', 'integer primary key', False),
('txid_hint', 'integer', True),
('txn', 'blob', False),
]
Name = 'txns'
def __init__(self, data_dir = None, coin = coins.Bitcoin):
database.Database.__init__(self, data_dir, coin)
# maps (n, i % n) tuples to sqlite connection
self._connections = dict()
# the largest N level on disk
self._N = self.load_n()
# loading/creating a connection loads/creates the entire level
n = self._N
while n >= self.MINIMUM_N:
self.get_connection(n, 0, True)
n //= 2
#self._unspent = unspent.Database(self.data_dir, coin)
def load_n(self):
'Determine the highest N for a database directory.'
n = self.MINIMUM_N
while True:
if not os.path.isfile(self.get_filename(self.get_suffix(n * 2, 0))):
break
n *= 2
return n
def get_suffix(self, n, q):
return '-%03d-%03d' % (n, q % n)
def get_connection(self, n, q, allow_create = False):
'''Get a connection for the database file at (n, q % n). First a
connection cache is searched. Then the disk is checked for new
files, in which case every file at level n is loaded.
If allow_create and the database file does not exist, all
partitions at the level n are created.'''
# the location we want
loc = (n, q % n)
if loc not in self._connections:
locs = [(n, i) for i in xrange(0, n)]
# doesn't exist; create the files backward
if not os.path.isfile(self.get_filename(self.get_suffix(n, 0))):
if not allow_create: return None
locs.reverse()
for l in locs:
suffix = self.get_suffix(l[0], l[1])
self._connections[l] = database.Database.get_connection(self, suffix)
return self._connections[loc]
def check_size(self):
'Checks the sizes of the database level, increasing the size as needed.'
# if any (statistically selected) database is full, increase our size
suffix = self.get_suffix(self._N, random.randint(0, self._N - 1))
filename = self.get_filename(suffix)
if os.path.getsize(filename) > self.TARGET_SIZE:
self._N *= 2
self.get_connection(self._N, 0, True)
def add(self, block, transactions):
'Add transactions to the database.'
# expand the database if necessary
self.check_size()
# check the merkle root of the transactions against the block
block._check_merkle_root(util.get_merkle_root(transactions))
# for each transaction...
connections = dict()
block_txns = [ ]
for (txn_index, txn) in enumerate(transactions):
# ...get the database to save to
txid = txn.hash
q = get_q(txid)
connection = self.get_connection(self._N, q)
connections[(self._N, q % self._N)] = connection
# ...insert
cursor = connection.cursor()
txck = keys.get_txck(block._blockid, txn_index)
row = (txck, keys.get_hint(txid), buffer(txn.binary()))
try:
cursor.execute(self.sql_insert, row)
# (duplicates don't matter)
except sqlite3.IntegrityError, e:
if e.message != _KEY_DUP:
raise e
# wrap up the transaction for the returned block
block_txns.append(Transaction(self, row, txn))
# commit the transactions to the databases
for connection in connections.values():
connection.commit()
# update the block with the transactions
block._update_transactions(block_txns)
# return the now updated block
return block
# @TODO optimization: store in each txn db a max_blockid so we can prune
def _get(self, txck):
''
for connection in self._connections.values():
cursor = connection.cursor()
cursor.execute(self.sql_select + ' where txck = ?', (txck, ))
row = cursor.fetchone()
if row:
return Transaction(self, row)
return None
def _get_transactions(self, blockid):
"Find all transactions for a block, ordered by transaction index. Internal use."
# the range that this block's composite keys can have [lo, hi)
lo = keys.get_txck(blockid, 0)
hi = keys.get_txck(blockid + 1, 0)
# find all transactions across all databases within this range
txns = [ ]
for connection in self._connections.values():
cursor = connection.cursor()
cursor.execute(self.sql_select + ' where txck >= ? and txck < ?', (lo, hi))
txns.extend((r[0], r) for r in cursor.fetchall())
# sort by index (actually (blockid, index), but all have same blockid)
txns.sort()
# wrap it up in a helpful wrapper
return [Transaction(self, row) for (txck, row) in txns]
def get(self, txid, default = None):
'Get a transaction by its txid.'
# the hint we index by for faster lookup
txid_hint = keys.get_hint(txid)
# search each level (n, n // 2, n // 4, etc)
n = self._N
q = get_q(txid)
while n >= self.MINIMUM_N:
connection = self.get_connection(n, q)
cursor = connection.cursor()
cursor.execute(self.sql_select + ' where txid_hint = ?', (txid_hint, ))
for row in cursor.fetchall():
(vl, txn) = protocol.Txn.parse(row[2])
if txn.hash == txid:
return Transaction(self, row, txn)
n //= 2
# maybe another process grew us, and we didn't know? Try again.
new_n = self.load_n()
if new_n != self._N:
self._N = new_n
return self.get(txid, default)
return default
#def __getitem__(self, name):
# 'Get a transaction by its txid.'
#
# txn = self.get(name)
# if txn is not None:
# return txn
# raise KeyError(name)
# Useful? Should it return a blockchain.transaction.Transaction or protocol.Txn?
#def __iter__(self):
# 'Iterate over every transaction. There is no meaningful order.'
#
# for connection in self._connections.values():
# cursor = connection.cursor()
# cursor.execute(self.sql_select)
# while True:
# rows = cursor.fetchmany()
# if not rows: break
# for row in rows:
# #yield Transaction(self, row)
# (vl, txn) = protocol.Txn.parse(row[2])[1]
# yield txn
| 1.609375
| 2
|
mayan/apps/mailer/workflow_actions.py
|
CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons
| 2
|
12775698
|
import logging
from django.utils.translation import ugettext_lazy as _
from mayan.apps.acls.models import AccessControlList
from mayan.apps.document_states.classes import WorkflowAction
from .models import UserMailer
from .permissions import permission_user_mailer_use
__all__ = ('EmailAction',)
logger = logging.getLogger(name=__name__)
class EmailAction(WorkflowAction):
fields = {
'mailing_profile': {
'label': _('Mailing profile'),
'class': 'django.forms.ModelChoiceField', 'kwargs': {
'help_text': _('Mailing profile to use when sending the email.'),
'queryset': UserMailer.objects.none(), 'required': True
}
},
'recipient': {
'label': _('Recipient'),
'class': 'django.forms.CharField', 'kwargs': {
'help_text': _(
'Email address of the recipient. Can be multiple addresses '
'separated by comma or semicolon. A template can be used '
'to reference properties of the document.'
),
'required': True
}
},
'cc': {
'label': _('CC'),
'class': 'django.forms.CharField', 'kwargs': {
'help_text': _(
'Address used in the "Bcc" header when sending the '
'email. Can be multiple addresses '
'separated by comma or semicolon. A template can be used '
'to reference properties of the document.'
),
'required': False
}
},
'bcc': {
'label': _('BCC'),
'class': 'django.forms.CharField', 'kwargs': {
'help_text': _(
'Address used in the "Bcc" header when sending the '
'email. Can be multiple addresses '
'separated by comma or semicolon. A template can be used '
'to reference properties of the document.'
),
'required': False
}
},
'reply_to': {
'label': _('Reply to'),
'class': 'django.forms.CharField', 'kwargs': {
'help_text': _(
'Address used in the "Reply-To" header when sending the '
'email. Can be multiple addresses '
'separated by comma or semicolon. A template can be used '
'to reference properties of the document.'
),
'required': False
}
},
'subject': {
'label': _('Subject'),
'class': 'django.forms.CharField', 'kwargs': {
'help_text': _(
'Subject of the email. Can be a string or a template.'
),
'required': True
}
},
'body': {
'label': _('Body'),
'class': 'django.forms.CharField', 'kwargs': {
'help_text': _(
'Body of the email to send. Can be a string or a template.'
),
'required': True
}
},
'attachment': {
'label': _('Attachment'),
'class': 'django.forms.BooleanField', 'default': False,
'help_text': _(
'Attach the document to the mail.'
),
'required': False
},
}
field_order = (
'mailing_profile', 'recipient', 'cc', 'bcc', 'reply_to', 'subject',
'body', 'attachment'
)
label = _('Send email')
widgets = {
'body': {
'class': 'django.forms.widgets.Textarea', 'kwargs': {}
}
}
permission = permission_user_mailer_use
def execute(self, context):
recipient = self.render_field(
field_name='recipient', context=context
)
cc = self.render_field(
field_name='cc', context=context
)
bcc = self.render_field(
field_name='bcc', context=context
)
reply_to = self.render_field(
field_name='reply_to', context=context
)
subject = self.render_field(
field_name='subject', context=context
)
body = self.render_field(
field_name='body', context=context
)
user_mailer = self.get_user_mailer()
kwargs = {
'bcc': bcc, 'cc': cc, 'body': body, 'reply_to': reply_to,
'subject': subject, 'to': recipient
}
if self.form_data.get('attachment', False):
kwargs.update(
{
'as_attachment': True,
'document': context['document']
}
)
user_mailer.send_document(**kwargs)
else:
user_mailer.send(**kwargs)
def get_form_schema(self, **kwargs):
result = super().get_form_schema(**kwargs)
queryset = AccessControlList.objects.restrict_queryset(
permission=self.permission, queryset=UserMailer.objects.all(),
user=kwargs['request'].user
)
result['fields']['mailing_profile']['kwargs']['queryset'] = queryset
return result
def get_user_mailer(self):
return UserMailer.objects.get(pk=self.form_data['mailing_profile'])
| 2.015625
| 2
|
Examples/Python/ImageCreateAndSet.py
|
nathantspencer/SimpleElastix
| 350
|
12775699
|
<reponame>nathantspencer/SimpleElastix
#!/usr/bin/env python
#=========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#=========================================================================
from __future__ import print_function
import SimpleITK as sitk
import os
xImg = sitk.Image( 256, 256, sitk.sitkFloat32 )
yImg = sitk.Image( 256, 256, sitk.sitkFloat32 )
for y in range( 0, xImg.GetSize()[1] ):
for x in range( 0, xImg.GetSize()[0] ):
xImg.SetPixel( x, y, x )
yImg[x, y] = y
sigma = 50
xImg = sitk.Subtract( xImg, xImg.GetSize()[0] / 2 )
yImg = yImg - yImg.GetSize()[1] / 2
gaussianImg = sitk.Exp( -1 * (xImg**2 + yImg**2) / (2.0 * sigma**2) )
if ( not "SITK_NOSHOW" in os.environ ):
sitk.Show( gaussianImg, "Gaussian Blob" )
| 2.421875
| 2
|
content/_build/jupyter_execute/11_db_access/02_sqlalchemy_orm.py
|
aviadr1/learn-python3
| 0
|
12775700
|
#!/usr/bin/env python
# coding: utf-8
#
# <a href="https://colab.research.google.com/github/aviadr1/learn-advanced-python/blob/master/content/11_db_access/02_sqlalchemy_orm.ipynb" target="_blank">
# <img src="https://colab.research.google.com/assets/colab-badge.svg"
# title="Open this file in Google Colab" alt="Colab"/>
# </a>
#
# # SQLAlchemy
# > SQLAlchemy is the Python SQL toolkit and Object Relational Mapper that gives application developers the full power and flexibility of SQL.
#
# We're going to show how to create a database, add some data and do basic queries.
# More complex queries, migrations, and database administration are outside the scope of this lesson.
# ## Create a new database from scratch
# Let's create a new database from scratch. We will:
# 1. Create classes to define a schema
# 2. Map the schema to a database
# 3. Add objects to the database
# 4. Run queries
#
# > NOTE: we will use an in-memory database, but running with a file based one or a remote database would be just as easy
# ### 1. Create a database session
# In[1]:
from sqlalchemy import create_engine
#engine = create_engine('sqlite:///example.db', echo=True)
engine = create_engine('sqlite:///:memory:', echo=True)
#engine = create_engine('sqlite:///:memory:')
conn = engine.connect()
from sqlalchemy.orm import sessionmaker
Session = sessionmaker(bind=engine)
session = Session()
# ### 2. Helper functions to print SQL queries and SQL results
# In[2]:
from IPython.display import display
import pandas as pd
import sqlalchemy
def sql(query):
print()
print(query)
print()
def get_results(query):
global engine
q = query.statement if isinstance(query, sqlalchemy.orm.query.Query) else query
return pd.read_sql(q, engine)
def display_results(query):
df = get_results(query)
display(df)
#sql(query)
# ### 3. creating a schema base
# In[3]:
from sqlalchemy.ext.declarative import declarative_base
import sqlalchemy_explore
### the basic base class for SQLAlchemy schema objects
# Base = declarative_base(bind=engine)
### base class including utils like an __repr__ method
### see https://pypi.org/project/sqlalchemy-explore/
Base = declarative_base(cls=sqlalchemy_explore.ReflectiveMixin)
# ### 4. Create the schema
# In[4]:
from sqlalchemy import Column, DateTime, ForeignKey, Integer, NVARCHAR, Numeric, Sequence
from sqlalchemy.orm import relationship
class Customer(Base):
__tablename__ = 'customers'
CustomerId = Column(Integer, Sequence('customer_id_seq'), primary_key=True)
FirstName = Column(NVARCHAR(40), nullable=False)
LastName = Column(NVARCHAR(20), nullable=False)
Company = Column(NVARCHAR(80))
Address = Column(NVARCHAR(70))
Phone = Column(NVARCHAR(24))
Email = Column(NVARCHAR(60), nullable=False)
class Item(Base):
__tablename__ = 'items'
ItemId = Column(Integer, Sequence('item_id_seq'), primary_key=True)
Name = Column(NVARCHAR(40), nullable=False)
Price = Column(Numeric, nullable=False)
class Purchase(Base):
__tablename__ = 'purchases'
PurchaseId = Column(Integer, Sequence('purchase_id_seq'), primary_key=True)
ItemId = Column(ForeignKey('items.ItemId'), nullable=False, index=True)
CustomerId = Column(ForeignKey('customers.CustomerId'), nullable=False, index=True)
Date = Column(DateTime, nullable=False)
item = relationship('Item')
customer = relationship('Customer')
# In[5]:
Purchase.ItemId.name
# ### 5. Create tables in the database to conform with the schema
# In[6]:
Base.metadata.create_all(engine)
# ### 6. Create a customer
# In[7]:
moshe = Customer(
FirstName='Moshe',
LastName='Cohen',
Address='Alenbi 99, Tel Aviv',
Phone="053-5556789",
Email='<EMAIL>')
session.add(moshe)
session.commit()
# ### 7. run queries
# #### Using SQLAlchemy expression language
# In[8]:
from sqlalchemy import select
customers_query = select([Customer.FirstName, Customer.Email])
results = conn.execute(customers_query)
print()
for row in results:
print(row)
print()
print(type(row)) # rows are of type sqlalchemy.engine.result.RowProxy
# > Our handy `display_results` function uses `pandas` library to display the results as a table
# In[9]:
display_results(customers_query)
# #### Using SQLAlchemy ORM Object Relation Manager
# In[10]:
results = session.query(Customer)
print()
for customer in results:
print(customer)
print()
print(type(customer))
# ## Reflect an existing database
#
# When we have an existing database and would like to start accessing it with SQLAlchemy, we need classes that represent its tables.
#
# Being good lazy programmers, we often don't want to write these classes by hand and would like a helpful starting point.
# We're going to show how to create such classes from an existing database.
#
# We will do it using two methods:
# 1. use the automap class in SQLAlchemy to create dynamic classes (without source) for the db
# 2. use the `sqlacodegen` module [1] to generate source code for classes
#
# [1]: https://pypi.org/project/sqlacodegen/
#
# ### Chinook sample DB
# Let's download a sample database called [Chinook](http://www.sqlitetutorial.net/sqlite-sample-database/)
# 
#
# There's a file in the `notebooks` directory called `download_chinook.py` with simple code to download this zip to the current directory.
# > Let's run `download_chinook.download` now
# In[11]:
# run this code to get the chinook database
import download_chinook
download_chinook.download()
# > Now let's connect to the database and create an `engine` variable
# In[12]:
import sqlalchemy
from sqlalchemy import create_engine
engine = create_engine('sqlite:///chinook.db')
# > Let's get the list of table names from the database
# In[13]:
engine.table_names()
# ### Using automap to reflect a db
#
# We use the `automap` extension to __dynamically__ create classes for each table __at runtime__.
#
# - __advantage__: automap is fairly easy to use, and it comes bundled with SQLAlchemy without the need to install additional modules
# - __disadvantage__: there is no way to see the _code_ for these automap classes; we have to use them without ever seeing their source
#
# > First, let's define a helper function called `automap_database()` that generates classes for us
# In[14]:
def automap_database(engine):
### useful: extract classes from the chinook database
metadata = sqlalchemy.MetaData()
metadata.reflect(engine)
## we need to do this once
from sqlalchemy.ext.automap import automap_base
# produce a set of mappings from this MetaData.
Base = automap_base(metadata=metadata)
# calling prepare() just sets up mapped classes and relationships.
Base.prepare()
return Base
# > Next, let's use the `automap_database()` function and see which classes were generated
# In[15]:
# create dynamic classes for every table using automap
AutoMapBase = automap_database(engine)
# which classes were generated?
print('Generated the following classes:')
print('\t', ', '.join(AutoMapBase.classes.keys()))
# Let's prepare an ORM session so we can query the database based on these classes
from sqlalchemy.orm import sessionmaker
Session = sessionmaker(bind=engine)
session = Session()
# > Lastly, let's use one of these classes, see what columns it has, and use it to query the database
# In[16]:
# let's get the Album class for the albums table
Album = AutoMapBase.classes['albums']
# what columns are available in this class?
print('columns for Album class:')
print('\t', Album.__table__.columns) # 'albums.AlbumId', 'albums.Title', 'albums.ArtistId'
# let's get the first album and print it out
first_album = session.query(Album).first()
print()
print('first album:', type(first_album))
print('\t', first_album.AlbumId, first_album.Title, first_album.ArtistId)
# ### Using sqlacodegen to generate classes with source code
#
# > First, we need to install the `sqlacodegen` module
# In[17]:
get_ipython().system('pip install sqlacodegen')
# > Now, let's run it
# In[18]:
get_ipython().system('sqlacodegen sqlite:///chinook.db --tables albums,artists,customers,employees,genres,invoice_items,invoices,tracks')
# > We can now copy-paste the generated source for these classes into our code so we can start using it
# In[19]:
from sqlalchemy import Column, DateTime, ForeignKey, Integer, NVARCHAR, Numeric
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class Artist(Base):
__tablename__ = 'artists'
ArtistId = Column(Integer, primary_key=True)
Name = Column(NVARCHAR(120))
class Employee(Base):
__tablename__ = 'employees'
EmployeeId = Column(Integer, primary_key=True)
LastName = Column(NVARCHAR(20), nullable=False)
FirstName = Column(NVARCHAR(20), nullable=False)
Title = Column(NVARCHAR(30))
ReportsTo = Column(ForeignKey('employees.EmployeeId'), index=True)
BirthDate = Column(DateTime)
HireDate = Column(DateTime)
Address = Column(NVARCHAR(70))
City = Column(NVARCHAR(40))
State = Column(NVARCHAR(40))
Country = Column(NVARCHAR(40))
PostalCode = Column(NVARCHAR(10))
Phone = Column(NVARCHAR(24))
Fax = Column(NVARCHAR(24))
Email = Column(NVARCHAR(60))
parent = relationship('Employee', remote_side=[EmployeeId])
class Genre(Base):
__tablename__ = 'genres'
GenreId = Column(Integer, primary_key=True)
Name = Column(NVARCHAR(120))
class MediaType(Base):
__tablename__ = 'media_types'
MediaTypeId = Column(Integer, primary_key=True)
Name = Column(NVARCHAR(120))
class Album(Base):
__tablename__ = 'albums'
AlbumId = Column(Integer, primary_key=True)
Title = Column(NVARCHAR(160), nullable=False)
ArtistId = Column(ForeignKey('artists.ArtistId'), nullable=False, index=True)
artist = relationship('Artist')
class Customer(Base):
__tablename__ = 'customers'
CustomerId = Column(Integer, primary_key=True)
FirstName = Column(NVARCHAR(40), nullable=False)
LastName = Column(NVARCHAR(20), nullable=False)
Company = Column(NVARCHAR(80))
Address = Column(NVARCHAR(70))
City = Column(NVARCHAR(40))
State = Column(NVARCHAR(40))
Country = Column(NVARCHAR(40))
PostalCode = Column(NVARCHAR(10))
Phone = Column(NVARCHAR(24))
Fax = Column(NVARCHAR(24))
Email = Column(NVARCHAR(60), nullable=False)
SupportRepId = Column(ForeignKey('employees.EmployeeId'), index=True)
employee = relationship('Employee')
class Invoice(Base):
__tablename__ = 'invoices'
InvoiceId = Column(Integer, primary_key=True)
CustomerId = Column(ForeignKey('customers.CustomerId'), nullable=False, index=True)
InvoiceDate = Column(DateTime, nullable=False)
BillingAddress = Column(NVARCHAR(70))
BillingCity = Column(NVARCHAR(40))
BillingState = Column(NVARCHAR(40))
BillingCountry = Column(NVARCHAR(40))
BillingPostalCode = Column(NVARCHAR(10))
Total = Column(Numeric(10, 2), nullable=False)
customer = relationship('Customer')
class Track(Base):
__tablename__ = 'tracks'
TrackId = Column(Integer, primary_key=True)
Name = Column(NVARCHAR(200), nullable=False)
AlbumId = Column(ForeignKey('albums.AlbumId'), index=True)
MediaTypeId = Column(ForeignKey('media_types.MediaTypeId'), nullable=False, index=True)
GenreId = Column(ForeignKey('genres.GenreId'), index=True)
Composer = Column(NVARCHAR(220))
Milliseconds = Column(Integer, nullable=False)
Bytes = Column(Integer)
UnitPrice = Column(Numeric(10, 2), nullable=False)
album = relationship('Album')
genre = relationship('Genre')
media_type = relationship('MediaType')
class InvoiceItem(Base):
__tablename__ = 'invoice_items'
InvoiceLineId = Column(Integer, primary_key=True)
InvoiceId = Column(ForeignKey('invoices.InvoiceId'), nullable=False, index=True)
TrackId = Column(ForeignKey('tracks.TrackId'), nullable=False, index=True)
UnitPrice = Column(Numeric(10, 2), nullable=False)
Quantity = Column(Integer, nullable=False)
invoice = relationship('Invoice')
track = relationship('Track')
# > Let's create a new engine and a new ORM session to use with this metadata (it is different from the automap metadata)
# In[20]:
from sqlalchemy import create_engine
engine = create_engine('sqlite:///chinook.db')
conn = engine.connect()
metadata.reflect(engine)
from sqlalchemy.orm import sessionmaker
Session = sessionmaker(bind=engine)
session = Session()
# > Let's make a simple query
# In[21]:
# let's get the first track and print it out
first_track = session.query(Track).first()
print(type(first_track))
print('Song:', first_track.Name, '| Album:', first_track.album.Title, '| Artist:', first_track.album.artist.Name)
# # Further reading
# - [Toward Data Science' SQLAlchemy tutorial](https://towardsdatascience.com/sqlalchemy-python-tutorial-79a577141a91)
# - [SQLAlchemy Object Relational Tutorial](https://docs.sqlalchemy.org/en/13/orm/tutorial.html)
# - [SQLAlchemy Expression Language Tutorial](https://docs.sqlalchemy.org/en/13/core/tutorial.html)
# - [SQLAlchemy ORM Tutorial for Python Developers](https://auth0.com/blog/sqlalchemy-orm-tutorial-for-python-developers/)
# - [sqlalchemy-explore](https://pypi.org/project/sqlalchemy-explore/)
# - Sample databases
# - https://github.com/jpwhite3/northwind-SQLite3
# - https://github.com/arjunchndr/Analyzing-Chinook-Database-using-SQL-and-Python
# In[ ]:
| 4.1875
| 4
|
zeex/core/ui/actions/split_ui.py
|
zbarge/dbtrix
| 10
|
12775701
|
<reponame>zbarge/dbtrix
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:/Users/Zeke/Google Drive/dev/python/zeex/zeex/core/ui/actions/split.ui'
#
# Created: Mon Nov 13 22:57:16 2017
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_FileSplitDialog(object):
def setupUi(self, FileSplitDialog):
FileSplitDialog.setObjectName("FileSplitDialog")
FileSplitDialog.resize(416, 359)
self.verticalLayout = QtGui.QVBoxLayout(FileSplitDialog)
self.verticalLayout.setObjectName("verticalLayout")
self.formLayout = QtGui.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.exportBasePathLabel = QtGui.QLabel(FileSplitDialog)
self.exportBasePathLabel.setObjectName("exportBasePathLabel")
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.exportBasePathLabel)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.btnExportBrowse = QtGui.QPushButton(FileSplitDialog)
self.btnExportBrowse.setObjectName("btnExportBrowse")
self.gridLayout.addWidget(self.btnExportBrowse, 0, 1, 1, 1)
self.lineEditExportPath = QtGui.QLineEdit(FileSplitDialog)
self.lineEditExportPath.setObjectName("lineEditExportPath")
self.gridLayout.addWidget(self.lineEditExportPath, 0, 0, 1, 1)
self.formLayout.setLayout(1, QtGui.QFormLayout.FieldRole, self.gridLayout)
self.splitOnLabel = QtGui.QLabel(FileSplitDialog)
self.splitOnLabel.setObjectName("splitOnLabel")
self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.splitOnLabel)
self.pushGrid = QtGui.QGridLayout()
self.pushGrid.setObjectName("pushGrid")
self.listViewSplitOnLeft = QtGui.QListView(FileSplitDialog)
self.listViewSplitOnLeft.setObjectName("listViewSplitOnLeft")
self.pushGrid.addWidget(self.listViewSplitOnLeft, 0, 0, 1, 1)
self.listViewSplitOnRight = QtGui.QListView(FileSplitDialog)
self.listViewSplitOnRight.setObjectName("listViewSplitOnRight")
self.pushGrid.addWidget(self.listViewSplitOnRight, 0, 2, 1, 1)
self.btnGrid = QtGui.QGridLayout()
self.btnGrid.setObjectName("btnGrid")
self.btnSplitOnPushRight = QtGui.QPushButton(FileSplitDialog)
self.btnSplitOnPushRight.setObjectName("btnSplitOnPushRight")
self.btnGrid.addWidget(self.btnSplitOnPushRight, 0, 0, 1, 1)
self.btnSplitOnPushLeft = QtGui.QPushButton(FileSplitDialog)
self.btnSplitOnPushLeft.setObjectName("btnSplitOnPushLeft")
self.btnGrid.addWidget(self.btnSplitOnPushLeft, 1, 0, 1, 1)
self.pushGrid.addLayout(self.btnGrid, 0, 1, 1, 1)
self.formLayout.setLayout(4, QtGui.QFormLayout.FieldRole, self.pushGrid)
self.sourcePathLabel = QtGui.QLabel(FileSplitDialog)
self.sourcePathLabel.setObjectName("sourcePathLabel")
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.sourcePathLabel)
self.gridLayout_2 = QtGui.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.btnSourcePathBrowse = QtGui.QPushButton(FileSplitDialog)
self.btnSourcePathBrowse.setObjectName("btnSourcePathBrowse")
self.gridLayout_2.addWidget(self.btnSourcePathBrowse, 0, 1, 1, 1)
self.lineEditSourcePath = QtGui.QLineEdit(FileSplitDialog)
self.lineEditSourcePath.setObjectName("lineEditSourcePath")
self.gridLayout_2.addWidget(self.lineEditSourcePath, 0, 0, 1, 1)
self.formLayout.setLayout(0, QtGui.QFormLayout.FieldRole, self.gridLayout_2)
self.pushGrid_2 = QtGui.QGridLayout()
self.pushGrid_2.setObjectName("pushGrid_2")
self.listViewUseColsLeft = QtGui.QListView(FileSplitDialog)
self.listViewUseColsLeft.setObjectName("listViewUseColsLeft")
self.pushGrid_2.addWidget(self.listViewUseColsLeft, 0, 0, 1, 1)
self.listViewUseColsRight = QtGui.QListView(FileSplitDialog)
self.listViewUseColsRight.setObjectName("listViewUseColsRight")
self.pushGrid_2.addWidget(self.listViewUseColsRight, 0, 2, 1, 1)
self.btnGrid_2 = QtGui.QGridLayout()
self.btnGrid_2.setObjectName("btnGrid_2")
self.btnUseColsPushRight = QtGui.QPushButton(FileSplitDialog)
self.btnUseColsPushRight.setObjectName("btnUseColsPushRight")
self.btnGrid_2.addWidget(self.btnUseColsPushRight, 0, 0, 1, 1)
self.btnUseColsPushLeft = QtGui.QPushButton(FileSplitDialog)
self.btnUseColsPushLeft.setObjectName("btnUseColsPushLeft")
self.btnGrid_2.addWidget(self.btnUseColsPushLeft, 1, 0, 1, 1)
self.pushGrid_2.addLayout(self.btnGrid_2, 0, 1, 1, 1)
self.formLayout.setLayout(5, QtGui.QFormLayout.FieldRole, self.pushGrid_2)
self.useColumnsLabel = QtGui.QLabel(FileSplitDialog)
self.useColumnsLabel.setObjectName("useColumnsLabel")
self.formLayout.setWidget(5, QtGui.QFormLayout.LabelRole, self.useColumnsLabel)
self.gridLayout_3 = QtGui.QGridLayout()
self.gridLayout_3.setObjectName("gridLayout_3")
self.maxRowsLabel = QtGui.QLabel(FileSplitDialog)
self.maxRowsLabel.setObjectName("maxRowsLabel")
self.gridLayout_3.addWidget(self.maxRowsLabel, 0, 1, 1, 1)
self.lineEditMaxRows = QtGui.QLineEdit(FileSplitDialog)
self.lineEditMaxRows.setObjectName("lineEditMaxRows")
self.gridLayout_3.addWidget(self.lineEditMaxRows, 0, 2, 1, 1)
self.checkBoxDropNulls = QtGui.QCheckBox(FileSplitDialog)
self.checkBoxDropNulls.setObjectName("checkBoxDropNulls")
self.gridLayout_3.addWidget(self.checkBoxDropNulls, 0, 0, 1, 1)
self.formLayout.setLayout(7, QtGui.QFormLayout.FieldRole, self.gridLayout_3)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.formLayout.setItem(6, QtGui.QFormLayout.LabelRole, spacerItem)
self.gridLayout_4 = QtGui.QGridLayout()
self.gridLayout_4.setObjectName("gridLayout_4")
self.btnExportTemplate = QtGui.QPushButton(FileSplitDialog)
self.btnExportTemplate.setObjectName("btnExportTemplate")
self.gridLayout_4.addWidget(self.btnExportTemplate, 0, 3, 1, 1)
self.btnImportTemplate = QtGui.QPushButton(FileSplitDialog)
self.btnImportTemplate.setObjectName("btnImportTemplate")
self.gridLayout_4.addWidget(self.btnImportTemplate, 0, 2, 1, 1)
self.lineEditTemplate = QtGui.QLineEdit(FileSplitDialog)
self.lineEditTemplate.setObjectName("lineEditTemplate")
self.gridLayout_4.addWidget(self.lineEditTemplate, 0, 1, 1, 1)
self.formLayout.setLayout(3, QtGui.QFormLayout.FieldRole, self.gridLayout_4)
self.labelTemplate = QtGui.QLabel(FileSplitDialog)
self.labelTemplate.setObjectName("labelTemplate")
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.labelTemplate)
self.verticalLayout.addLayout(self.formLayout)
self.buttonBox = QtGui.QDialogButtonBox(FileSplitDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(FileSplitDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), FileSplitDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), FileSplitDialog.reject)
QtCore.QMetaObject.connectSlotsByName(FileSplitDialog)
def retranslateUi(self, FileSplitDialog):
FileSplitDialog.setWindowTitle(QtGui.QApplication.translate("FileSplitDialog", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
self.exportBasePathLabel.setText(QtGui.QApplication.translate("FileSplitDialog", "Export Base Path", None, QtGui.QApplication.UnicodeUTF8))
self.btnExportBrowse.setText(QtGui.QApplication.translate("FileSplitDialog", "Browse", None, QtGui.QApplication.UnicodeUTF8))
self.splitOnLabel.setText(QtGui.QApplication.translate("FileSplitDialog", "Split On", None, QtGui.QApplication.UnicodeUTF8))
self.btnSplitOnPushRight.setText(QtGui.QApplication.translate("FileSplitDialog", ">>", None, QtGui.QApplication.UnicodeUTF8))
self.btnSplitOnPushLeft.setText(QtGui.QApplication.translate("FileSplitDialog", "<<", None, QtGui.QApplication.UnicodeUTF8))
self.sourcePathLabel.setText(QtGui.QApplication.translate("FileSplitDialog", "Source Path", None, QtGui.QApplication.UnicodeUTF8))
self.btnSourcePathBrowse.setText(QtGui.QApplication.translate("FileSplitDialog", "Browse", None, QtGui.QApplication.UnicodeUTF8))
self.btnUseColsPushRight.setText(QtGui.QApplication.translate("FileSplitDialog", ">>", None, QtGui.QApplication.UnicodeUTF8))
self.btnUseColsPushLeft.setText(QtGui.QApplication.translate("FileSplitDialog", "<<", None, QtGui.QApplication.UnicodeUTF8))
self.useColumnsLabel.setText(QtGui.QApplication.translate("FileSplitDialog", "Use Columns", None, QtGui.QApplication.UnicodeUTF8))
self.maxRowsLabel.setText(QtGui.QApplication.translate("FileSplitDialog", "Max Rows", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxDropNulls.setText(QtGui.QApplication.translate("FileSplitDialog", "Drop Null Values", None, QtGui.QApplication.UnicodeUTF8))
self.btnExportTemplate.setText(QtGui.QApplication.translate("FileSplitDialog", "Export", None, QtGui.QApplication.UnicodeUTF8))
self.btnImportTemplate.setText(QtGui.QApplication.translate("FileSplitDialog", "Import", None, QtGui.QApplication.UnicodeUTF8))
self.labelTemplate.setText(QtGui.QApplication.translate("FileSplitDialog", "Template", None, QtGui.QApplication.UnicodeUTF8))
| 1.40625
| 1
|
src/tests/test_ggf.py
|
dnf0/kcl-globalgasflaring
| 1
|
12775702
|
<gh_stars>1-10
import unittest
import pandas as pd
import numpy as np
import glob
import epr
import src.utils as utils
from src.ggf.detectors import SLSDetector, ATXDetector
class MyTestCase(unittest.TestCase):
# -----------------
# unit tests
# -----------------
def test_szn_interpolation(self):
path_to_data = glob.glob("../../data/test_data/S3A*.zip")[0]
path_to_target = "../../data/test_data/sls_szn.npy"
path_to_temp = "../../data/temp/"
target = np.load(path_to_target)
product = utils.extract_zip(path_to_data, path_to_temp)
HotspotDetector = SLSDetector(product)
HotspotDetector.run_detector()
self.assertEqual(True, (target == HotspotDetector.sza).all())
def test_night_mask_sls(self):
path_to_data = glob.glob("../../data/test_data/S3A*.zip")[0]
path_to_target = "../../data/test_data/sls_nightmask.npy"
path_to_temp = "../../data/temp/"
target = np.load(path_to_target)
product = utils.extract_zip(path_to_data, path_to_temp)
HotspotDetector = SLSDetector(product)
HotspotDetector.run_detector()
self.assertEqual(True, (target == HotspotDetector.night_mask).all())
def test_night_mask_atx(self):
path_to_data = glob.glob("../../data/test_data/*.N1")[0]
path_to_target = "../../data/test_data/atx_nightmask.npy"
target = np.load(path_to_target)
target_mean = np.mean(target)
product = epr.Product(path_to_data)
HotspotDetector = ATXDetector(product)
HotspotDetector.run_detector()
self.assertAlmostEqual(target_mean, np.mean(HotspotDetector.night_mask))
def test_vza_interpolation(self):
path_to_data = glob.glob("../../data/test_data/S3A*.zip")[0]
path_to_target = "../../data/test_data/sls_vza.npy"
path_to_temp = "../../data/temp/"
target = np.load(path_to_target)
product = utils.extract_zip(path_to_data, path_to_temp)
HotspotDetector = SLSDetector(product)
HotspotDetector.run_detector()
self.assertEqual(True, (target == HotspotDetector.vza).all())
def test_vza_mask(self):
path_to_data = glob.glob("../../data/test_data/S3A*.zip")[0]
path_to_target = "../../data/test_data/sls_vza_mask.npy"
path_to_temp = "../../data/temp/"
target = np.load(path_to_target)
product = utils.extract_zip(path_to_data, path_to_temp)
HotspotDetector = SLSDetector(product)
HotspotDetector.run_detector()
self.assertEqual(True, (target == HotspotDetector.vza_mask).all())
def test_detect_hotspots_sls(self):
path_to_data = glob.glob("../../data/test_data/S3A*.zip")[0]
path_to_target = "../../data/test_data/sls_detect_hotspots.npy"
path_to_temp = "../../data/temp/"
target = np.load(path_to_target)
product = utils.extract_zip(path_to_data, path_to_temp)
HotspotDetector = SLSDetector(product)
HotspotDetector.run_detector()
self.assertEqual(True, (target == HotspotDetector.hotspots).all())
def test_detect_hotspots_atx(self):
path_to_data = glob.glob("../../data/test_data/*.N1")[0]
path_to_target = "../../data/test_data/atx_detect_hotspots.npy"
target = np.load(path_to_target)
product = epr.Product(path_to_data)
HotspotDetector = ATXDetector(product)
HotspotDetector.run_detector()
self.assertEqual(True, (target == HotspotDetector.hotspots).all())
def test_cloud_free_atx(self):
path_to_data = glob.glob("../../data/test_data/*.N1")[0]
path_to_target = "../../data/test_data/atx_cloud_mask.npy"
target = np.load(path_to_target)
product = epr.Product(path_to_data)
HotspotDetector = ATXDetector(product)
HotspotDetector.run_detector()
self.assertEqual(True, (target == HotspotDetector.cloud_free).all())
def test_get_arcmin_int(self):
coords = np.array([-150.53434, -100.13425, -50.20493, 0.34982, 50.43562, 100.12343, 150.56443])
target = np.array([-15032, -10008, -5012, 21, 5026, 10007, 15034])
path_to_data = glob.glob("../../data/test_data/*.N1")[0]
product = epr.Product(path_to_data)
HotspotDetector = ATXDetector(product)
result = HotspotDetector._find_arcmin_gridcell(coords)
self.assertEqual(True, (target == result).all())
def test_radiance_from_reflectance(self):
path_to_target = "../../data/test_data/atx_radiance_from_reflectance.npy"
target = np.load(path_to_target)
path_to_data = glob.glob("../../data/test_data/*.N1")[0]
product = epr.Product(path_to_data)
HotspotDetector = ATXDetector(product)
reflectance = product.get_band('reflec_nadir_1600').read_as_array()
result = HotspotDetector._rad_from_ref(reflectance)
self.assertEqual(True, (target == result).all())
def test_radiance_from_BT(self):
path_to_data = glob.glob("../../data/test_data/*.N1")[0]
product = epr.Product(path_to_data)
HotspotDetector = ATXDetector(product)
brightness_temp = 1500
wavelength = 1.6
result = HotspotDetector._rad_from_BT(wavelength, brightness_temp)
target = 28200.577465487077
self.assertAlmostEqual(target, result)
def test_sun_earth_distance(self):
path_to_data = glob.glob("../../data/test_data/*.N1")[0]
product = epr.Product(path_to_data)
HotspotDetector = ATXDetector(product)
target = 0.9877038273760421
result = HotspotDetector._compute_sun_earth_distance()
self.assertAlmostEqual(target, result)
def test_compute_frp(self):
path_to_data = glob.glob("../../data/test_data/*.N1")[0]
product = epr.Product(path_to_data)
HotspotDetector = ATXDetector(product)
HotspotDetector.run_detector(flares_or_sampling=True)
path_to_target = "../../data/test_data/atx_frp.npy"
target = np.load(path_to_target)
result = HotspotDetector.frp
self.assertEqual(True, (target == result).all())
# -----------------
# functional tests
# -----------------
def test_run_atx(self):
target = pd.read_csv(glob.glob("../../data/test_data/ATS*.csv")[0])
path_to_data = glob.glob("../../data/test_data/*.N1")[0]
product = epr.Product(path_to_data)
HotspotDetector = ATXDetector(product)
HotspotDetector.run_detector()
result = HotspotDetector.to_dataframe(keys=['latitude', 'longitude'])
# TODO determine why floating point errors are causing issues in testing here
target = target.astype(int)
result = result.astype(int)
are_equal = target.equals(result)
self.assertEqual(True, are_equal)
def test_run_sls(self):
# setup
target = pd.read_csv(glob.glob("../../data/test_data/S3A*.csv")[0])
path_to_data = glob.glob("../../data/test_data/S3A*.zip")[0]
path_to_temp = "../../data/temp/"
product = utils.extract_zip(path_to_data, path_to_temp)
HotspotDetector = SLSDetector(product)
HotspotDetector.run_detector()
result = HotspotDetector.to_dataframe(keys=['latitude', 'longitude', 'sza', 'vza', 'swir_16', 'swir_22'])
# TODO determine why floating point errors are causing issues in testing here
target = target.astype(int)
result = result.astype(int)
# compare
are_equal = target.equals(result)
self.assertEqual(True, are_equal)
if __name__ == '__main__':
unittest.main()
| 2.296875
| 2
|
wiki/views.py
|
meredithcat/makewiki_v2
| 0
|
12775703
|
from django.shortcuts import render, redirect
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormView
from django.contrib.auth.models import User
from wiki.models import Page
from wiki.forms import PageForm
class PageListView(ListView):
""" Renders a list of all Pages. """
model = Page
def get(self, request):
""" GET a list of Pages. """
pages = self.get_queryset().all()
return render(request, 'list.html', {
'pages': pages
})
class PageDetailView(DetailView):
    """ Renders a specific page based on its slug."""
model = Page
def get(self, request, slug):
""" Returns a specific wiki page by slug. """
page = self.get_queryset().get(slug__iexact=slug)
return render(request, 'page.html', {
'page': page
})
class PageCreateView(FormView):
template_name = 'create_page.html'
form_class = PageForm
success_url = '/'
def post(self, request):
page_form = PageForm(request.POST)
page = page_form.save(commit=False)
page.author = User.objects.get(id=request.POST['author'])
page.save()
return redirect(page)
def form_valid(self, form):
return super().form_valid(form)
| 2.21875
| 2
|
mirrorkan_generate_feed.py
|
KSP-CKAN/MirrorKAN
| 4
|
12775704
|
#!/usr/bin/env python
import os, sys
from distutils.version import LooseVersion
from dateutil.parser import parse
from datetime import datetime, timedelta
import json
import operator
import pytz
import PyRSS2Gen
from mirrorkan import parse_ckan_metadata_directory
from mirrorkan_conf import *
from mirrorkan_util import find_files_with_extension
class EnhancedRSS2(PyRSS2Gen.RSS2):
def publish_extensions(self, handler):
PyRSS2Gen._element(handler, 'atom:link', None, {
'href': LOCAL_URL_PREFIX + "feed/ckan.rss",
'rel': 'self',
'type': 'application/rss+xml'})
def main():
print 'Building CKAN RSS Feed..'
ckan_files, ckan_json = parse_ckan_metadata_directory(LOCAL_CKAN_PATH)
unsorted_feed_path = os.path.join(FEED_PATH, 'unsorted.json')
with open(unsorted_feed_path, 'w') as unsorted_feed_file:
print 'Writing %s' % unsorted_feed_path
json.dump(ckan_json, unsorted_feed_file, indent=4, sort_keys=True)
sorted_ckan_json = sorted(ckan_json, key=lambda ckan: ckan[0].get('x_last_updated_ts'))
sorted_feed_path = os.path.join(FEED_PATH, 'sorted.json')
with open(sorted_feed_path, 'w') as sorted_feed_file:
print 'Writing %s' % sorted_feed_path
json.dump(sorted_ckan_json, sorted_feed_file, indent=4, sort_keys=True)
rssitems = []
module_number = 1000000
for ckan_module in sorted_ckan_json:
module_number = module_number + 1
        # Fallback for link in case nothing can be determined
link = 'http://kerbalstuff.com/' + str(module_number)
title = ckan_module[0]['name']
if 'resources' in ckan_module[0]:
if 'kerbalstuff' in ckan_module[0]['resources']:
link = ckan_module[0]['resources']['kerbalstuff']
# elif 'homepage' in ckan_module[0]['resources']:
# link = ckan_module[0]['resources']['homepage']
elif 'repository' in ckan_module[0]['resources']:
link = ckan_module[0]['resources']['repository']
# Make links unique
link = link + '#' + str(module_number)
description = ckan_module[0]['abstract']
guid = PyRSS2Gen.Guid(link, False)
pubDate = datetime.fromtimestamp(ckan_module[0].get('x_last_updated_ts', 1000000000), pytz.utc)
item = PyRSS2Gen.RSSItem(title,
link,
description,
None, # author
None, # categories
None, # comments
None, # enclosure
guid,
pubDate,
None) # source
rssitems.append(item)
rss = EnhancedRSS2(
title = INDEX_HTML_HEADER + " feed",
link = LOCAL_URL_PREFIX + "feed/ckan.rss",
description = "The latest ckan recipes",
lastBuildDate = datetime.now(pytz.utc),
items = rssitems)
rss.rss_attrs = {"version": "2.0", "xmlns:atom": "http://www.w3.org/2005/Atom"}
rss_feed_path = os.path.join(FEED_PATH, 'ckan.rss')
with open(rss_feed_path, 'w') as rss_feed_file:
print 'Writing %s' % rss_feed_path
rss.write_xml(rss_feed_file)
print 'Done!'
if __name__ == "__main__":
main()
| 2.09375
| 2
|
blog/urls.py
|
GuilleJR83/ProyectoInformatorio2021
| 0
|
12775705
|
#from django.conf import settings
from django.conf.urls.static import static
from django import views
from django.contrib import admin
from django.shortcuts import redirect
from django.urls import path, include
from django.utils import decorators
from blog import settings #from .settings import local
import categoria.views, comentario.views, cuenta.views, publicacion.views
"""blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
urlpatterns = [
    path('admin/', admin.site.urls), # administration site
    path('', publicacion.views.index, name='index'), # site home page
    path('nosotros', publicacion.views.nosotros, name='nosotros'), # "about us" page
path('blog/post/nueva/', publicacion.views.nueva, name='publicacion_nueva'),
path('blog/post/editar/<int:id>/', publicacion.views.editar, name='publicacion_editar'),
path('blog/post/eliminar/<int:id>/', publicacion.views.eliminar, name='publicacion_eliminar'),
path('blog/post/<int:id>/', publicacion.views.ver, name='publicacion_ver'),
path('blog/post/autor/', publicacion.views.autor, name='publicacion_por_autor'),
path('blog/post/autor/<int:id>/', publicacion.views.autor, name='publicacion_por_autor'),
path('blog/post/<int:publicacion_id>/comentario/<int:comentario_id>/', comentario.views.ver, name='comentario_ver'),
path('categoria/listado/', categoria.views.listado, name='categoria_listado'),
path('categoria/nueva/', categoria.views.nueva, name='categoria_nueva'),
path('categoria/editar/<int:id>/', categoria.views.editar, name='categoria_editar'),
path('categoria/eliminar/<int:id>/', categoria.views.eliminar, name='categoria_eliminar'),
path('categoria/<int:id>/', categoria.views.filtrar, name='categoria_filtrar'),
path('usuario/listado/', cuenta.views.usuario_listado, name="usuario_listado"),
path('usuario/nuevo/', cuenta.views.usuario_nuevo, name="usuario_nuevo"),
path('usuario/editar/<int:id>/', cuenta.views.usuario_editar, name="usuario_editar"),
path('usuario/eliminar/<int:id>/', cuenta.views.usuario_eliminar, name="usuario_eliminar"),
path('usuario/tipo/', cuenta.views.tipo_listado, name="tipo_listado"),
path('usuario/tipo/nuevo/', cuenta.views.tipo_nuevo, name="tipo_nuevo"),
path('usuario/tipo/editar/<int:id>', cuenta.views.tipo_editar, name="tipo_editar"),
path('usuario/tipo/eliminar/<int:id>', cuenta.views.tipo_eliminar, name="tipo_eliminar"),
path('cuenta/', cuenta.views.cuenta, name="cuenta"),
path('cuenta/registrar/', cuenta.views.registrar, name="cuenta_registrar"),
path('cuenta/iniciar/', cuenta.views.iniciar_sesion, name="iniciar_sesion"),
path('cuenta/cerrar/', cuenta.views.cerrar_sesion, name="cerrar_sesion"),
path('cuenta/cambiarPassword/', cuenta.views.cambiarPassword, name="cambiarPassword"),
path('restringido/', cuenta.views.restringido, name='restringido'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 2.421875
| 2
|
youtubesearchpython/base/constants.py
|
cinemafactory2/youtube-search-python
| 1
|
12775706
|
<gh_stars>1-10
VIDEO_ELEMENT = 'videoRenderer'
CHANNEL_ELEMENT = 'channelRenderer'
PLAYLIST_ELEMENT = 'playlistRenderer'
SHELF_ELEMENT = 'shelfRenderer'
class ResultMode:
json = 0
dict = 1
class SearchMode:
videos = 'EgIQAQ%3D%3D'
channels = 'EgIQAg%3D%3D'
playlists = 'EgIQAw%3D%3D'
class VideoUploadDateFilter:
lastHour = 'EgQIARAB'
today = 'EgQIAhAB'
thisWeek = 'EgQIAxAB'
thisMonth = 'EgQIBBAB'
thisYear = 'EgQIBRAB'
class VideoDurationFilter:
short = 'EgQQARgB'
long = 'EgQQARgC'
class VideoSortOrder:
relevance = 'CAASAhAB'
uploadDate = 'CAISAhAB'
viewCount = 'CAMSAhAB'
rating = 'CAESAhAB'
| 1.640625
| 2
|
curators/agents/contextual.py
|
BellwethrInc/decision_engine
| 1
|
12775707
|
from curators.models.contextual_bandits import ClusterBandit
TYPE_MAPPING = {
'cluster': ClusterBandit,
}
class ContextualAgent:
"""Coming Soon.
"""
def __init__(self, n_profiles, n_actions, type='cluster'):
raise NotImplementedError
def update(self):
raise NotImplementedError
def sample_one(self, x):
raise NotImplementedError
def sample(self, X):
actions = []
for x in X:
actions.append(self.sample_one(x))
raise NotImplementedError
| 2.125
| 2
|
src/Game/__init__.py
|
MiguelReuter/Volley-ball-game
| 4
|
12775708
|
<reponame>MiguelReuter/Volley-ball-game<filename>src/Game/__init__.py
# encoding : UTF-8
from .ball import Ball
from .character import Character, Team
import Game.character_states as CharacterStates
from .court import Court
from .ground import Ground
| 1.429688
| 1
|
train_svm.py
|
aleaugustoplus/airline_ranker
| 0
|
12775709
|
<gh_stars>0
from pyspark import SparkConf, SparkContext
import sys
from pyspark.sql import SQLContext
from pyspark.ml.feature import Word2Vec
import re
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, ArrayType
from pyspark.mllib.classification import SVMWithSGD, SVMModel
from pyspark.mllib.regression import LabeledPoint
#time spark-submit --master=yarn-client --num-executors 6 --executor-memory=30gb train_svm.py airline_train.txt out | tee out.txt
embedded_size=128
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
def get_line(line):
try:
line=line.encode('ascii', 'ignore')
(comment, sentiment)=line.split(":::")
if comment is None or sentiment is None:
raise Exception('None Value!')
comment=clean_str(comment)
sentiment=int(sentiment)
return (comment, sentiment)
except:
return None
def get_words((a,b)):
o=a.split(" ")
return o,b
def get_labeled_points(record):
return LabeledPoint(int(record['sentiment']),record['result'])
def get_labeled_points_sample(record):
return LabeledPoint(int(1), record['result'])
def train_word2vec(data):
word2Vec = Word2Vec(vectorSize=embedded_size, minCount=0, inputCol="comment", outputCol="result")
model = word2Vec.fit(data)
# model.save(sc,"train_results/word2vec.train")
return model
def load_word2vec(sc):
model=Word2VecModel.load(sc, "googlew2v.bin")
return model
def word2vec_transform(data, model):
result = model.transform(data)
return result
def load_svm(sc):
model= SVMModel.load(sc, "target1/tmp/pythonSVMWithSGDModel")
return model
def load_data(sc, train_data):
data_schema = StructType([StructField('comment', ArrayType(StringType(), True)), #])
StructField('sentiment', IntegerType(), False), ])
data=train_data.map(get_line).filter(lambda l: l is not None)
data=data.map(get_words)
training, validation = data.randomSplit([0.8, 0.2])
training=training.toDF(data_schema).cache()
validation=validation.toDF(data_schema).cache()
return training, validation
def train_svm(points):
model = SVMWithSGD.train(points, iterations=200)
# Save and load model
#model.save(sc, "target3/tmp/pythonSVMWithSGDModel")
return model
def predict_svm(points, model, string):
labelsAndPreds = points.map(lambda p: (p.label, model.predict(p.features)))
#print "Predictions: ", labelsAndPreds.take(10)
trainErr = labelsAndPreds.filter(lambda (v, p): v != p).count() / float(points.count())
print(string + str(trainErr))
def predict_samples(sqlContext, w2v_model, svm_model):
# Input data: Each row is a bag of words from a sentence or document.
samples = sqlContext.createDataFrame([
("Flight attendants rude - had multiple flight attendants bump into me with carts and bodies, with no apologies. Offered 1 complimentary soda, with optional water now and then. Terrible service and basically no amenities. Not even chips or pretzels. Seat was dirty and no TV on return flight.".split(" "), ),
("I travelled from Sydney Australia to SFO in business the announcements were so loud and the screen behind the seat lit up every time there was any turbulence. Why did I pay a fortune for business class when I could get no sleep whatsoever. The volume of these announcements was the same on United flight SFO to BOS and Chicago to SFO.".split(" "), ),
("nhappy customer, traveling back from Toronto my wife was seated in row 12 I was in row 35. No room to put hand luggage had to sit with my legs in the isle with people tripping over them and hit with service trolleys. In flight entertainment didn't work for over half the flight. I went to customer service in San Francisco to complain and was to complain on line. But I could find complaints on there home page. poor effort United Airlines.".split(" "), ),
("On 5 out of 6 flights I've taken I have been told their was minor delays meaning pilot or other employee states 5 to 10 minutes. These delays always seem to translate to 2 to 3 hours whilst the entire time someone is reassuring you it will be just a little longer. Recently made a 4 hour drive to the airport to find out my flight was delayed almost 6 hours, something they were well aware of when the aircraft left cross country flight 5 hours away. Why wasn't I emailed or called. Very often employees shift blame elsewhere too and have little to no actual information.".split(" "), )
], ["comment"])
samples_wvec=word2vec_transform(samples, w2v_model)
#print "Samples:", samples_wvec.take(10)
samples_point=samples_wvec.map(get_labeled_points_sample)
predict_svm(samples_point, svm_model, "Samples error:")
def train_svm_w2vec(sc,inputs):
training, validation=load_data(sc, inputs)
w2v_model=train_word2vec(training)
t_training=word2vec_transform(training, w2v_model)
t_validation=word2vec_transform(validation, w2v_model)
training_points=t_training.map(get_labeled_points).cache()
validation_points=t_validation.map(get_labeled_points).cache()
svm_model=train_svm(training_points)
predict_svm(training_points, svm_model, "Training error:")
predict_svm(validation_points, svm_model, "Validation error:")
return svm_model, w2v_model
def main():
conf = SparkConf().setAppName('Airline Data')
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
inputs = sys.argv[1]
output = sys.argv[2]
svm_model, w2v_model = train_svm_w2vec(sc, inputs)
predict_samples(sqlContext, w2v_model, svm_model)
if __name__ == "__main__":
main()
| 2.40625
| 2
|
company/views.py
|
WillieIlus/jobscorner
| 2
|
12775710
|
from builtins import super
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
# from django.core.checks import messages
from django.shortcuts import render, get_object_or_404
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.generic import DeleteView, CreateView
from extra_views import CreateWithInlinesView, UpdateWithInlinesView, InlineFormSetFactory
from hitcount.views import HitCountDetailView
from accounts.decorators import UserRequiredMixin, employer_required
from category.models import Category
from company.models import Company, CompanyImage, OpeningHours, ClosingRules
# Category views
from jobcorner import settings
from location.models import Location
from reviews.forms import ReviewForm
from .filters import CompanyFilter
from .forms import CompanyForm, OpeningHoursForm, CompanyFilterForm
def company_list_view(request):
company_list = Company.published.all()
company_filter = CompanyFilter(request.GET, queryset=company_list)
form = CompanyFilterForm(data=request.GET)
facets = {
"selected": {},
"catego": {
"category": Category.objects.all(),
"location": Location.objects.all(),
},
}
if form.is_valid():
category = form.cleaned_data["category"]
if category:
facets["selected"]["category"] = category
company_list = company_list.filter(category=category).distinct()
location = form.cleaned_data["location"]
if location:
facets["selected"]["location"] = location
company_list = company_list.filter(location=location).distinct()
if settings.DEBUG:
from pprint import pprint
pprint(facets)
context = {
"form": form,
"facets": facets,
"object_list": company_list,
'filter': company_filter,
}
return render(request, 'company/list.html', context)
class PhotosInline(InlineFormSetFactory):
model = CompanyImage
# form_class = CompanyPhotoFormSet
fields = ['img', 'alt']
@method_decorator([login_required, employer_required], name='dispatch')
class CompanyCreate(CreateWithInlinesView):
model = Company
inlines = [PhotosInline]
form_class = CompanyForm
template_name = 'company/form.html'
def forms_valid(self, form, inlines):
form.instance.user = self.request.user
return super(CompanyCreate, self).forms_valid(form, inlines)
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# context['title'] = "Add your Company"
class CompanyEdit(LoginRequiredMixin, UserRequiredMixin, UpdateWithInlinesView):
model = Company
inlines = [PhotosInline]
slug_url_kwarg = 'slug'
form_class = CompanyForm
template_name = 'company/form.html'
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# # context['company'] = Company.objects.all()[:5]
# context['title'] = " Update Company "
def forms_valid(self, form, inlines):
form.instance.user = self.request.user
return super(CompanyEdit, self).forms_valid(form, inlines)
def get_success_url(self):
return self.object.get_absolute_url()
class CompanyDelete(LoginRequiredMixin, UserRequiredMixin, DeleteView):
model = Company
success_url = reverse_lazy('company:list')
template_name = 'delete.html'
class CompanyDetail(HitCountDetailView):
queryset = Company.published.all()
template_name = 'company/detail.html'
context_object_name = 'company'
slug_field = 'slug'
count_hit = True
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# context['meta'] = self.get_object().as_meta(self.request)
context['company_image'] = CompanyImage.objects.filter(company=self.get_object())
context['open_hours'] = OpeningHours.objects.filter(company=self.get_object())
context['closing_rules'] = ClosingRules.objects.filter(company=self.get_object())
context['form'] = ReviewForm()
context['related'] = self.object.tags.similar_objects()[:4]
return context
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super().post(request, *args, **kwargs)
#
# class OpeningHourCreate(LoginRequiredMixin, ModelFormSetView):
# model = OpeningHours
# form_class = OpeningHoursForm
# # formset_class = OpeningHoursFormset
# template_name = 'company/formset.html'
# flocationy_kwargs = {'can_order': False, 'can_delete': False}
# # formset_kwargs = {'auto_id': 'my_id_%s'}
#
#
# def form_valid(self, form):
# form.instance.company = get_object_or_404(Company, slug=self.kwargs['slug'])
# form.save()
# return super().form_valid(form)
#
# def form_invalid(self, form):
# """
# If the form is invalid, re-render the context data with the
# data-filled form and errors.
# """
# print('the is an error in your form')
# messages.warning(self.request, 'There was an error in this form')
# return self.render_to_response(self.get_context_data(form=form))
#
class OpeningHourCreate(LoginRequiredMixin, CreateView):
model = OpeningHours
form_class = OpeningHoursForm
template_name = 'form.html'
def form_valid(self, form):
form.instance.company = get_object_or_404(Company, slug=self.kwargs['slug'])
form.save()
return super().form_valid(form)
def form_invalid(self, form):
"""
If the form is invalid, re-render the context data with the
data-filled form and errors.
"""
        print('there is an error in your form')
messages.warning(self.request, 'There was an error in this form')
return self.render_to_response(self.get_context_data(form=form))
# follow(request.user)
# unfollow(request.user)
# followers(request.user) # returns a list of Users who follow request.user
# following(request.user) # returns a list of locations who request.user is following
| 1.945313
| 2
|
foiamachine/local/lib/python2.7/encodings/mac_cyrillic.py
|
dwillis/foiamachine
| 3
|
12775711
|
/usr/lib/python2.7/encodings/mac_cyrillic.py
| 1.070313
| 1
|
cloudmersive_convert_api_client/models/replace_string_regex_request.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Convert
| 3
|
12775712
|
# coding: utf-8
"""
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ReplaceStringRegexRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'text_content': 'str',
'regular_expression_string': 'str',
'replace_with_string': 'str'
}
attribute_map = {
'text_content': 'TextContent',
'regular_expression_string': 'RegularExpressionString',
'replace_with_string': 'ReplaceWithString'
}
def __init__(self, text_content=None, regular_expression_string=None, replace_with_string=None): # noqa: E501
"""ReplaceStringRegexRequest - a model defined in Swagger""" # noqa: E501
self._text_content = None
self._regular_expression_string = None
self._replace_with_string = None
self.discriminator = None
if text_content is not None:
self.text_content = text_content
if regular_expression_string is not None:
self.regular_expression_string = regular_expression_string
if replace_with_string is not None:
self.replace_with_string = replace_with_string
@property
def text_content(self):
"""Gets the text_content of this ReplaceStringRegexRequest. # noqa: E501
Input text content # noqa: E501
:return: The text_content of this ReplaceStringRegexRequest. # noqa: E501
:rtype: str
"""
return self._text_content
@text_content.setter
def text_content(self, text_content):
"""Sets the text_content of this ReplaceStringRegexRequest.
Input text content # noqa: E501
:param text_content: The text_content of this ReplaceStringRegexRequest. # noqa: E501
:type: str
"""
self._text_content = text_content
@property
def regular_expression_string(self):
"""Gets the regular_expression_string of this ReplaceStringRegexRequest. # noqa: E501
Target input regular expression (regex) string to match and be replaced; supports all regular expression values # noqa: E501
:return: The regular_expression_string of this ReplaceStringRegexRequest. # noqa: E501
:rtype: str
"""
return self._regular_expression_string
@regular_expression_string.setter
def regular_expression_string(self, regular_expression_string):
"""Sets the regular_expression_string of this ReplaceStringRegexRequest.
Target input regular expression (regex) string to match and be replaced; supports all regular expression values # noqa: E501
:param regular_expression_string: The regular_expression_string of this ReplaceStringRegexRequest. # noqa: E501
:type: str
"""
self._regular_expression_string = regular_expression_string
@property
def replace_with_string(self):
"""Gets the replace_with_string of this ReplaceStringRegexRequest. # noqa: E501
Replacement for target string; supports referencing indexed regex matched values from RegularExpressionString, such as $1, $2, and so on # noqa: E501
:return: The replace_with_string of this ReplaceStringRegexRequest. # noqa: E501
:rtype: str
"""
return self._replace_with_string
@replace_with_string.setter
def replace_with_string(self, replace_with_string):
"""Sets the replace_with_string of this ReplaceStringRegexRequest.
Replacement for target string; supports referencing indexed regex matched values from RegularExpressionString, such as $1, $2, and so on # noqa: E501
:param replace_with_string: The replace_with_string of this ReplaceStringRegexRequest. # noqa: E501
:type: str
"""
self._replace_with_string = replace_with_string
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ReplaceStringRegexRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReplaceStringRegexRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 2.28125
| 2
|
DAO/BotDAOFile.py
|
sebampuero/dummy-discord-bot
| 0
|
12775713
|
<reponame>sebampuero/dummy-discord-bot
import json
class BotDAOFile():
def get_radios(self):
f = open("./config/radio_stations.json", "r")
radios = json.loads(f.read())
f.close()
return radios
def get_users_welcome_audios(self):
f = open("./config/users_audio_map.json", "r")
user_ids_to_audio_map = json.load(f)
f.close()
return user_ids_to_audio_map
def save_radios(self, radios_new):
with open("./config/radio_stations.json", 'w', encoding='utf-8') as f:
json.dump(radios_new, f, ensure_ascii=False, indent=4)
def save_users_welcome_audios(self, new_):
with open("./config/users_audio_map.json", 'w', encoding='utf-8') as f:
json.dump(new_, f, ensure_ascii=False, indent=4)
| 2.71875
| 3
|
aavso/webobs.py
|
dtouzan/ciboulette
| 1
|
12775714
|
"""
WebObs class
"""
from astropy.table import Table
from astropy import units as u
from astropy.coordinates import SkyCoord
import numpy as np
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
import os
import io
import wget
import requests
class WebObs(object):
"""
Class for AAVSO web observation.
fileoutput = aavso.html
    filtername = Vis | B | V | R | I ('Vis' selects visual observations, any CCD band selects ccd results)
"""
def __init__(self, nameID, filtername='Vis', fileoutput='aavso.html'):
self.nameID = nameID
self.filter = filtername
self.fileoutput = fileoutput
self.titlename = ''
self.comment = ''
self.observation = Table()
self.html = BeautifulSoup()
self.available = False
self._period = 0
self.filter = self.isfilter(filtername)
self.read
@property
def read(self):
"""
Return html of observation
Ex: wget --no-check-certificate 'https://app.aavso.org/webobs/results/?star=' -O aavso.html
"""
if os.path.exists(self.fileoutput) :
os.remove(self.fileoutput)
if ' ' in self.nameID:
nameID = self.nameID.replace(' ','%20')
else:
nameID = self.nameID
if self.isccd:
filtername = 'ccd'
else:
filtername = 'vis'
url = 'https://app.aavso.org/webobs/results/?star=' + nameID + '&num_results=200' + '&obs_types=' + filtername
filedownload = wget.download(url,out=self.fileoutput,bar=None)
with open(filedownload) as fp:
self.html = BeautifulSoup(fp, 'html.parser')
if self.noerror == 0 :
self.available = True
self.title
self.comments
self.table
else:
self.available = False
@property
def title(self):
self.titlename = self.html.title.contents[0] + ' -- ' + self.nameID
return self.titlename
@property
def comments(self):
if self.available:
comment = self.html.find(id='obsinfo').contents[0].string
comment = comment + self.html.find(id='obsinfo').contents[1].string
comment = comment + self.html.find(id='obsinfo').contents[2].string.replace('\n \n','').replace('\n','').replace(' ',' ')
comment = comment + self.html.find(id='obsinfo').contents[3].string
comment = comment + self.html.find(id='obsinfo').contents[4].string.replace('\n \n \n \n ','')
comment = comment + self.html.find(id='obsinfo').contents[5].string
comment = comment + self.html.find(id='obsinfo').contents[6].string.replace('\n \n \n \n \n ','')
self.comment = comment
return self.comment
def isfilter(self,filtername='vis'):
"""
Return filter
"""
if filtername in ['Vis','I','R','B','V']:
f = filtername
else:
f = 'Vis'
return f
@property
def isccd(self):
"""
Return true if in ccd filter
"""
if self.filter in ['I','R','B','V']:
return True
else:
return False
@property
def data(self):
"""
Return data of html file observations
"""
data = []
if self.available:
data = self.html.table.contents[3].get_text().replace('\n','|').replace('Details...|||||||||Comp Star|Check Star|Transformed|Chart|Comment Codes|Notes|||||','').replace('|||||||||','<>').replace('|||||||','').replace('|||','').replace('| (','(').replace('| ','').split('<>')
return data
@property
def table(self):
"""
Return Table of html file observations
"""
Star = []
JD = []
Calendar_Date = []
Magnitude = []
Error = []
Filter = []
Observer = []
Comp_Star = []
Check_Star = []
Transformed = []
Chart = []
Comment_Codes = []
Notes = []
if self.available:
for ligne in self.data:
data = ligne.split('|')
if self.filter in data[5]:
Star.append(data[0])
JD.append(float(data[1]))
Calendar_Date.append(data[2])
if isinstance(data[3], int) or isinstance(data[3], float):
Magnitude.append(float(data[3]))
else:
Magnitude.append(float(data[3].replace('<','')))
Error.append(data[4])
Filter.append(data[5])
Observer.append(data[6])
Comp_Star.append(data[7])
Check_Star.append(data[8])
Transformed.append(data[9])
Chart.append(data[10])
Comment_Codes.append(data[11])
Notes.append(data[12])
if len(Star) > 0:
self.observation = Table([Star,JD,Calendar_Date,Magnitude,Error,Filter,Observer,Comp_Star,Check_Star,Transformed,Chart,Comment_Codes,Notes],
names=['Star', 'JD', 'Calendar Date', 'Magnitude', 'Error', 'Filter', 'Observer', 'Comp Star', 'Check Star', 'Transformed', 'Chart', 'Comment Codes', 'Notes'])
self._period = self.observation['JD'][0] - self.observation['JD'][len(self.observation)-1]
return self.observation
@property
def period(self):
"""
Return period JD
"""
if self.observation:
return self._period
@property
def observations(self):
"""
Return observations table
"""
if self.observation:
return self.observation
@property
def JDMinMax(self):
"""
Return min and max JD in observations table
"""
if self.observation:
return self.observation['JD'][len(self.observation)-1],self.observation['JD'][0]
@property
def magnitudeMinMax(self):
"""
Return min and max of magnitude in observations table
"""
if self.observation:
return min(self.observation['Magnitude']),max(self.observation['Magnitude'])
def plot(self):
"""
Plot observations table
"""
if self.available:
jd_min,jd_max = self.JDMinMax
mv_min,mv_max = self.magnitudeMinMax
x = []
for value in self.observations:
x.append(value['JD']-jd_min)
y = self.observations['Magnitude']
mymodel = np.poly1d(np.polyfit(x, y, 5))
myline = np.linspace(0, jd_max-jd_min, 2000)
plt.xlim(-.5,round(jd_max-jd_min)+.5)
plt.ylim(round(mv_min)-0.5,round(mv_max)+0.5)
plt.gca().invert_yaxis()
plt.scatter(x, y, c = 'black', s = 2, alpha = 0.5)
plt.plot(myline, mymodel(myline))
plt.title(self.title, loc='center')
plt.xlabel(str(int(jd_min))+'\nJD', fontsize = 12)
if self.filter == 'Vis':
plt.ylabel(r'$m_v$', fontsize = 12)
else:
plt.ylabel(self.filter, fontsize = 12)
plt.show()
else:
print(self.comment)
@property
def noerror(self):
"""
Error handling
"""
error_code = 0
if 'errors' in self.html.p.get_text():
error_code = 404
self.comment = 'The star ' + self.nameID + ' cannot be found in our database.'
else:
if 'no results' in self.html.p.get_text():
error_code = 404
self.comment = 'The star ' + self.nameID + ' cannot be found in our database.'
return error_code
class datadownload(object):
"""
    Class for AAVSO data download (https://www.aavso.org/data-download).
    fileinput = path to a CSV exported from data-download (default: aavsodata.csv)
    filtername = Vis.|B|V|R|I|TG|CV
"""
def __init__(self, filtername='Vis.', fileinput='aavsodata.csv'):
self.nameID = ''
self.filter = filtername
self.fileinput = fileinput
self.titlename = ''
self.comment = ''
self.observation = Table()
self.JDline = _JDline()
self.available = False
self._period = 0
self.filter = self.isfilter(filtername)
self.read
def isfilter(self,filtername='Vis.'):
"""
Return filter
"""
if filtername in ['Vis.','I','R','B','V','CV','TG']:
f = filtername
else:
f = 'Vis.'
return f
@property
def read(self):
"""
Return table of observation
"""
self.observation = Table.read(self.fileinput, format='ascii.csv')
if len(self.observation) > 0:
self.available = True
self.title
self.period
self.comments
else:
self.available = False
def filtername(self, filtername='Vis.'):
"""
Update filter
"""
if self.available:
self.filter = self.isfilter(filtername)
@property
def Vis(self):
if self.available:
self.filter = 'Vis.'
@property
def I(self):
if self.available:
self.filter = 'I'
@property
def R(self):
if self.available:
self.filter = 'R'
@property
def V(self):
if self.available:
self.filter = 'V'
@property
def B(self):
if self.available:
self.filter = 'B'
@property
def CV(self):
if self.available:
self.filter = 'CV'
@property
def TG(self):
if self.available:
self.filter = 'TG'
@property
def period(self):
"""
Return period JD
"""
if self.available:
self._period = self.observation['JD'][len(self.observation)-1] - self.observation['JD'][0]
return self._period
@property
def title(self):
if self.available:
self.titlename = 'AAVSO -- data-download -- ' + self.observation['Star Name'][0]
return self.titlename
@property
def comments(self):
if self.available:
observers = []
for i in self.observation['Observer Code'] :
if i not in observers:
observers.append(i)
comment = 'Showing ' + str(len(self.observation)) + ' observations for ' + self.observation['Star Name'][0] + ' from ' + str(len(observers)) + ' observers'
self.comment = comment
return self.comment
@property
def observations(self):
"""
Return observations table
"""
if self.observation:
return self.observation
@property
def JDMinMax(self):
"""
Return min and max JD in observations table
"""
if self.observation:
return self.observation['JD'][0],self.observation['JD'][len(self.observation)-1]
@property
def magnitudeMinMax(self):
"""
Return min and max of magnitude in observations table
"""
if self.observation:
mv = []
for value in self.observations:
if self.filter == value['Band']:
if '<' not in value['Magnitude']:
mv.append(float(value['Magnitude']))
return min(mv),max(mv)
@property
def JulianDay(self):
"""
Return JD table
"""
return self.JDline.JulianDay
@JulianDay.setter
def JulianDay(self,JDtable):
"""
Create JD table
"""
self.JDline.JulianDay = JDtable
def plot(self):
"""
Plot observations table
"""
if self.available:
jd_min,jd_max = self.JDMinMax
mv_min,mv_max = self.magnitudeMinMax
x = []
y = []
for value in self.observations:
if self.filter == value['Band']:
if '<' not in value['Magnitude']:
x.append(value['JD'])
y.append(float(value['Magnitude']))
plt.xlim(round(jd_min)-5,round(jd_max)+5)
plt.ylim(round(mv_min)-1,round(mv_max)+1)
plt.gca().invert_yaxis()
plt.scatter(x, y, c = 'black', s = 2, alpha = 0.2)
self.JDline.plot()
plt.title(self.title, loc='center')
plt.xlabel('JD', fontsize = 12)
            if self.filter == 'Vis.':
plt.ylabel(r'$m_v$', fontsize = 12)
else:
plt.ylabel(self.filter, fontsize = 12)
plt.show()
class vsx(object):
"""
    Class for the AAVSO VSX catalogue; query results are returned as an astropy Table.
"""
def __init__(self, nameID):
self.nameID = nameID
self.vsx_table = Table()
self.available = False
self.read
@property
def read(self):
"""
Return TABLE of Variable
"""
self.table
@property
def data(self):
"""
Return JSON data
Source : https://www.aavso.org/direct-web-query-vsxvsp
"""
if ' ' in self.nameID:
nameID = self.nameID.replace(' ','%20')
else:
nameID = self.nameID
url = "http://www.aavso.org/vsx/index.php"
params = {}
params['view']='api.object'
params['format']='json'
params['ident']=self.nameID
response = requests.get(url,params=params)
        if (response.status_code >= 400):
self.available = False
else:
self.available = True
return response.json()
@property
def table(self):
"""
Return data table
"""
result = self.data['VSXObject']
header = []
value = []
types = []
for item in result:
value.append(result[item])
header.append(item)
types.append('str')
self.vsx_table = Table(names = header, dtype = types)
self.vsx_table.add_row(value)
@property
def observations(self):
"""
Return vsx table
"""
if self.available:
return self.vsx_table
@property
def name(self):
"""
Return vsx name
"""
if self.available:
return self.vsx_table['Name'][0]
@property
def coordinates(self):
"""
Return vsx RA,DEC (degree,degree)
"""
if self.available:
return float(self.vsx_table['RA2000']), float(self.vsx_table['Declination2000'])
@property
def hourdegree(self):
"""
Return vsx RA,DEC (Hour,Degree)
"""
if self.available:
c = SkyCoord(ra=float(self.vsx_table['RA2000'])*u.degree, dec=float(self.vsx_table['Declination2000'])*u.degree)
return c.ra.hour, c.dec.degree
class _JDline(object):
"""
Class line Julian Day
"""
def __init__(self):
self.JDtable = []
@property
def JulianDay(self):
"""
Return JD table
"""
return self.JDtable
@JulianDay.setter
def JulianDay(self,JDtable):
"""
Add JD's
"""
if len(JDtable) > 0:
for number in JDtable:
self.JDtable.append(number)
else:
self.JDtable.clear()
def plot(self):
"""
Plot line of JD's
"""
plt.vlines(self.JDtable, -30,30 , linestyles = 'solid', colors = 'grey', alpha = 0.3)
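# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal illustration of how the classes above might be combined. The star
# name 'SS Cyg' and the file 'aavsodata.csv' are illustrative assumptions, and
# the VSX lookup needs network access.
if __name__ == '__main__':
    # Look up basic catalogue data for a variable star in VSX
    star = vsx('SS Cyg')
    if star.available:
        print(star.name, star.coordinates)
    # Plot a previously downloaded data-download CSV, if one is present
    if os.path.exists('aavsodata.csv'):
        dd = datadownload(filtername='Vis.', fileinput='aavsodata.csv')
        print(dd.comments)
        dd.plot()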
| 2.984375
| 3
|
Concrete Data Set/normalize_conco.py
|
s-saitej/Linear-Regression-Problems
| 0
|
12775715
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 25 13:52:02 2018
@author: sunka
"""
import pandas as pd
dataframe = pd.read_csv('concrete.csv')
print(dataframe)
df1 = dataframe.iloc[:,1:]
df2 = dataframe.iloc[:,:8]
from sklearn import preprocessing
df1 = preprocessing.normalize(df1)
df2 = preprocessing.normalize(df2)
from sklearn import model_selection
train_data , test_data , train_target , test_target = model_selection.train_test_split(df1,df2)
from sklearn import linear_model
regression = linear_model.LinearRegression()
fitting = regression.fit(train_data,train_target)
result = regression.predict(test_data)
print(result)
coefficient = regression.coef_
intercept = regression.intercept_
print("The coefficeint is " + str(coefficient))
print("Intercept is " + str(intercept))
from sklearn import metrics
mean_square_error = metrics.mean_squared_error(test_target,result)
print("Mean square error is " + str(mean_square_error))
varience = metrics.r2_score(test_target,result)
print("Varience is " + str(varience))
from matplotlib import pyplot
pyplot.hist(train_data)
pyplot.hist(result)
# Output
pyplot.scatter(test_target,result)
pyplot.title("Output")
pyplot.scatter(result,result-test_target)
pyplot.title('Residue')
| 3.0625
| 3
|
core/shape_set.py
|
neodyme60/raypy
| 1
|
12775716
|
from core.differential_geometry import DifferentialGeometry
from core.distribution1d import Distribution1D
from core.light_sample import LightSample
from core.ray import Ray
from core.shape import Shape
from maths.config import infinity_max_f
from maths.normal import Normal
from maths.point3d import Point3d
from maths.vector3d import Vector3d
class ShapeSet:
def __init__(self, shape: Shape):
self.shapes = []
self.sumArea = 0.0
self.areas = []
self.areaDistribution = []
todo = [shape]
while len(todo)>0:
sh = todo.pop()
if sh.get_can_intersect():
self.shapes.append(sh)
else:
sh.get_refine(todo)
self.sumArea = 0.0
for i in range(len(self.shapes)):
area = self.shapes[i].Area()
self.areas.append(area)
self.sumArea += area
self.areaDistribution = Distribution1D(self.areas)
def Sample1(self, p: Point3d, ls: LightSample, Ns: Normal)->Point3d:
pdf, sn = self.areaDistribution.SampleDiscrete(ls.uComponent)
pt = self.shapes[sn].Sample2(p, ls.uPos, Ns)
# Find closest intersection of ray with shapes in _ShapeSet_
r = Ray(p, (pt-p).get_normalized(), 1e-3, infinity_max_f)
anyHit = False
thit = 1.0
dg = DifferentialGeometry()
for i in range(len(self.shapes)):
anyHit_b, thit_f = self.shapes[i].get_intersection(r, dg)
if anyHit_b:
anyHit = True
thit = thit_f
if anyHit:
Ns.Set(dg.normal)
return r.get_at(thit)
def Sample2(self, ls: LightSample, n: Normal)->Point3d:
pdf, sn = self.areaDistribution.SampleDiscrete(ls.uComponent)
return self.shapes[sn].Sample1(ls.uPos, n)
def Area(self):
return self.sumArea
def Pdf1(self, p: Point3d)->float:
pdf = 0.0
for i in range(len(self.shapes)):
pdf += self.areas[i] * self.shapes[i].Pdf1(p)
return pdf / self.sumArea
def Pdf2(self, p: Point3d, wi: Vector3d)->float:
pdf = 0.0
for i in range(len(self.shapes)):
pdf += self.areas[i] * self.shapes[i].Pdf2(p, wi)
return pdf / self.sumArea
| 2.28125
| 2
|
Chapter 04/ch41a.py
|
bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE
| 0
|
12775717
|
print(27 + 8 * 2 - 6)
#37
| 1.914063
| 2
|
src/xsd_members/bulk_select.py
|
minyiky/xSACdb
| 2
|
12775718
|
from .models import MemberProfile
import io
import csv
TOKEN_NAME = 'names'
def get_some_objects(list):
return MemberProfile.objects.filter(pk__in=list)
def parse_token_data(request_post):
f = io.StringIO(request_post[TOKEN_NAME])
reader = csv.reader(f, delimiter=',')
user_ids = []
for row in reader:
user_ids = set(row)
return user_ids
def get_bulk_members(request, method="POST"):
if method == "POST":
user_ids = parse_token_data(request.POST)
elif method == "GET":
user_ids = parse_token_data(request.GET)
else:
raise ValueError("Unsupported method")
members = get_some_objects(user_ids)
return members
| 2.421875
| 2
|
src/extension/snapshot.py
|
ytsmiling/lmt
| 32
|
12775719
|
<gh_stars>10-100
import os
import chainer
from chainer.training import extension
class Snapshot(extension.Extension):
"""Trainer extension that save network parameters to 'snapshot.npz'.
"""
def __call__(self, trainer):
filename = os.path.join(trainer.out, 'snapshot.npz')
chainer.serializers.save_npz(filename, trainer.updater.get_optimizer('main').target)
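# --- Usage sketch (editor's addition, not part of the original file) ---
# How this extension might be attached to a Chainer trainer; the updater and
# the trigger below are illustrative assumptions, not part of this module.
#
#     trainer = chainer.training.Trainer(updater, (100, 'epoch'), out='result')
#     trainer.extend(Snapshot(), trigger=(1, 'epoch'))
#     trainer.run()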
| 2.125
| 2
|
kid_readout/measurement/acquire.py
|
danielflanigan/kid_readout
| 0
|
12775720
|
"""
Basic framework for acquiring a roach measurement that includes both sweep(s) and stream(s).
Acquire
-Initialize equipment.
-Initialize roach: preload frequencies, if necessary.
-Create state dictionary containing state from all equipment, including temperatures, if possible.
-Run a coarse sweep, if necessary: create a SweepArray and extract resonance frequencies.
-Run fine sweeps to map out resonance frequencies carefully.
If desired, we can combine the data from coarse and fine sweeps into a single SweepArray.
All streams in these sweeps are created with the same roach state, which should not change during the sweeps.
The sweep(s) are created with the experiment state, which should also not change.
Acquire streams:
-Initialize equipment for stream(s).
-Initialize roach for stream(s).
-Create experiment state dictionary.
-Acquire a StreamArray.
-Repeat the stream acquisition as needed
-Instantiate the final measurement with all data, and save it to disk.
-Clean up equipment.
If instead we want to save data as it is collected, we can do that by writing a blank final measurement to disk, then
writing the sub-measurements as they are acquired.
"""
from __future__ import division
import os
import sys
import time
import inspect
import subprocess
import logging
import numpy as np
from kid_readout import settings
from kid_readout.utils import log
from kid_readout.measurement import core, basic
from kid_readout.measurement.io import nc, npy
logger = logging.getLogger(__name__)
# Frequency sweep
def load_baseband_sweep_tones(ri, tone_banks, num_tone_samples):
return ri.set_tone_freqs(freqs=np.vstack(tone_banks), nsamp=num_tone_samples)
def load_heterodyne_sweep_tones(ri, tone_banks, num_tone_samples):
return ri.set_tone_freqs(freqs=np.vstack(tone_banks), nsamp=num_tone_samples)
def run_sweep(ri, tone_banks, num_tone_samples, length_seconds=0, state=None, description='', verbose=False,
wait_for_sync=0.1, **kwargs):
"""
Return a SweepArray acquired using the given tone banks.
Parameters
----------
ri : RoachInterface
An instance of a subclass.
tone_banks : iterable of ndarray (float)
An iterable of arrays (or a 2-D array) of frequencies to use for the sweep.
num_tone_samples : int
The number of samples in the playback buffer; must be a power of two.
length_seconds : float
The duration of each data stream; the default of 0 means the minimum unit of data that can be read out in the
current configuration.
state : dict
The non-roach state to pass to the SweepArray.
description : str
A human-readable description of the measurement.
verbose : bool
If true, print progress messages.
wait_for_sync : float
Sleep for this time in seconds to let the ROACH sync finish.
kwargs
Keyword arguments passed to ri.get_measurement().
Returns
-------
SweepArray
"""
stream_arrays = core.MeasurementList()
if verbose:
print("Measuring bank")
for n, tone_bank in enumerate(tone_banks):
if verbose:
print n,
sys.stdout.flush()
ri.set_tone_freqs(tone_bank, nsamp=num_tone_samples)
ri.select_fft_bins(np.arange(tone_bank.size))
        # Wait briefly for the ROACH2 sync to settle before reading out data.
time.sleep(wait_for_sync)
stream_arrays.append(ri.get_measurement(num_seconds=length_seconds, **kwargs))
return basic.SweepArray(stream_arrays, state=state, description=description)
def run_loaded_sweep(ri, length_seconds=0, state=None, description='', tone_bank_indices=None, bin_indices=None,
verbose=False, **kwargs):
"""
Return a SweepArray acquired using previously-loaded tones.
Parameters
----------
ri : RoachInterface
An instance of a subclass.
length_seconds : float
The duration of each data stream; the default of 0 means the minimum unit of data that can be read out in the
current configuration.
state : dict
The non-roach state to pass to the SweepArray.
description : str
A human-readable description of the measurement.
tone_bank_indices : numpy.ndarray[int]
The indices of the tone banks to use in the sweep; the default is to use all existing.
bin_indices : numpy.ndarray[int]
The indices of the filterbank bins to read out; the default is to read out all bins.
verbose : bool
If true, print progress messages.
kwargs
Keyword arguments passed to ri.get_measurement().
Returns
-------
SweepArray
"""
if tone_bank_indices is None:
tone_bank_indices = np.arange(ri.tone_bins.shape[0])
if bin_indices is None:
bin_indices = np.arange(ri.tone_bins.shape[1])
stream_arrays = core.MeasurementList()
if verbose:
print "Measuring bank:",
for tone_bank_index in tone_bank_indices:
if verbose:
print tone_bank_index,
sys.stdout.flush()
ri.select_bank(tone_bank_index)
ri.select_fft_bins(bin_indices)
stream_arrays.append(ri.get_measurement(num_seconds=length_seconds, **kwargs))
return basic.SweepArray(stream_arrays, state=state, description=description)
def run_multipart_sweep(ri, length_seconds=0, state=None, description='', num_tones_read_at_once=32, verbose=False,
**kwargs):
num_tones = ri.tone_bins.shape[1]
num_steps = num_tones // num_tones_read_at_once
if num_steps == 0:
num_steps = 1
indices_to_read = range(num_tones)
parts = []
for step in range(num_steps):
if verbose:
print("running sweep step {} of {}.".format(step,num_steps))
parts.append(run_loaded_sweep(ri, length_seconds=length_seconds, state=state, description=description,
bin_indices=indices_to_read[step::num_steps], **kwargs))
stream_arrays = core.MeasurementList()
for part in parts:
stream_arrays.extend(list(part.stream_arrays))
return basic.SweepArray(stream_arrays, state=state, description=description)
# Metadata
def script_code():
"""
Return the source code of a module running as '__main__'. Acquisition scripts can use this to save their code.
If attempting to load the source code raises an exception, return a string representation of the exception.
Returns
-------
str
The code, with lines separated by newline characters.
"""
try:
return inspect.getsource(sys.modules['__main__'])
except Exception as e:
return str(e)
def git_log():
import kid_readout
kid_readout_directory = os.path.dirname(os.path.abspath(kid_readout.__file__))
try:
return subprocess.check_output(("cd {}; git log -1".format(kid_readout_directory)), shell=True)
except Exception as e:
return str(e)
def git_status():
import kid_readout
kid_readout_directory = os.path.dirname(os.path.abspath(kid_readout.__file__))
try:
return subprocess.check_output(("cd {}; git status --porcelain".format(kid_readout_directory)), shell=True)
except Exception as e:
return str(e)
def all_metadata():
meta = {'script_code': script_code(),
'git_log': git_log(),
'git_status': git_status(),
'cryostat': settings.CRYOSTAT,
'cooldown': settings.COOLDOWN}
return meta
# IO object creation
def new_nc_file(suffix='', directory=settings.BASE_DATA_DIR, metadata=None):
if suffix and not suffix.startswith('_'):
suffix = '_' + suffix
if metadata is None:
metadata = all_metadata()
root_path = os.path.join(directory, time.strftime('%Y-%m-%d_%H%M%S') + suffix + nc.NCFile.EXTENSION)
logger.debug("Creating new NCFile with path %s" % root_path)
return nc.NCFile(root_path, metadata=metadata)
def new_npy_directory(suffix='', directory=settings.BASE_DATA_DIR, metadata=None):
if suffix and not suffix.startswith('_'):
suffix = '_' + suffix
if metadata is None:
metadata = all_metadata()
root_path = os.path.join(directory, time.strftime('%Y-%m-%d_%H%M%S') + suffix + npy.NumpyDirectory.EXTENSION)
logger.debug("Creating new NumpyDirectory with path %s" % root_path)
return npy.NumpyDirectory(root_path, metadata=metadata)
# Interactive checks to be used at the beginning of scripts
def show_settings():
print("cryostat: {}".format(settings.CRYOSTAT))
for k, v in settings.COOLDOWN.items():
print("{}: {}".format(k, v))
raw_input("Press enter to continue or ctrl-C to quit.")
def show_git_status():
print("git status:")
print(git_status())
raw_input("Press enter to continue or ctrl-C to quit.")
# Logging
def get_script_logger(name, level=logging.INFO):
script_logger = logging.getLogger('kid_readout')
script_logger.setLevel(logging.DEBUG)
if log.default_handler not in script_logger.handlers:
stream_handler = log.default_handler
stream_handler.setLevel(level)
script_logger.addHandler(stream_handler)
script_logger.addHandler(log.file_handler(name))
return script_logger
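# --- Usage sketch (editor's addition, not part of the original module) ---
# Rough shape of an acquisition script built from the helpers above, following
# the flow in the module docstring. The roach interface `ri`, the tone banks and
# the suffix are placeholders, not values defined in this module.
#
#     logger = get_script_logger('example_sweep')
#     io = new_nc_file(suffix='example_sweep')  # IO object for saving results
#     sweep = run_sweep(ri, tone_banks, num_tone_samples=2**16,
#                       length_seconds=0.1, state=all_metadata())
#     # ... write `sweep` through `io` using the measurement IO API, then clean up.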
| 2.5
| 2
|
tsx/import_data_source.py
|
nesp-tsr/tsx
| 3
|
12775721
|
from tsx.db import get_session
import logging
import sys
import argparse
import csv
from tqdm import tqdm
log = logging.getLogger(__name__)
def main():
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(asctime)-15s %(name)s %(levelname)-8s %(message)s')
parser = argparse.ArgumentParser(description='Import data source information')
parser.add_argument('--relax', action='store_true', dest='relax', help="Ignore invalid source ids")
parser.add_argument('--update-source-table', action='store_true', dest='update_source_table', help="Also update `source` table with authors, provider and description information")
parser.add_argument('filename', type=str, help='Data source file (aka master list) (CSV)')
args = parser.parse_args()
session = get_session()
session.execute("DELETE FROM data_source");
with open(args.filename) as f:
reader = csv.DictReader(f)
for row in tqdm(list(reader)):
row = {k: v.strip() for k, v in row.items()}
data = {
'source_id': row['SourceID'],
'taxon_id': row['TaxonID'],
'data_agreement_id': row.get('AgreementSigned') or None,
'objective_of_monitoring_id': lookup(row, 'ObjectiveOfMonitoring'),
'absences_recorded': get_bool(row, 'AbsencesRecorded', True, unknown_value_default=True),
'standardisation_of_method_effort_id': lookup(row, 'StandardisationOfMethodEffort', optional=True),
'consistency_of_monitoring_id': lookup(row, 'ConsistencyOfMonitoring', optional=True),
'start_year': row.get('StartYear') or None,
'end_year': row.get('EndYear') or None,
'exclude_from_analysis': get_bool(row, 'NotInIndex', False, unknown_value_default=True, optional=True),
'suppress_aggregated_data': get_bool(row, 'SuppressAggregatedDataUntil', False, unknown_value_default=True, optional=True)
}
# In relaxed mode, silently skip rows without SourceID value
if args.relax and row['SourceID'].strip() in ('', 'NULL', 'NA'):
continue
r = session.execute("SELECT 1 FROM source WHERE id = :id", { 'id': data['source_id'] }).fetchall()
if len(r) == 0:
if args.relax:
log.warning("Skipping unknown source id: %s" % data['source_id'])
continue
else:
raise ValueError("Invalid source id: %s" % data['source_id'])
try:
if data['data_agreement_id']:
data['data_agreement_id'] = int(data['data_agreement_id'])
except:
if args.relax:
log.warning("Treating unknown AgreementSigned value as blank: %s" % data['data_agreement_id'])
data['data_agreement_id'] = None
else:
raise ValueError("Invalid AgreementSigned: %s" % data['data_agreement_id'])
if args.update_source_table:
def strip_and_warn(s):
stripped = s.strip(". ")
if s != stripped:
log.warning("Stripping leading/trailing space/periods from '%s'", s)
return stripped
data['authors'] = strip_and_warn(row['Authors'])
data['provider'] = strip_and_warn(row['SourceProvider'])
data['description'] = strip_and_warn(row['SourceDesc'])
session.execute("""UPDATE source SET authors = :authors, provider = :provider, description = :description WHERE id = :source_id""", data)
session.execute("""INSERT INTO data_source (
source_id,
taxon_id,
data_agreement_id,
objective_of_monitoring_id,
absences_recorded,
standardisation_of_method_effort_id,
consistency_of_monitoring_id,
start_year,
end_year,
exclude_from_analysis,
suppress_aggregated_data
) VALUES (
:source_id,
:taxon_id,
:data_agreement_id,
:objective_of_monitoring_id,
:absences_recorded,
:standardisation_of_method_effort_id,
:consistency_of_monitoring_id,
:start_year,
:end_year,
:exclude_from_analysis,
:suppress_aggregated_data
)""",
data
)
session.commit()
LOOKUPS = {
'ObjectiveOfMonitoring': {
'Monitoring for targeted conservation management': 4,
'Monitoring for general conservation management – ‘surveillance’ monitoring.': 3,
'Baseline monitoring': 2,
'Monitoring for community engagement': 1
},
'ConsistencyOfMonitoring': {
'Balanced; all (or virtually all) sites surveyed in each year sampled (no, or virtually no, site turnover)': 4,
'Imbalanced (low turnover); sites surveyed consistently through time as established, but new sites are added to program with time': 3,
'Imbalanced (high turnover); new sites are surveyed with time, but monitoring of older sites is often not maintained': 2,
'Highly Imbalanced (very high turnover); different sites surveyed in different sampling periods. Sites are generally not surveyed consistently through time (highly biased)': 1
},
'StandardisationOfMethodEffort': {
'Pre-defined sites/plots surveyed repeatedly through time using a single standardised method and effort across the whole monitoring program': 6,
'Pre-defined sites/plots surveyed repeatedly through time with methods and effort standardised within site units, but not across program - i.e. different sites surveyed have different survey effort/methods': 5,
'Pre-defined sites/plots surveyed repeatedly through time with varying methods and effort': 4,
'Data collection using standardised methods and effort but surveys not site-based (i.e. surveys spatially ad-hoc). Post-hoc site grouping possible - e.g. a lot of fixed area/time searches conducted within a region but not at pre-defined sites': 3,
'Data collection using standardised methods and effort but surveys not site-based (i.e. surveys spatially ad-hoc). Post-hoc site grouping not possible': 2,
'Unstandardised methods/effort, surveys not site-based': 1
}
}
def get_bool(row, column, default=None, unknown_value_default=None, optional=False):
raw_value = row.get(column)
if optional and raw_value is None:
return default
raw_value = raw_value.strip()
value = raw_value.lower()
if value in ('1', 'yes', 'true', 'y', 't'):
return True
if value in ('0', 'no', 'false', 'n', 'f'):
return False
if value in ('', 'na', 'null'):
return default
else:
log.warning("Unknown value for %s: '%s', defaulting to %s" % (column, raw_value, unknown_value_default))
return unknown_value_default
def lookup(row, column, optional=False):
value = row.get(column)
if optional and value is None:
return None
lookup = LOOKUPS[column]
if value in ('', 'NA', '0'):
return None
elif value.isdigit() and int(value) in lookup.values():
return value
elif value in lookup:
return lookup[value]
else:
log.warning("Unknown value for %s: '%s', defaulting to None" % (column, value))
return None
if __name__ == '__main__':
main()
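# --- Example invocation (editor's addition) ---
# The CSV filename below is illustrative; any master-list CSV with the columns
# referenced in main() (SourceID, TaxonID, AgreementSigned, ...) should work:
#
#     python import_data_source.py --relax --update-source-table master_list.csv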
| 2.359375
| 2
|
mover/_numberbatch.py
|
StefanJMU/SemEval2022_Task_8
| 0
|
12775722
|
<filename>mover/_numberbatch.py
import psycopg2
import numpy as np
numberbatch_config = {
'user': 'postgres',
'password': '<PASSWORD>',
'host': 'localhost',
'port' : 5432,
'database': 'Numberbatch'
}
frequency_config = {
'user': 'postgres',
'password': '<PASSWORD>',
'host': 'localhost',
'port' : 5432,
'database': 'Frequency'
}
# Numberbatch and WordLex sometimes use different language identifiers.
#-> Artifact from database construction
lang_identifier_map = {
'en' : 'eng',
'fr' : 'fre',
'ar' : 'ara',
'tr' : 'tur'
}
def for_database(database_name):
def wrapper(function):
def func(s, *args, **kwargs):
if s.config['database'] == database_name:
return function(s, *args, **kwargs)
else :
print(f'Database {s.config["database"]} has no function {function}')
return func
return wrapper
class DatabaseConnection:
def __init__(self, config):
self.config = config
# Prepared statements for database construction
self.insert_prep = ("INSERT INTO vectors (lang,concept,embedding) VALUES (%s,%s,%s);")
self.insert_frequency_prep = ("INSERT INTO frequency (index,lang,word,freq) VALUES (%s,%s,%s,%s);")
self.make_partition_prep = ("CREATE TABLE {}_{} PARTITION OF {} FOR VALUES IN ('{}');")
self.make_index_prep = ("CREATE INDEX ON {}_{} ({});")
self.get_embedding_prep = ("SELECT embedding FROM vectors where lang='{}' and concept='{}'")
self.get_frequency_prep = ("SELECT freq FROM frequency where lang='{}' and word='{}'")
self.commit_counter = 0
def __enter__(self):
self.cnx = psycopg2.connect(**self.config)
self.cursor = self.cnx.cursor()
return self
def __exit__(self, type, value, traceback):
self.cursor.close()
self.cnx.close()
def make_partition(self, table, lang, index_key):
"""
Create new partition in the database for a new language
"""
self.cursor.execute(self.make_partition_prep.format(table,
lang,
table,
lang))
self.cursor.execute(self.make_index_prep.format(table,
lang,
index_key))
self.cnx.commit()
@for_database('Numberbatch')
def insert_concept(self, lang, concept, embedding_vector):
"""
Insert concept into the embedding database
Params
-----
lang : str -> language abbreviation ('de' etc.)
concept : str -> term
        embedding_vector : str with structure '{float,...,float}'
"""
self.cursor.execute(self.insert_prep,
(lang, concept, embedding_vector))
self.commit_counter += 1
if self.commit_counter % 100 == 0:
self.cnx.commit()
@for_database('Frequency')
def insert_frequency(self, index, lang, word, freq):
"""
        Insert a word frequency record into the frequency database
Params
-----
lang : str -> language abbreviation ('de' etc.)
word : str -> term
freq : float -> frequency of the word
"""
self.cursor.execute(self.insert_frequency_prep,
(index, lang, word, freq))
self.commit_counter += 1
if self.commit_counter % 100 == 0:
self.cnx.commit()
@for_database('Numberbatch')
def get_embedding(self, lang, concept):
"""
        Retrieve the embedding vector for a concept. The language must be given
        explicitly because words can overlap across languages and because it enables
        efficient access via the PostgreSQL partitioning scheme.
"""
try:
self.cursor.execute(self.get_embedding_prep.format(lang, concept))
res = self.cursor.fetchone()
if res is not None :
return np.array(res[0]) #list of projected attributes returned
except:
#print("Error during embedding retrieval of concept {} of language {}. Returning NaN embedding.".format(concept,lang))
pass
res = np.empty((300,))
res[:] = np.NaN
return res
@for_database('Frequency')
def get_frequency(self, lang, concept):
"""
Get the document frequency for concept with respect to the most frequent word of lang, according to WordLex
"""
try:
self.cursor.execute(self.get_frequency_prep.format(lang, concept))
res = self.cursor.fetchone()
if res is not None:
return res[0] #list of projected attributes returned
except:
# Commit the current queries.
# After an error has been thrown the database will reject every further request until commitment
self.cnx.commit()
return np.NaN
@for_database('Numberbatch')
def embed(self, lang, concepts):
"""
TODO: A caching strategy can be used here as well
"""
if len(concepts) == 0:
raise Exception("Function embed received empty list of concepts")
vectors = []
for concept in concepts:
vectors.append(self.get_embedding(lang, concept))
self.cnx.commit()
return np.stack(vectors, axis=0)
@for_database('Frequency')
def frequencies(self, lang, concepts):
"""
        WordLex sometimes uses different language abbreviations than Numberbatch
TODO: Synchronize the databases in this regard
"""
if lang in lang_identifier_map :
lang = lang_identifier_map[lang]
freqs = []
for concept in concepts :
freqs.append(self.get_frequency(lang, concept))
self.cnx.commit()
return np.array(freqs)
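# --- Usage sketch (editor's addition, not part of the original module) ---
# How the connection class above might be used; this assumes the local
# PostgreSQL databases described by the configs at the top of this file exist
# and are populated.
if __name__ == '__main__':
    with DatabaseConnection(numberbatch_config) as emb_db:
        vectors = emb_db.embed('en', ['cat', 'dog'])  # expected shape: (2, 300)
        print(vectors.shape)
    with DatabaseConnection(frequency_config) as freq_db:
        print(freq_db.frequencies('en', ['cat', 'dog']))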
| 2.375
| 2
|
dlpy/model_conversion/write_sas_code.py
|
aviolante/python-dlpy
| 1
|
12775723
|
<gh_stars>1-10
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Write python code for creating the SAS model '''
# model/input layer definition
def write_input_layer(model_name='sas', layer_name='data', channels='-1',
width='-1', height='-1', scale='1.0'):
'''
Generate Python code defining a SAS deep learning input layer
Parameters
----------
model_name : string
Name for deep learning model
layer_name : string
Layer name
channels : string
number of input channels
width : string
image width
height : string
image height
scale : string
scaling factor to apply to raw image pixel data
Returns
-------
string
String representing Python code defining a SAS deep learning input layer
'''
out = [
'def sas_model_gen(s, input_crop_type=None, input_channel_offset=None, input_image_size=None):',
' # quick check for deeplearn actionset',
' actionset_list = s.actionsetinfo().setinfo.actionset.tolist()',
' actionset_list = [item.lower() for item in actionset_list]',
' if "deeplearn" not in actionset_list:s.loadactionset("deeplearn")',
' ',
' # quick error-checking and default setting',
' if (input_crop_type is None):',
' input_crop_type="NONE"',
' else:',
' if (input_crop_type.upper() != "NONE") and (input_crop_type.upper() != "UNIQUE"):',
' raise ValueError("input_crop_type can only be NONE or UNIQUE")',
'',
' if (input_image_size is not None):',
' channels = input_image_size[0]',
' if (len(input_image_size) == 2):',
' height = width = input_image_size[1]',
        '        elif (len(input_image_size) == 3):',
' height,width = input_image_size[1:]',
' else:',
' raise ValueError("input_image_size must be a tuple with two or three entries")',
'',
' # instantiate model',
' s.buildModel(model=dict(name=' + repr(model_name) + ',replace=True),type="CNN")',
'',
' # input layer',
' nchannels=' + channels,
' if input_channel_offset is None and nchannels==3:',
' print("INFO: setting channel mean values to ImageNet means")',
' input_channel_offset = [103.939, 116.779, 123.68]',
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict( type="input", nchannels=' + channels + ', width=' + width + ', height=' + height + ',',
' scale = ' + scale + ', randomcrop=input_crop_type, offsets=input_channel_offset))',
' elif input_channel_offset is not None:',
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict( type="input", nchannels=' + channels + ', width=' + width + ', height=' + height + ',',
' scale = ' + scale + ', randomcrop=input_crop_type, offsets=input_channel_offset))',
' else:',
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict( type="input", nchannels=' + channels + ', width=' + width + ', height=' + height + ',',
' scale = ' + scale + ', randomcrop=input_crop_type))'
]
return '\n'.join(out)
# convolution layer definition
def write_convolution_layer(model_name='sas', layer_name='conv', nfilters='-1',
width='3', height='3', stride='1', nobias='False',
activation='identity', dropout='0', src_layer='none',
padding='None'):
'''
Generate Python code defining a SAS deep learning convolution layer
Parameters
----------
model_name : string, optional
Name for deep learning model
layer_name : string, optional
Layer name
nfilters : string, optional
number of output feature maps
width : string, optional
image width
height : string, optional
image height
stride : string, optional
vertical/horizontal step size in pixels
nobias : string, optional
omit (True) or retain (False) the bias term
activation : string, optional
activation function
dropout : string, optional
dropout factor (0 < dropout < 1.0)
src_layer : string, optional
source layer(s) for the convolution layer
Returns
-------
string
'''
out = [
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict(type="convolution", nfilters=' + nfilters + ', width=' + width + ', height=' + height + ',',
' stride=' + stride + ', nobias=' + nobias + ', act=' + repr(
activation) + ', dropout=' + dropout + ', padding=' + padding +'), \n',
' srcLayers=' + src_layer + ')'
]
return '\n'.join(out)
# batch normalization layer definition
def write_batch_norm_layer(model_name='sas', layer_name='bn',
activation='identity', src_layer='none'):
'''
Generate Python code defining a SAS deep learning batch normalization layer
Parameters
----------
model_name : string, optional
Name for deep learning model
layer_name : string, optional
Layer name
activation : string, optional
activation function
src_layer : string, optional
source layer(s) for the convolution layer
Returns
-------
string
'''
out = [
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict( type="batchnorm", act=' + repr(activation) + '),',
' srcLayers=' + src_layer + ')'
]
return '\n'.join(out)
# pooling layer definition
def write_pooling_layer(model_name='sas', layer_name='pool',
width='2', height='2', stride='2', type='max',
dropout='0', src_layer='none', padding='None'):
'''
Generate Python code defining a SAS deep learning pooling layer
Parameters
----------
model_name : string, optional
Name for deep learning model
layer_name : string, optional
Layer name
width : string, optional
image width
height : string, optional
image height
stride : string, optional
vertical/horizontal step size in pixels
type : string, optional
pooling type
dropout : string, optional
dropout factor (0 < dropout < 1.0)
src_layer : string, optional
source layer(s) for the convolution layer
Returns
-------
string
'''
out = [
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict(type="pooling", width=' + width + ', height=' + height + ',',
' stride=' + stride + ', pool=' + repr(type) + ', dropout=' + dropout + ',',
' padding=' + padding + '),',
' srcLayers=' + src_layer + ')'
]
return '\n'.join(out)
# residual layer definition
def write_residual_layer(model_name='sas', layer_name='residual',
activation='identity', src_layer='none'):
'''
Generate Python code defining a SAS deep learning residual layer
Parameters
----------
model_name : string, optional
Name for deep learning model
layer_name : string, optional
Layer name
activation : string, optional
activation function
src_layer : string, optional
source layer(s) for the convolution layer
Returns
-------
string
'''
out = [
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict( type="residual", act="' + activation + '"),',
' srcLayers=' + src_layer + ')'
]
return '\n'.join(out)
# fully connected layer definition
def write_full_connect_layer(model_name='sas', layer_name='fullconnect',
nrof_neurons='-1', nobias='true',
activation='identity', type='fullconnect', dropout='0',
src_layer='none'):
'''
Generate Python code defining a SAS deep learning fully connected layer
Parameters
----------
model_name : string, optional
Name for deep learning model
layer_name : string, optional
Layer name
nrof_neurons : string, optional
number of output neurons
nobias : string, optional
omit (True) or retain (False) the bias term
activation : string, optional
activation function
type : string, optional
fully connected layer type (fullconnect or output)
dropout : string, optional
dropout factor (0 < dropout < 1.0)
src_layer : string, optional
source layer(s) for the convolution layer
Returns
-------
string
'''
if (type == 'fullconnect'):
out = [
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict(type=' + repr(type) + ', n=' + nrof_neurons + ',',
' nobias=' + nobias + ', act=' + repr(activation) + ', dropout=' + dropout + '),',
' srcLayers=' + src_layer + ')'
]
else:
out = [
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict(type=' + repr(type) + ', n=' + nrof_neurons + ',',
' nobias=' + nobias + ', act=' + repr(activation) + '),',
' srcLayers=' + src_layer + ')'
]
return '\n'.join(out)
# concat layer definition
def write_concatenate_layer(model_name='sas', layer_name='concat',
activation='identity', src_layer='none'):
'''
Generate Python code defining a SAS deep learning concat layer
Parameters
----------
model_name : string, optional
Name for deep learning model
layer_name : string, optional
Layer name
activation : string, optional
activation function
src_layer : string, optional
source layer(s) for the concat layer
Returns
-------
string
'''
out = [
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict( type="concat", act="' + activation + '"),',
' srcLayers=' + src_layer + ')'
]
return '\n'.join(out)
# Python __main__ function
def write_main_entry(model_name):
'''
Generate Python code defining the __main__ Python entry point
Parameters
----------
model_name : string
Name for deep learning model
Returns
-------
string
'''
return ''
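# --- Usage sketch (editor's addition, not part of the original module) ---
# How the writer functions above might be chained to emit a small model
# definition script; the layer names and sizes are illustrative assumptions.
if __name__ == '__main__':
    code = '\n'.join([
        write_input_layer(model_name='tiny', channels='3', width='32',
                          height='32', scale='1.0'),
        write_convolution_layer(model_name='tiny', layer_name='conv1',
                                nfilters='8', src_layer="['data']"),
        write_pooling_layer(model_name='tiny', layer_name='pool1',
                            src_layer="['conv1']"),
        write_full_connect_layer(model_name='tiny', layer_name='output',
                                 nrof_neurons='10', nobias='False',
                                 activation='softmax', type='output',
                                 src_layer="['pool1']"),
    ])
    print(code)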
| 2.46875
| 2
|
python/dgl/nn/pytorch/conv/tagconv.py
|
Woooosz/dgl
| 2
|
12775724
|
"""Torch Module for Topology Adaptive Graph Convolutional layer"""
# pylint: disable= no-member, arguments-differ, invalid-name
import torch as th
from torch import nn
from .... import function as fn
class TAGConv(nn.Module):
r"""Topology Adaptive Graph Convolutional layer from paper `Topology
Adaptive Graph Convolutional Networks <https://arxiv.org/pdf/1710.10370.pdf>`__.
.. math::
        \mathbf{X}^{\prime} = \sum_{k=0}^K \left(\mathbf{D}^{-1/2} \mathbf{A}
        \mathbf{D}^{-1/2}\right)^{k}\mathbf{X} \mathbf{\Theta}_{k},
where :math:`\mathbf{A}` denotes the adjacency matrix and
:math:`D_{ii} = \sum_{j=0} A_{ij}` its diagonal degree matrix.
Parameters
----------
in_feats : int
Input feature size.
out_feats : int
Output feature size.
k: int, optional
Number of hops :math: `k`. (default: 2)
bias: bool, optional
If True, adds a learnable bias to the output. Default: ``True``.
activation: callable activation function/layer or None, optional
If not None, applies an activation function to the updated node features.
Default: ``None``.
Attributes
----------
lin : torch.Module
The learnable linear module.
"""
def __init__(self,
in_feats,
out_feats,
k=2,
bias=True,
activation=None):
super(TAGConv, self).__init__()
self._in_feats = in_feats
self._out_feats = out_feats
self._k = k
self._activation = activation
self.lin = nn.Linear(in_feats * (self._k + 1), out_feats, bias=bias)
self.reset_parameters()
def reset_parameters(self):
"""Reinitialize learnable parameters."""
gain = nn.init.calculate_gain('relu')
nn.init.xavier_normal_(self.lin.weight, gain=gain)
def forward(self, graph, feat):
r"""Compute topology adaptive graph convolution.
Parameters
----------
graph : DGLGraph
The graph.
feat : torch.Tensor
The input feature of shape :math:`(N, D_{in})` where :math:`D_{in}`
is size of input feature, :math:`N` is the number of nodes.
Returns
-------
torch.Tensor
The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
is size of output feature.
"""
with graph.local_scope():
assert graph.is_homogeneous(), 'Graph is not homogeneous'
norm = th.pow(graph.in_degrees().float().clamp(min=1), -0.5)
shp = norm.shape + (1,) * (feat.dim() - 1)
norm = th.reshape(norm, shp).to(feat.device)
#D-1/2 A D -1/2 X
fstack = [feat]
for _ in range(self._k):
rst = fstack[-1] * norm
graph.ndata['h'] = rst
graph.update_all(fn.copy_src(src='h', out='m'),
fn.sum(msg='m', out='h'))
rst = graph.ndata['h']
rst = rst * norm
fstack.append(rst)
rst = self.lin(th.cat(fstack, dim=-1))
if self._activation is not None:
rst = self._activation(rst)
return rst
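# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal forward pass on a toy graph. The graph construction assumes a DGL
# version whose API matches the layer above (e.g. `dgl.graph` is available).
if __name__ == '__main__':
    import dgl
    g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))  # small directed cycle
    feat = th.randn(4, 5)                        # 4 nodes, 5 input features
    conv = TAGConv(in_feats=5, out_feats=2, k=2)
    out = conv(g, feat)
    print(out.shape)                             # expected: torch.Size([4, 2])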
| 2.46875
| 2
|
examples/registration/demo.py
|
mli0603/lietorch
| 360
|
12775725
|
import sys
sys.path.append('../core')
import argparse
import torch
import cv2
import numpy as np
from viz import sim3_visualization
from lietorch import SO3, SE3, Sim3
from networks.sim3_net import Sim3Net
def normalize_images(images):
images = images[:, :, [2,1,0]]
mean = torch.as_tensor([0.485, 0.456, 0.406], device=images.device)
std = torch.as_tensor([0.229, 0.224, 0.225], device=images.device)
return (images/255.0).sub_(mean[:, None, None]).div_(std[:, None, None])
def load_example(i=0):
""" get demo example """
DEPTH_SCALE = 5.0
if i==0:
image1 = cv2.imread('assets/image1.png')
image2 = cv2.imread('assets/image2.png')
depth1 = np.load('assets/depth1.npy') / DEPTH_SCALE
depth2 = np.load('assets/depth2.npy') / DEPTH_SCALE
elif i==1:
image1 = cv2.imread('assets/image3.png')
image2 = cv2.imread('assets/image4.png')
depth1 = np.load('assets/depth3.npy') / DEPTH_SCALE
depth2 = np.load('assets/depth4.npy') / DEPTH_SCALE
images = np.stack([image1, image2], 0)
images = torch.from_numpy(images).permute(0,3,1,2)
depths = np.stack([depth1, depth2], 0)
depths = torch.from_numpy(depths).float()
intrinsics = np.array([320.0, 320.0, 320.0, 240.0])
intrinsics = np.tile(intrinsics[None], (2,1))
intrinsics = torch.from_numpy(intrinsics).float()
return images[None].cuda(), depths[None].cuda(), intrinsics[None].cuda()
@torch.no_grad()
def demo(model, index=0):
images, depths, intrinsics = load_example(index)
# initial transformation estimate
if args.transformation == 'SE3':
Gs = SE3.Identity(1, 2, device='cuda')
elif args.transformation == 'Sim3':
Gs = Sim3.Identity(1, 2, device='cuda')
depths[:,0] *= 2**(2*torch.rand(1) - 1.0).cuda()
images1 = normalize_images(images)
ests, _ = model(Gs, images1, depths, intrinsics, num_steps=12)
# only care about last transformation
Gs = ests[-1]
T = Gs[:,0] * Gs[:,1].inv()
T = T[0].matrix().double().cpu().numpy()
sim3_visualization(T, images, depths, intrinsics)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('--transformation', default='SE3', help='transformation group to estimate (SE3 or Sim3)')
parser.add_argument('--ckpt', help='checkpoint to restore')
args = parser.parse_args()
model = Sim3Net(args)
model.load_state_dict(torch.load(args.ckpt))
model.cuda()
model.eval()
# run two demos
demo(model, 0)
demo(model, 1)
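# --- Example invocation (editor's addition) ---
# The checkpoint path is a placeholder; any Sim3Net checkpoint trained for the
# chosen transformation group should work:
#
#     python demo.py --transformation Sim3 --ckpt checkpoints/sim3.pth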
| 2.390625
| 2
|
configs/tutorial/caches.py
|
DownloadADuck/gem5-experiments
| 0
|
12775726
|
<reponame>DownloadADuck/gem5-experiments<filename>configs/tutorial/caches.py<gh_stars>0
#Caches configuration file
#We first import the simObj we are going to extend in this file
import m5
from m5.objects import Cache
#adding common scipts to our path
#m5.util.addToPath('../../')
#from common import SimpleOpts
#We can treat the BaseCache object like any python class and extend it
#We start by making the L1 cache
class L1Cache(Cache):
assoc = 2
tag_latency = 2
data_latency = 2
response_latency = 2
mshrs = 4
tgts_per_mshr = 20
def connectCPU(self, cpu):
#This must be defined in a subclass
raise NotImplementedError
def connectBus(self, bus):
self.mem_side = bus.cpu_side_ports
#Here we set values of DefaultCache that don't have default values
#All the possible parameters are in the source code of SimObj
#Sub classes of L1Cache; the L1DCache and L1ICache
class L1ICache(L1Cache):
#sets default size
size = '16kB'
#SimpleOpts.add_option('--l1i_size', help="L1 instruction cache size. Default: %s" %size)
def connectCPU(self, cpu):
#connect this cache s port to a CPU icache port
self.cpu_side = cpu.icache_port
class L1DCache(L1Cache):
#sets default size
size = '64kB'
#SimpleOpts.add_option('--l1d_size', help="L1 data cache size. Default: %s" %size)
def connectCPU(self, cpu):
self.cpu_side = cpu.dcache_port
#We also create and L2 with reasonable parameters
class L2Cache(Cache):
size = '256kB'
assoc = 8
tag_latency = 20
data_latency = 20
response_latency = 20
mshrs = 20
tgts_per_mshr = 12
#SimpleOpts.add_option('--l2_size', help="L2 cache size. Default: %s" %size)
def connectCPUSideBus(self, bus):
self.cpu_side = bus.mem_side_ports
def connectMemSideBus(self, bus):
self.mem_side = bus.cpu_side_ports
#We add helper functions to connect the CPU to the cache and caches to a bus
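#An illustrative wiring sketch (editor's addition): how these classes are
#typically used from a gem5 configuration script. `system`, its CPU and its
#memory bus are assumed to be created elsewhere (and L2XBar imported from
#m5.objects), e.g. in the accompanying simple-system config.
#
# system.cpu.icache = L1ICache()
# system.cpu.dcache = L1DCache()
# system.cpu.icache.connectCPU(system.cpu)
# system.cpu.dcache.connectCPU(system.cpu)
# system.l2bus = L2XBar()
# system.cpu.icache.connectBus(system.l2bus)
# system.cpu.dcache.connectBus(system.l2bus)
# system.l2cache = L2Cache()
# system.l2cache.connectCPUSideBus(system.l2bus)
# system.l2cache.connectMemSideBus(system.membus)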
| 2.515625
| 3
|
libretto/migrations/0042_auto_20190830_2210.py
|
adrienlachaize/dezede
| 15
|
12775727
|
<filename>libretto/migrations/0042_auto_20190830_2210.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-08-30 20:10
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('libretto', '0041_migrate_files'),
]
operations = [
migrations.RemoveField(
model_name='fichier',
name='extract',
),
migrations.RemoveField(
model_name='fichier',
name='owner',
),
migrations.RemoveField(
model_name='fichier',
name='source',
),
migrations.AlterModelOptions(
name='source',
options={'ordering': ('date', 'titre', 'numero', 'page', 'lieu_conservation', 'cote'), 'permissions': (('can_change_status', 'Peut changer l’état'),), 'verbose_name': 'source', 'verbose_name_plural': 'sources'},
),
migrations.DeleteModel(
name='Fichier',
),
]
| 1.546875
| 2
|
testsuite/modulegraph-dir/global_import.py
|
xoviat/modulegraph2
| 9
|
12775728
|
<reponame>xoviat/modulegraph2
import no_imports
def foo():
pass
class MyClass:
pass
| 1.070313
| 1
|
train.py
|
tkx68/Resume-NER
| 0
|
12775729
|
<reponame>tkx68/Resume-NER
import argparse
import numpy as np
import torch
from transformers import DistilBertForTokenClassification, DistilBertTokenizerFast
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
from torch.optim import Adam
from utils import get_special_tokens, trim_entity_spans, convert_goldparse, ResumeDataset, tag2idx, idx2tag, get_hyperparameters, train_and_val_model
parser = argparse.ArgumentParser(description='Train Bert-NER')
parser.add_argument('-e', type=int, default=5, help='number of epochs')
parser.add_argument('-o', type=str, default='.', help='output path to save model state')
args = parser.parse_args().__dict__
output_path = args['o']
MAX_LEN = 500
NUM_LABELS = 12
EPOCHS = args['e']
MAX_GRAD_NORM = 1.0
MODEL_NAME = 'bert-base-uncased'
# TOKENIZER = DistilBertTokenizerFast('./vocab/vocab.txt', lowercase=True)
TOKENIZER = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased', lowercase=True)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data = trim_entity_spans(convert_goldparse('data/Resumes.json'))
total = len(data)
train_data, val_data = data[:180], data[180:]
train_d = ResumeDataset(train_data, TOKENIZER, tag2idx, MAX_LEN)
val_d = ResumeDataset(val_data, TOKENIZER, tag2idx, MAX_LEN)
train_sampler = RandomSampler(train_d)
train_dl = DataLoader(train_d, sampler=train_sampler, batch_size=8)
val_dl = DataLoader(val_d, batch_size=2)
# model = DistilBertForTokenClassification.from_pretrained(MODEL_NAME, num_labels=len(tag2idx))
model = DistilBertForTokenClassification.from_pretrained('distilbert-base-uncased', num_labels=NUM_LABELS)
model.resize_token_embeddings(len(TOKENIZER)) # from here: https://github.com/huggingface/transformers/issues/1805
model.to(DEVICE)
optimizer_grouped_parameters = get_hyperparameters(model, True)
optimizer = Adam(optimizer_grouped_parameters, lr=3e-5)
train_and_val_model(model, TOKENIZER, optimizer, EPOCHS, idx2tag, tag2idx, MAX_GRAD_NORM, DEVICE, train_dl, val_dl)
torch.save(
{
"model_state_dict": model.state_dict()
},
f'{output_path}/model-state.bin',
)
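# --- Example invocation (editor's addition) ---
# Trains for 10 epochs and writes model-state.bin into ./out (the directory
# must already exist); both values are illustrative, defaults are 5 and '.':
#
#     python train.py -e 10 -o ./out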
| 2.140625
| 2
|
modules/spider/spider/spiders/__init__.py
|
Ladder-Climbers/mayne-code
| 0
|
12775730
|
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
DOUBAN_COOKIE = {
"__gads": "ID=2421173a5ca57aed-228b4c29c5c800c1:T=1621494084:RT=1621494084:S=ALNI_MaJlRkH7cibeVPuRhGgoy4NehQdpw",
"__utma": "81379588.766923198.1621432056.1634626277.1634642692.15",
"__utmv": "30149280.23826",
"__utmz": "81379588.1634626277.14.8.utmcsr=cn.bing.com|utmccn=(referral)|utmcmd=referral|utmcct=/",
"__yadk_uid": "mpCqcudA39rNIrjPG2dzOaZVU9YKWwMV",
"_ga": "GA1.2.2128100270.1634613032",
"_ga_RXNMP372GL": "GS1.1.1634613031.1.0.1634613033.58",
"_pk_id.100001.3ac3": "e2d5b8eca75bca93.1621432056.15.1634642691.1634626483.",
"_pk_ref.100001.3ac3": "[\"\",\"\",1634642691,\"https://cn.bing.com/\"]",
"_vwo_uuid_v2": "DF8649AFF718CAD037CCABE9EC9DA0284|35908c2cbe71e8172adb1d18e8eb654d",
"ap_v": "0,6.0",
"bid": "nvSOKb3e_kY",
"ck": "vPPv",
"ct": "y",
"dbcl2": "\"238268017:3kJuTVIhGR8\"",
"douban-fav-remind": "1",
"gr_cs1_816e1a27-0db8-472b-bedd-a0ce47a62b39": "user_id:0",
"gr_cs1_a0853268-d85f-4a95-90d5-70006915ab52": "user_id:1",
"gr_session_id_22c937bbd8ebd703f2d8e9445f7dfd03": "a0853268-d85f-4a95-90d5-70006915ab52",
"gr_session_id_22c937bbd8ebd703f2d8e9445f7dfd03_a0853268-d85f-4a95-90d5-70006915ab52": "true",
"gr_user_id": "322312de-376f-4247-911e-4d511f4f93bd",
"ll": "\"118282\"",
"push_doumail_num": "0",
"push_noty_num": "0",
"viewed": "\"1000647_35541390_1000001_35252459_35378783_1043815_1200840_4913064_26261735_30348068\""
}
| 1.46875
| 1
|
utils/visualization_utils.py
|
Flipajs/FERDA
| 1
|
12775731
|
import numpy as np
import matplotlib.cm as cm
import matplotlib.pylab as plt
def generate_colors(count):
cm = plt.get_cmap('gist_rainbow')
return np.array([cm(1. * i / count) for i in range(count)]).astype(float)
def get_q_color(id, ant_num):
from PyQt4 import QtGui
r, g, b = get_color(id, ant_num)
return QtGui.QColor(r, g, b)
def get_color(id, ant_num):
colors = cm.rainbow(np.linspace(0, 1, ant_num))
return int(colors[id][0] * 255), int(colors[id][1] * 255), int(colors[id][2] * 255)
def get_opacity(current_depth, max_depth):
return float((max_depth - current_depth) + float(current_depth/max_depth))/max_depth/2
def get_contrast_color(r, g, b):
if (r+g+b)/3 < 128:
return 250, 250, 255
return 5, 0, 5
| 2.578125
| 3
|
dev_5/mainapp/models.py
|
Floou/dev-5
| 0
|
12775732
|
from django.db import models
class FullName(models.Model):
full_name = models.CharField(verbose_name='ФИО', max_length=256)
def __str__(self):
return self.full_name
class Meta:
verbose_name = 'ФИО'
verbose_name_plural = 'ФИО'
| 2.171875
| 2
|
test/unsupported/03/BibtexObjectTest03.py
|
wleoncio/cff-converter-python
| 1
|
12775733
|
from cffconvert import BibtexObject
import unittest
import os
import ruamel.yaml as yaml
from test.contracts.BibtexObject import Contract
class BibtexObjectTest(Contract, unittest.TestCase):
def setUp(self):
fixture = os.path.join(os.path.dirname(__file__), "CITATION.cff")
with open(fixture, "r") as f:
cffstr = f.read()
cff_object = yaml.safe_load(cffstr)
self.bo = BibtexObject(cff_object, initialize_empty=True)
def test_author(self):
# CFF file is not valid, hence contract does not apply
pass
def test_check_cff_object(self):
with self.assertRaises(ValueError) as context:
self.bo.check_cff_object()
self.assertTrue('Missing key "cff-version" in CITATION.cff file.' in str(context.exception))
def test_doi(self):
# CFF file is not valid, hence contract does not apply
pass
def test_month(self):
# CFF file is not valid, hence contract does not apply
pass
def test_print(self):
# CFF file is not valid, hence contract does not apply
pass
def test_title(self):
# CFF file is not valid, hence contract does not apply
pass
def test_url(self):
# CFF file is not valid, hence contract does not apply
pass
def test_year(self):
# CFF file is not valid, hence contract does not apply
pass
| 2.4375
| 2
|
TableCreator.py
|
DanieleFedeli/Wikipedia_missing_links
| 0
|
12775734
|
from nltk.corpus import stopwords
import requests
import re
import nltk
nltk.download('stopwords')
WIKI_API_URL = "https://en.wikipedia.org/w/api.php"
inputTitleRepr = './ShortReprLists'
def retrieveCategoryFromJson(pages):
categories = []
for k, v in pages.items():
for cat in v['categories']:
titleCategory = cat['title'].replace('Category:', '')
if 'All' in titleCategory:
continue
if 'Pages' in titleCategory:
continue
if 'Articles' in titleCategory:
continue
if 'Wikipedia' in titleCategory:
continue
if 'Wikidata' in titleCategory:
continue
categories.append(titleCategory)
return list(set(categories))
def FindCategory(session, title):
category = []
PARAMS = {
"action": "query",
"format": "json",
"prop": "categories",
"titles": title
}
response = session.get(url=WIKI_API_URL, params=PARAMS)
data = response.json()
pages = data['query']['pages']
category.extend(retrieveCategoryFromJson(pages))
while "continue" in data:
clcontinue = data["continue"]["clcontinue"]
PARAMS["clcontinue"] = clcontinue
response = session.get(url=WIKI_API_URL, params=PARAMS)
data = response.json()
pages = data['query']['pages']
category.extend(retrieveCategoryFromJson(pages))
return list(set(category))
def getAllBacklinksFromFile(filename):
backlinks = []
row_number = 0
with open(inputTitleRepr+'/'+filename+'.txt.txt', 'r') as f:
for row in f:
row_number += 1
splitted = row.split(' -')
splitted = splitted[0].split(' ')
backlinks.extend(splitted)
return (row_number, backlinks)
def routine(session, title):
print('Processing {}...'.format(title))
categoryOfTitle = FindCategory(session, title)
dictOfCategories = {el.capitalize(): 0 for el in categoryOfTitle}
infoFromBacklinks = getAllBacklinksFromFile(title)
backlinksNumber = infoFromBacklinks[0]
backlinks = infoFromBacklinks[1]
for bl in backlinks:
blCategories = FindCategory(session, bl)
for cat in blCategories:
if cat.capitalize() in dictOfCategories:
#print('{} is in'.format(cat.capitalize()))
dictOfCategories[cat.capitalize()] += 1
# print('--------')
maxCat = max(dictOfCategories, key=dictOfCategories.get)
cSim = dictOfCategories[maxCat]/backlinksNumber
    # note: 'f' here is the module-level output file opened by the "with open('output.txt', 'w') as f" block at the bottom of this script
    print('{}\t{}\t{}'.format(title, maxCat, round(cSim, 2)), file=f)
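
# Illustrative example of the similarity computed above (hypothetical numbers): if a page's
# representation file lists 40 backlinks and 12 of them share the page's most frequent
# category, routine() reports that category with a c-Similarity of 12/40 = 0.3.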
session = requests.Session()
titles = ['Official_(tennis)', 'Maria_Pepe', 'SEAT_Arona', 'Dodge_Coronet',
'Christmas_window', 'Last.fm', 'Traditional_bluegrass']
with open('output.txt', 'w') as f:
print('Entity\t\tCategory\t\tc-Similarity\n', file=f)
for title in titles:
routine(session, title)
| 3.15625
| 3
|
pipeline/pipeline.py
|
iosonofabio/quakelab_containers
| 1
|
12775735
|
<reponame>iosonofabio/quakelab_containers<filename>pipeline/pipeline.py
#!/usr/bin/env python
# vim: fdm=indent
'''
author: <NAME>
date: 05/04/17
content: Skeleton pipeline
'''
# Modules
import argparse
# Script
if __name__ == '__main__':
pa = argparse.ArgumentParser(description='pipeline')
print('Skeleton pipeline')
| 1.554688
| 2
|
fixture/record_related_tab.py
|
Eduard-z/SF_project_pytest
| 0
|
12775736
|
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class RecordRelatedTab:
def __init__(self, app):
self.app = app
def expand_Show_more_actions_dropdown_on_record_page(self):
driver = self.app.driver
dropdown_locator = "//div[contains(@class,'windowViewMode-normal')]//span[contains(text(),'Show')]/.."
dropdown_element = WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, dropdown_locator)))
dropdown_element.click()
def click_Edit_button(self):
driver = self.app.driver
driver.find_element_by_xpath("//*[text()='Edit']/parent::a").click()
def click_Edit_button_on_record_page(self):
driver = self.app.driver
# if "Edit" button is already displayed or not
if len(driver.find_elements_by_xpath("//*[text()='Edit']/parent::a")):
self.click_Edit_button()
else:
self.expand_Show_more_actions_dropdown_on_record_page()
self.click_Edit_button()
def check_that_page_title_contains_record_name(self, record_name):
driver = self.app.driver
print("\n" + driver.title)
assert record_name in driver.title, "Wrong page title :)"
def switch_to_Details_tab_of_record(self):
driver = self.app.driver
details_tab_element = WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH,
"//div[contains(@class,'active')]//a[text()='Details']")))
details_tab_element.click()
| 2.578125
| 3
|
mail/mail.py
|
TsengWen/Weekly
| 0
|
12775737
|
<reponame>TsengWen/Weekly
class Mail:
def __init__(self, prot, *argv):
self.prot = prot(*argv)
def login(self, account, passwd):
self.prot.login(account, passwd)
def send(self, frm, to, subject, content):
self.prot.send(frm, to, subject, content)
def quit(self):
self.prot.quit()
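

# Usage sketch, not part of the original file: the wrapper only forwards calls, so any
# protocol class with a (host, port) constructor and login/send/quit methods will do.
# SmtpProtocol below is an assumed, minimal smtplib-backed adapter used only for illustration.
class SmtpProtocol:
    def __init__(self, host, port):
        import smtplib
        self.server = smtplib.SMTP(host, port)

    def login(self, account, passwd):
        self.server.login(account, passwd)

    def send(self, frm, to, subject, content):
        # minimal message: a Subject header followed by the plain-text body
        msg = "Subject: {0}\r\n\r\n{1}".format(subject, content)
        self.server.sendmail(frm, [to], msg)

    def quit(self):
        self.server.quit()


if __name__ == "__main__":
    mail = Mail(SmtpProtocol, "smtp.example.com", 587)
    mail.login("user@example.com", "app-password")
    mail.send("user@example.com", "team@example.com", "Weekly report", "Hello")
    mail.quit()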
| 2.484375
| 2
|
Assignment 11/task1.py
|
drewriker/USU-CS-1400
| 2
|
12775738
|
# <NAME>
# CS1400 - LW2 XL
# Assignment #11
from modules.orbian import Orbian
from time import sleep
from random import randint
from random import shuffle # Hint hint
def main():
print("WELCOME TO ORBIAN FAMILY")
print()
family = []
input("Hit Enter to Create the First Four Orbians")
for i in range(0, 4):
name = input("\tEnter a name for Orbian " + str(i + 1) + ": ")
# The first four Orbians are created with random values
headRadius = randint(2, 5)
bodyRadius = randint(3, 8)
bodyHeight = randint(5, 15)
family.append(Orbian(name, headRadius, bodyRadius, bodyHeight))
print("\tCreating your Orbian Family", end="")
thinking()
done = False
while not done:
print()
print("Menu")
print("\t1) Meet Orbian Family")
print("\t2) Compare Orbians")
print("\t3) Orbian Info")
print("\t4) Create Orbian Baby")
print("\t5) Send to Pasture")
print("\t6) Orbian Thanos")
print("\t7) Quit")
choice = int(input("Choose an option: "))
print()
if choice == 1:
listFamily(family)
elif choice == 2:
compare(family)
elif choice == 3:
info(family)
elif choice == 4:
createBaby(family)
elif choice == 5:
toPasture(family)
elif choice == 6:
thanosSnap(family)
elif choice == 7:
done = True
print("Thanks for playing Orbian Family!!!")
def thinking():
for i in range(5):
print(".", end="")
sleep(0.5) # You can comment this out while testing to make things go faster
print()
def selectOrbian(famList, selected=None):
count = 1
for i in famList:
print("\t" + str(count) + ") " + i.getName(), end="")
if selected is not None and i is selected:
print(" (already selected)")
else:
print()
count += 1
return famList[int(input("Select an Orbian: ")) - 1] # Returns an Orbian object
# DO NOT MODIFY ANY CODE ABOVE THIS LINE ##############
# Define/Complete the functions below ###################
def listFamily(famList):
# <<<<<<<<<<<<<< Write code to list the Orbian family >>>>>>>>>>>>>>>
for i in famList:
print("I am Orbian " + str(i))
def compare(famList):
orb1 = selectOrbian(famList)
orb2 = selectOrbian(famList, orb1)
# DO NOT MODIFY THIS FUNCTION BEYOND THIS LINE ############
if (orb1 == orb2):
print("\tOrbian " + orb1.getName() + " is equal to Orbian " + orb2.getName())
elif (orb1 > orb2):
print("\tOrbian " + orb1.getName() + " is bigger than Orbian " + orb2.getName())
else:
print("\tOrbian " + orb1.getName() + " is smaller than Orbian " + orb2.getName())
def createBaby(famList):
# <<<<<<<<<<<<<< Write code to select two orbians to be parents >>>>>>>>>>>>>>>
orb1 = selectOrbian(famList)
orb2 = selectOrbian(famList, orb1)
# ########## DO NOT MODIFY THIS FUNCTION BEYOND THIS LINE ############
famList.append(orb1 + orb2)
print("\tGreetings Orbian " + famList[len(famList) - 1].getName())
def info(famList):
# ########## DO NOT MODIFY THIS FUNCTION ############
print("Select an Orbian to view")
orbian = selectOrbian(famList)
print("Orbian " + orbian.getName() + " is " + str(orbian.getAge()) + " zungs old")
print("\tand is " + str(orbian.getVolume()) + " zogs, and " + str(len(orbian)) + " zings")
def toPasture(famList):
orb = selectOrbian(famList)
orbIndex = famList.index(orb)
famList.pop(orbIndex)
print("\nWe wish " + orb.getName() + " well in his future adventures!\nThey will be missed.")
def thanosSnap(famList):
print("Uh oh. Orbian Thanos just snapped his fingers")
thinking()
half = len(famList) // 2
shuffle(famList)
for i in range(half):
famList.pop()
main()
| 3.640625
| 4
|
model.py
|
rezendi/vortext
| 14
|
12775739
|
<gh_stars>10-100
import datetime, logging
from google.appengine.api import memcache
from google.appengine.ext import db, ndb
import tweepy
TWITTER_CONSUMER_KEY = "YOUR_CONSUMER_KEY"
TWITTER_CONSUMER_SECRET = "YOUR_CONSUMER_SECRET"
STATUS_CREATED = 1
STATUS_FETCH_INITIATED = 10
STATUS_FETCH_COMPLETE = 100
STATUS_PARSE_URLS = 200
STATUS_EXPANDING_URLS = 300
STATUS_URLS_EXPANDED = 400
STATUS_COMPLETE = 500
STATUS_FILE_UPLOADED = 1000
STATUS_FILE_UNPACKED = 2000
MONTHS={1:'Jan', 2:'Feb', 3:'Mar', 4:'Apr', 5:'May', 6:'Jun', 7:'Jul', 8:'Aug', 9:'Sep', 10:'Oct', 11:'Nov', 12:'Dec'}
class Account(ndb.Model):
time_created = ndb.DateTimeProperty(auto_now_add=True)
time_edited = ndb.DateTimeProperty(auto_now=True)
last_login = ndb.DateTimeProperty()
status = ndb.IntegerProperty()
name = ndb.StringProperty()
email = ndb.StringProperty()
privacy = ndb.IntegerProperty()
twitter_handle = ndb.StringProperty()
twitter_key = ndb.StringProperty()
twitter_secret = ndb.StringProperty()
twitter_max = ndb.IntegerProperty()
urls = ndb.JsonProperty(compressed=True)
keywords = ndb.JsonProperty(compressed=True)
timeline = ndb.JsonProperty(compressed=True)
def is_private(self):
return False if self.privacy is None else self.privacy>0
#it's more than possible that this field is excessively overloaded
def update_status(self, new_status):
if self.status is None:
self.status = new_status
elif new_status in [STATUS_FETCH_INITIATED, STATUS_FETCH_COMPLETE, STATUS_EXPANDING_URLS, STATUS_COMPLETE]:
self.status = (self.status/1000)*1000 + new_status
elif new_status/100==0:
self.status = (self.status/100)*100 + new_status
elif new_status/1000==0:
self.status = (self.status/1000)*1000 + new_status + (self.status % 100)
elif new_status/1000>0:
self.status = new_status + (self.status % 1000)
if self.key is None:
self.put()
memcache.set("%s_status" % self.key.urlsafe(), self.status, 10800)
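    # Illustrative example of the packed status value (hypothetical starting value): an
    # account whose archive was uploaded has status 1000 (STATUS_FILE_UPLOADED); calling
    # update_status(STATUS_FETCH_INITIATED) then gives (1000/1000)*1000 + 10 = 1010, which
    # status_string() below reports as "Fetch initiated, please wait..." while still
    # remembering the upload state in the thousands digit.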
def newest_raw_data(self):
q = RawData.query(ndb.AND(RawData.account_key == self.key,RawData.source=="twitter"))
return q.order(-RawData.time_created).get()
def has_uploaded_data(self):
uploaded = RawData.query(ndb.AND(RawData.account_key == self.key,RawData.source=="twitter_upload"))
uploaded = uploaded.order(-RawData.time_created).get()
return uploaded is not None
def raw_uploaded_data(self):
uploaded = RawData.query(ndb.AND(RawData.account_key == self.key,RawData.source=="twitter_upload"))
uploaded = uploaded.order(-RawData.time_created).get()
if uploaded is None:
uploaded = RawData(account_key = self.key, source = "twitter_upload", data = [])
uploaded.put()
return uploaded
def newest_upload(self):
return UploadedFile.query(UploadedFile.account_key == self.key).order(-UploadedFile.time_created).get()
    # note: this second definition shadows the is_private() defined earlier in the class (privacy > 0)
    def is_private(self):
        return self.privacy == 1
def show_name(self):
return self.name if self.name is not None else ""
def show_email(self):
return self.email if self.email is not None else ""
class RawData(ndb.Model):
time_created = ndb.DateTimeProperty(auto_now_add=True)
time_edited = ndb.DateTimeProperty(auto_now=True)
account_key = ndb.KeyProperty(Account)
source = ndb.StringProperty()
data = ndb.JsonProperty(compressed=True)
class UploadedFile(ndb.Model):
time_created = ndb.DateTimeProperty(auto_now_add=True)
time_edited = ndb.DateTimeProperty(auto_now=True)
account_key = ndb.KeyProperty(Account)
blob_key = ndb.BlobKeyProperty()
source = ndb.StringProperty()
## Access methods
def account_for(keystring):
if keystring is None or keystring=='':
return None
key = ndb.Key(urlsafe = keystring)
account = key.get()
if account is not None:
if account.timeline is None: account.timeline = []
if account.keywords is None: account.keywords = []
if account.urls is None: account.urls = {}
return account
def twitter_for(account):
if account is not None:
return twitter_with(account.twitter_key, account.twitter_secret)
return None
def twitter_with(key, secret):
auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET, secure=True)
auth.set_access_token(key, secret)
twitter = tweepy.API(auth, parser=tweepy.parsers.RawParser())
return twitter
def url_cache(rawkey):
cache = RawData.query(RawData.account_key == None).order(-RawData.time_created).get()
if cache is not None:
try:
cache_size = len(cache._to_pb().Encode())
logging.info("cache size %s" % cache_size)
if cache_size > 960000:
logging.info("creating new cache")
cache = None
except Exception, ex:
logging.warn("error checking cache size: %s" % ex)
if cache is None:
cache = RawData(account_key = None, source = "twitter", data = {})
cache.put()
return cache
## Convenience methods
class DictObj(object):
def __getattr__(self, attr):
return self.__dict__.get(attr)
def __getitem__(self, item):
return self.__dict__.get(item)
def __repr__(self):
return '%s' % self.__dict__
def unicodize(s):
if s is None:
return ""
elif not isinstance(s, str):
return s
elif not isinstance(s, unicode):
return unicode(s,"utf-8",errors="ignore")
return s
def now():
return datetime.datetime.now()
def status_string(status):
if status is None: return "None"
if status == STATUS_CREATED: return "Created"
if status == STATUS_FILE_UNPACKED + STATUS_COMPLETE: return "All Tweets Parsed"
if status % 100 == STATUS_FETCH_INITIATED: return "Fetch initiated, please wait..."
if status % 100 > STATUS_FETCH_INITIATED: return "Fetched %s tweets, please wait..." % ((status % 100-STATUS_FETCH_INITIATED)*200)
if status % 1000 == STATUS_FETCH_COMPLETE: return "Fetch Complete, please wait..."
if status % 1000 == STATUS_PARSE_URLS: return "Parsing URLs, please wait..."
if status % 1000 == STATUS_EXPANDING_URLS: return "Expanding URLs"
if status % 1000 == STATUS_URLS_EXPANDED: return "URLS Expanded"
if status/1000 == STATUS_FILE_UPLOADED/1000: return "File Uploaded"
if status/1000 == STATUS_FILE_UNPACKED/1000: return "File Unpacked"
if status % 1000 == STATUS_COMPLETE: return "Parse Complete"
return "Unknown"
| 2.375
| 2
|
test/keras-rl/actor.py
|
showkeyjar/AutoMakeHuman
| 13
|
12775740
|
<reponame>showkeyjar/AutoMakeHuman
# Actor (actuator) network, translated from the original Chinese comment "操作器"
# The layer and initializer imports below are assumptions: the original snippet used the
# Keras 1.x-style API (merge, init=...) without importing it, so the matching 1.x names are added here.
import keras
from keras.layers import Input, Dense, merge
from keras.models import Model
from keras.initializations import normal

# HIDDEN1_UNITS / HIDDEN2_UNITS are expected to be defined by the caller's configuration.
def create_actor_network(self, state_size, action_dim):
    print("Now we build the model")
    S = Input(shape=[state_size])
    h0 = Dense(HIDDEN1_UNITS, activation='relu')(S)
    h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)
    Steering = Dense(1, activation='tanh', init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
    Acceleration = Dense(1, activation='sigmoid', init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
    Brake = Dense(1, activation='sigmoid', init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
    V = merge([Steering, Acceleration, Brake], mode='concat')
    model = Model(input=S, output=V)
    print("We finished building the model")
    return model, model.trainable_weights, S

# Reference signature of the keras-rl agent this actor feeds into (kept as a comment: the
# positional arguments below are placeholders, not objects defined in this file):
# rl.agents.ddpg.DDPGAgent(nb_actions, actor, critic, critic_action_input, memory, gamma=0.99, batch_size=32, nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000, train_interval=1, memory_interval=1, delta_range=None, delta_clip=inf, random_process=None, custom_model_objects={}, target_model_update=0.001)
| 2.71875
| 3
|
src/dbmeta/tuple_store.py
|
Jon-Burr/dbobj
| 0
|
12775741
|
<gh_stars>0
from .store import Store, SeqStore, AssocStore, MutableSeqStore, MutableAssocStore
from .column import read_identity
from future.utils import iteritems
class TupleStore(Store):
""" Store that stores data internally as namedtuples """
def __init__(self, data=None, store_type=None, **kwargs):
super(TupleStore, self).__init__(**kwargs)
if data is not None:
self.from_dict(data, store_type)
def _dict_to_tuple(self, data):
""" Read a tuple from a dictionary """
return tuple(
read_identity(c.name, data, c._desc.default, None)
for c in self._columns)
def _remote_to_tuple(self, data, store_type):
""" Read a tuple from remote store data """
return tuple(c.read_from(data, store_type) for c in self._columns)
def _remote_from_tuple(self, tup, store_type):
""" Convert a tuple to a dictionary for sending to a remote store """
data = {}
for c in self._columns:
c.write_to(tup[c.index], data, store_type)
return data
def __getitem__(self, idx_pair):
row_idx, col_idx = idx_pair
return self._data[row_idx][col_idx]
def __len__(self):
return len(self._data)
class TupleSeqStore(TupleStore, SeqStore):
""" Sequential store that stores data internally as namedtuples """
def __init__(self, **kwargs):
""" Create the store """
self._data = []
super(TupleSeqStore, self).__init__(**kwargs)
def from_remote(self, data, store_type):
""" Update the internal data store from the supplied remote store data """
self._data = [self._remote_to_tuple(d, store_type) for d in data]
def to_remote(self, store_type):
""" Convert the internal data store to a tuple of dicts """
return tuple(self._remote_from_tuple(t, store_type) for t in self._data)
class MutableTupleSeqStore(TupleSeqStore, MutableSeqStore):
""" Mutable sequential store that stores data internally as namedtuples """
def append(self, row_data):
self._data.append(self._dict_to_tuple(row_data))
def __setitem__(self, idx_pair, value):
row_idx, col_idx = idx_pair
self._data[row_idx] = tuple(
value if i == col_idx else v
for (i, v) in enumerate(self._data[row_idx]))
def __delitem__(self, row_idx):
del self._data[row_idx]
MutableSeqStore.__delitem__(self, row_idx)
class TupleAssocStore(TupleStore, AssocStore):
""" Associative store that stores data internally as namedtuples """
def __init__(self, **kwargs):
""" Create the store """
self._data = {}
super(TupleAssocStore, self).__init__(**kwargs)
def from_dict(self, data, store_type):
self._data = {
self._index_column.read_func(k, store_type):
self._remote_to_tuple(v, store_type)
for k, v in iteritems(data)}
def to_dict(self, store_type):
return {
self._index_column.write_func(k, store_type): \
self._remote_from_tuple(t, store_type)
for k, t in iteritems(self._data)}
def __iter__(self):
return iter(self._data)
def __contains__(self, row_idx):
return row_idx in self._data
class MutableTupleAssocStore(TupleAssocStore, MutableAssocStore):
""" Mutable associative store that stores data internally as namedtuples """
def add(self, index, row_data):
if index in self:
raise KeyError(
"Attempting to add pre-existing index {0}!".format(index) )
self._data[index] = self._dict_to_tuple(row_data)
def __setitem__(self, idx_pair, value):
row_idx, col_idx = idx_pair
self._data[row_idx] = tuple(
value if i == col_idx else v
for (i, v) in enumerate(self._data[row_idx]))
def __delitem__(self, row_idx):
del self._data[row_idx]
| 2.6875
| 3
|
scripts/1_1_BengaleseFinch_Sakata_Parsing.py
|
AvisP/AVGN_Avishek
| 0
|
12775742
|
<filename>scripts/1_1_BengaleseFinch_Sakata_Parsing.py
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 17:56:10 2020
@author: AvishekPaul
"""
##### SCRIPT 1
from avgn.utils.general import prepare_env
prepare_env()
import tqdm
import pandas as pd
pd.options.display.max_columns = None
import librosa
from datetime import datetime
import numpy as np
import avgn
from avgn.custom_parsing.bengalese_finch_sakata import generate_json_wav
from avgn.utils.paths import DATA_DIR
######## Load data in original Format
DATASET_ID = 'bengalese_finch_sakata'
# create a unique datetime identifier for the files output by this notebook
DT_ID = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
DT_ID
DSLOC = avgn.utils.paths.Path('I:/Avishek_segmentation/BrownBlue')
DSLOC
WAVFILES = list((DSLOC).expanduser().glob('*/[!.]*.wav'))
len(WAVFILES), WAVFILES[0]
####### parse MAT and create wav/JSON
# import h5py as h5
last_indv = 'new_data'
wav_num = 0
for wav_file in tqdm.tqdm(WAVFILES):
print(wav_file)
indv = wav_file.parent.stem
try:
[song,rate] = librosa.core.load(wav_file)
except:
        print(wav_file.parts[-1] + " failed")
continue
if indv != last_indv:
wav_num = 0
last_indv = indv
else:
wav_num += 1
generate_json_wav(indv, wav_file.parts[-1], wav_num, song, int(rate), DT_ID)
### -------------------------------------------------------------------------------------------------------######
## SEGMENTATION USING DYNAMIC THRESHOLDING
import matplotlib.pyplot as plt
import numpy as np
import warnings
warnings.filterwarnings(action='once')
from avgn.utils.audio import load_wav, read_wav
from avgn.signalprocessing.filtering import butter_bandpass_filter
from avgn.signalprocessing.spectrogramming import spectrogram
from avgn.song_segmentation.dynamic_thresholding import norm, dynamic_threshold_segmentation
from avgn.vocalsegmentation.dynamic_thresholding import plot_segmented_spec
from avgn.visualization.spectrogram import plot_segmentations
from avgn.visualization.spectrogram import plot_spec, visualize_spec
from avgn.utils.paths import DATA_DIR, most_recent_subdirectory, ensure_dir
from avgn.utils.hparams import HParams
from avgn.dataset import DataSet
hparams = HParams(
n_fft = 4096,
mel_lower_edge_hertz=500,
mel_upper_edge_hertz=11025, # Should be sample_rate / 2 or less
butter_lowcut = 500,
butter_highcut = 12000,
ref_level_db = 20,
min_level_db = -100,
win_length_ms = 4,
hop_length_ms = 1,
num_mel_bins = 32,
mask_spec = True,
n_jobs = 1, # Makes processing serial if set to 1, parallel processing giving errors
verbosity=1,
nex = -1
)
### segmentation parameters
n_fft=1024
hop_length_ms=1
win_length_ms=10
ref_level_db=50
pre=0.97
min_level_db=-120
min_level_db_floor = -20
db_delta = 5
silence_threshold = 0.01
min_silence_for_spec=0.001
max_vocal_for_spec=0.49  # trailing comma removed: it made this a one-element tuple instead of a float
min_syllable_length_s = 0.025
butter_min = 500
butter_max = 8000
spectral_range = [500, 8000]
hparams.n_fft = n_fft
hparams.win_length_ms = win_length_ms
hparams.hop_length_ms = hop_length_ms
hparams.butter_lowcut = butter_min
hparams.butter_highcut = butter_max
hparams.ref_level_db = ref_level_db
hparams.min_level_db = min_level_db
hparams.min_level_db_floor = min_level_db_floor
hparams.db_delta = db_delta
hparams.min_silence_for_spec = min_silence_for_spec
hparams.max_vocal_for_spec = max_vocal_for_spec
hparams.silence_threshold = silence_threshold
syll_size = 128
hparams.mel_fiter = True  # should a mel filter be used? (attribute name kept as in the original; likely a typo for "mel_filter")
hparams.num_mels = syll_size  # how many channels to use in the mel-spectrogram (eg 128); trailing comma removed so this is an int, not a tuple
hparams.fmin = 300  # low frequency cutoff for mel filter
hparams.fmax = None  # high frequency cutoff for mel filter
hparams.power = 1.5
### Add other parameters from here if required ####
# # spectrogram inversion
# 'max_iters':200,
# 'griffin_lim_iters':60,
# 'power':1.5,
# # Thresholding out noise
# 'mel_noise_filt' : 0.15, # thresholds out low power noise in the spectrum - higher numbers will diminish inversion quality
# # Vocal Envelope
# 'smoothing' : 'gaussian', # 'none',
# 'envelope_signal' : "spectrogram", # spectrogram or waveform, what to get the vocal envelope from
# 'gauss_sigma_s': .0001,
# 'FOI_min': 4, # minimum frequency of interest for vocal envelope (in terms of mel)
# 'FOI_max': 24, # maximum frequency of interest for vocal envelope (in terms of mel)
# # Silence Thresholding
# 'silence_threshold' : 0, # normalized threshold for silence
# 'min_len' : 5., # minimum length for a vocalization (fft frames)
# 'power_thresh': .3, # Threshold for which a syllable is considered to be quiet weak and is probably noise
# # Syllabification
# 'min_syll_len_s' : 0.03, # minimum length for a syllable
# 'segmentation_rate': 0.0,#0.125, # rate at which to dynamically raise the segmentation threshold (ensure short syllables)
# 'threshold_max': 0.25,
# 'min_num_sylls': 20, # min number of syllables to be considered a bout
# 'slow_threshold':0.0,#0.02, # second slower threshold
# 'max_size_syll': syll_size, # the size of the syllable
# 'resize_samp_fr': int(syll_size*5.0), # (frames/s) the framerate of the syllable (in compressed spectrogram time components)
# # Second pass syllabification
# 'second_pass_threshold_repeats':50, # the number of times to repeat the second pass threshold
# 'ebr_min': 0.05, # expected syllabic rate (/s) low
# 'ebr_max': 0.2, # expected syllabic rate (/s) high
# 'max_thresh': 0.02, # maximum pct of syllabic envelope to threshold at in second pass
# 'thresh_delta': 0.005, # delta change in threshold to match second pass syllabification
# 'slow_threshold': 0.005, # starting threshold for second pass syllabification
# 'pad_length' : syll_size, # length to pad spectrograms to
####
DATASET_ID = 'bengalese_finch_sakata'
dataset = DataSet(DATASET_ID, hparams = hparams)
# Print sample dataset
dataset.sample_json
rate, data = load_wav(dataset.sample_json["wav_loc"])
mypath = r'I:\avgn_paper-vizmerge\data\processed\bengalese_finch_sakata\2020-04-29_21-12-51\WAV'
# file_current = 'br81bl41_0016.wav'
file_current = 'br82bl42_0016.wav'
file_current = 'tutor_bl5w5_0017.WAV'
rate, data_loaded = load_wav(mypath+'\\'+file_current)
data = data_loaded
times = np.linspace(0,len(data)/rate,len(data));
# filter data
data = butter_bandpass_filter(data, butter_min, butter_max, rate)
plt.plot(times,data)
hparams.ref_level_db = 90
spec_orig = spectrogram(data,
rate,
hparams)
plot_spec(
norm(spec_orig),
fig=None,
ax=None,
rate=None,
hop_len_ms=None,
cmap=plt.cm.afmhot,
show_cbar=True,
figsize=(20, 6),
)
# segment
results = dynamic_threshold_segmentation(data,
hparams,
verbose=True,
min_syllable_length_s=min_syllable_length_s,
spectral_range=spectral_range)
plot_segmentations(
results["spec"],
results["vocal_envelope"],
results["onsets"],
results["offsets"],
int(hparams.hop_length_ms),
int(hparams.sample_rate),
figsize=(15,5)
)
plt.show()
# Function for batch processing all segments
import joblib
import json
from avgn.utils.json import NoIndent, NoIndentEncoder
def segment_spec_custom(key, df, DT_ID, save=False, plot=False):
# load wav
rate, data = load_wav(df.data["wav_loc"])
# filter data
data = butter_bandpass_filter(data, butter_min, butter_max, rate)
# segment
# results = dynamic_threshold_segmentation(
# data,
# rate,
# n_fft=n_fft,
# hop_length_ms=hop_length_ms,
# win_length_ms=win_length_ms,
# min_level_db_floor=min_level_db_floor,
# db_delta=db_delta,
# ref_level_db=ref_level_db,
# pre=pre,
# min_silence_for_spec=min_silence_for_spec,
# max_vocal_for_spec=max_vocal_for_spec,
# min_level_db=min_level_db,
# silence_threshold=silence_threshold,
# verbose=True,
# min_syllable_length_s=min_syllable_length_s,
# spectral_range=spectral_range,
# )
results = dynamic_threshold_segmentation(data,
hparams,
verbose=True,
min_syllable_length_s=min_syllable_length_s,
spectral_range=spectral_range)
if results is None:
return
if plot:
plot_segmentations(
results["spec"],
results["vocal_envelope"],
results["onsets"],
results["offsets"],
hop_length_ms,
rate,
figsize=(15, 3)
)
plt.show()
# save the results
json_out = DATA_DIR / "processed" / (DATASET_ID + "_segmented") / DT_ID / "JSON" / (
key + ".JSON"
)
json_dict = df.data.copy()
json_dict["indvs"][list(df.data["indvs"].keys())[0]]["syllables"] = {
"start_times": list(results["onsets"]),
"end_times": list(results["offsets"]),
}
json_txt = json.dumps(json_dict, cls=NoIndentEncoder, indent=2)
# save json
if save:
ensure_dir(json_out.as_posix())
with open(json_out.as_posix(), "w") as json_file:
json.dump(json_dict, json_file, cls=NoIndentEncoder, indent=2)
json_file.close()
# print(json_txt, file=open(json_out.as_posix(), "w"))
#print(json_txt)
return results
indvs = np.array(['_'.join(list(i)) for i in dataset.json_indv])
np.unique(indvs)
DT_ID = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
### Generate for two sample wav dataset
nex = 1
for indv in tqdm.tqdm(np.unique(indvs), desc="individuals"):
print(indv)
indv_keys = np.array(list(dataset.data_files.keys()))[indvs == indv][:nex]
joblib.Parallel(n_jobs=1, verbose=0)(
joblib.delayed(segment_spec_custom)(key, dataset.data_files[key], DT_ID, save=True, plot=False)
for key in tqdm.tqdm(indv_keys, desc="files", leave=False)
)
### Generate for full dataset
nex = -1
for indv in tqdm.tqdm(np.unique(indvs), desc="individuals"):
print(indv)
indv_keys = np.array(list(dataset.data_files.keys()))[indvs == indv]
joblib.Parallel(n_jobs=1, verbose=1)(
joblib.delayed(segment_spec_custom)(key, dataset.data_files[key], DT_ID, save=True, plot=False)
for key in tqdm.tqdm(indv_keys, desc="files", leave=False)
)
# If some of the files did not process properly and you want to re-run them,
# figure out the individual key first from indv_keys.
# Find the indv string by executing:
for indv in tqdm.tqdm(np.unique(indvs), desc="individuals"):
print(indv)
indv_keys = np.array(list(dataset.data_files.keys()))[indvs == indv]
# Using the indv_keys, run the following command
key = 'tutor_bl5w5_0030'
segment_spec_custom(key, dataset.data_files[key], DT_ID, save=True, plot=False)
# DT_ID = '2020-05-07_16-26-29'
# Create dataframe for zebra finch
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import tqdm
from joblib import Parallel, delayed
import umap
import pandas as pd
from avgn.utils.paths import DATA_DIR, most_recent_subdirectory, ensure_dir
DATASET_ID = 'bengalese_finch_sakata_segmented'
from avgn.utils.hparams import HParams
from avgn.dataset import DataSet
from avgn.signalprocessing.create_spectrogram_dataset import prepare_wav, create_label_df, get_row_audio
# Run the hparams code from previous segments
# hparams = HParams(
# num_mel_bins = 32,
# mel_lower_edge_hertz=250,
# mel_upper_edge_hertz=12000,
# butter_lowcut = 250,
# butter_highcut = 12000,
# ref_level_db = 20,
# min_level_db = -50,
# mask_spec = True,
# win_length_ms = 10,
# hop_length_ms = 2,
# nex=-1,
# n_jobs=-1,
# verbosity = 1,
# )
# create a dataset object based upon JSON segmented
dataset = DataSet(DATASET_ID, hparams = hparams)
from joblib import Parallel, delayed
n_jobs = 1; verbosity = 10
# See a sample dataset
dataset.sample_json
len(dataset.data_files)
# Create dataset based upon JSON
with Parallel(n_jobs=n_jobs, verbose=verbosity) as parallel:
syllable_dfs = parallel(
delayed(create_label_df)(
dataset.data_files[key].data,
hparams=dataset.hparams,
labels_to_retain=[],
unit="syllables",
dict_features_to_retain = [],
key = key,
)
for key in tqdm.tqdm(dataset.data_files.keys())
)
syllable_df = pd.concat(syllable_dfs)
len(syllable_df)
# See a sample dataset after the JSON is converted to a dataframe
syllable_df[:3]
# Get audio for dataset
with Parallel(n_jobs=n_jobs, verbose=verbosity) as parallel:
syllable_dfs = parallel(
delayed(get_row_audio)(
syllable_df[syllable_df.key == key],
dataset.data_files[key].data['wav_loc'],
dataset.hparams
)
for key in tqdm.tqdm(syllable_df.key.unique())
)
syllable_df = pd.concat(syllable_dfs)
len(syllable_df)
# See sample dataframe after adding audio data to dataframe
syllable_df[:3]
# df_mask = np.array([len(i) > 0 for i in tqdm.tqdm(syllable_df.audio.values)])
# syllable_df = syllable_df[np.array(df_mask)]
# syllable_df[:3] # Sample dataframe
# sylls = syllable_df.audio.values
import librosa
syllable_df['audio'] = [librosa.util.normalize(i) for i in syllable_df.audio.values]
sylls = syllable_df['audio'].values
### Plot time domain form of syllables
nrows = 5
ncols = 10
zoom = 2
fig, axs = plt.subplots(ncols=ncols, nrows = nrows,figsize = (ncols*zoom, nrows+zoom/1.5))
for i, syll in tqdm.tqdm(enumerate(sylls), total = nrows*ncols):
ax = axs.flatten()[i]
ax.plot(syll)
if i == nrows*ncols -1:
break
# Create Spectrograms
from avgn.visualization.spectrogram import draw_spec_set
from avgn.signalprocessing.create_spectrogram_dataset import make_spec, mask_spec, log_resize_spec, pad_spectrogram
syllables_wav = syllable_df.audio.values
syllables_rate = syllable_df.rate.values
### Section for checking how sample spectrograms look with the hparams settings
### and change them as necessary
hparams.ref_level_db = 80
spec_1 = make_spec(
syllables_wav[10],
syllables_rate[10],
hparams=dataset.hparams,
mel_matrix=dataset.mel_matrix,
use_mel=True,
use_tensorflow=False,
)
from matplotlib import cm
fig, ax = plt.subplots(figsize=(4, 5))
ax.matshow(spec_1,cmap=cm.afmhot)
ax.axis('off')
######
## Run sepctrograms with these hparams settings for whole dataset
with Parallel(n_jobs=n_jobs, verbose=verbosity) as parallel:
# create spectrograms
syllables_spec = parallel(
delayed(make_spec)(
syllable,
rate,
hparams=dataset.hparams,
mel_matrix=dataset.mel_matrix,
use_mel=True,
use_tensorflow=False,
)
for syllable, rate in tqdm.tqdm(
zip(syllables_wav, syllables_rate),
total=len(syllables_rate),
desc="getting syllable spectrograms",
leave=False,
)
)
# Rescale spectrogram (using log scaling)
log_scaling_factor = 4
with Parallel(n_jobs=n_jobs, verbose=verbosity) as parallel:
syllables_spec = parallel(
delayed(log_resize_spec)(spec, scaling_factor=log_scaling_factor)
for spec in tqdm.tqdm(syllables_spec, desc="scaling spectrograms", leave=False)
)
# Check how the spectrograms look after rescaling
draw_spec_set(syllables_spec, zoom=1, maxrows=10, colsize=25)
# Pad spectrograms
syll_lens = [np.shape(i)[1] for i in syllables_spec]
pad_length = np.max(syll_lens)
syllable_df[:3]
import seaborn as sns
with Parallel(n_jobs=n_jobs, verbose=verbosity) as parallel:
syllables_spec = parallel(
delayed(pad_spectrogram)(spec, pad_length)
for spec in tqdm.tqdm(
syllables_spec, desc="padding spectrograms", leave=False
)
)
# Check how the spectrograms look after padding
draw_spec_set(syllables_spec, zoom=1, maxrows=10, colsize=25)
np.shape(syllables_spec)
syllable_df['spectrogram'] = syllables_spec
syllable_df[:3]
# View syllables per individual
for indv in np.sort(syllable_df.indv.unique()):
print(indv, np.sum(syllable_df.indv == indv))
specs = np.array([i/np.max(i) for i in syllable_df[syllable_df.indv == indv].spectrogram.values])
specs[specs<0] = 0
draw_spec_set(specs, zoom=2,
maxrows=16,
colsize=25,
fig_title=indv,
num_indv=str(np.sum(syllable_df.indv == indv)))
save_loc = DATA_DIR / 'syllable_dfs' / DATASET_ID / 'bf_sakata_Bluebrown.pickle'
ensure_dir(save_loc)
syllable_df.to_pickle(save_loc)
| 2.453125
| 2
|
src/CollageImage.py
|
1alexandra/collage
| 1
|
12775743
|
from PIL import Image
from src.utils import int_clamp
from PIL import UnidentifiedImageError
import tkinter.messagebox as messagebox
def safe_open_image(filename, corner_creator):
image = None
try:
if filename is not None and filename != "":
image = PILCollageImage(filename, corner_creator)
except UnidentifiedImageError:
messagebox.showerror("Error", "Failed open file {0}".format(filename))
return image
class ViewingWindow:
"""
Class for managing viewing window in original image
"""
def __init__(self, original, scale_step=0.05, scale_value_min=0.2, move_step=5):
self.original = original
self._image_size = None
self.scale_value = 1
self.scale_step = scale_step
self.move_step = move_step
self.view_vector = (0, 0)
self.scale_value_min = scale_value_min
self._borders = None
self._corner = None
self._actual_im_size = None
width = property()
height = property()
@width.getter
def width(self):
return self._image_size[0] * self.scale_value
@height.getter
def height(self):
return self._image_size[1] * self.scale_value
def _update_params(self):
center = (self.original.width / 2 + self.view_vector[0], self.original.height / 2 + self.view_vector[1])
left = center[0] - self.width / 2
upper = center[1] - self.height / 2
right = center[0] + self.width / 2
lower = center[1] + self.height / 2
new_borders = (
int_clamp(left, min_val=0),
int_clamp(upper, min_val=0),
int_clamp(right, max_val=self.original.width),
int_clamp(lower, max_val=self.original.height)
)
new_width = int_clamp(
(new_borders[2] - new_borders[0]) / self.scale_value, min_val=1, max_val=self._image_size[0])
new_height = int_clamp(
(new_borders[3] - new_borders[1]) / self.scale_value, min_val=1, max_val=self._image_size[1])
corner_x = int_clamp(-left / self.scale_value, min_val=0, max_val=self._image_size[0] - 1)
corner_y = int_clamp(-upper / self.scale_value, min_val=0, max_val=self._image_size[1] - 1)
self._borders = new_borders
self._actual_im_size = (new_width, new_height)
self._corner = (corner_x, corner_y)
def get(self):
"""
Crops rectangle from original image and resizes it to image size
Returns cropped PIL Image
"""
return self.original.crop(self._borders).resize(self._actual_im_size)
def _scale(self, new_scale_value):
self.scale_value = new_scale_value
self._update_params()
def resize(self, size):
self._image_size = int_clamp(size[0], min_val=1), int_clamp(size[1], min_val=1)
self._update_params()
def move(self, dx, dy):
self.view_vector = (
self.view_vector[0] + dx * self.scale_value,
self.view_vector[1] + dy * self.scale_value)
self._update_params()
def zoom_in(self):
self._scale(max(self.scale_value - self.scale_step, self.scale_value_min))
def zoom_out(self):
self._scale(self.scale_value + self.scale_step)
def move_up(self):
self.move(dx=0, dy=-self.move_step)
def move_down(self):
self.move(dx=0, dy=self.move_step)
def move_left(self):
self.move(dx=-self.move_step, dy=0)
def move_right(self):
self.move(dx=self.move_step, dy=0)
corner = property()
@corner.getter
def corner(self):
return self._corner
class PILCollageImage:
def __init__(self, filename, corner_creator):
self.corners = corner_creator
original = Image.open(filename)
self.viewing_window = ViewingWindow(original)
self._corner = None
def resize(self, size):
"""
Resize the image
size – The requested size in pixels, as a 2-tuple: (width, height).
"""
self.viewing_window.resize(size)
def move_view_up(self):
self.viewing_window.move_up()
def move_view(self, dx, dy):
self.viewing_window.move(dx=dx, dy=dy)
def move_view_down(self):
self.viewing_window.move_down()
def move_view_left(self):
self.viewing_window.move_left()
def move_view_right(self):
self.viewing_window.move_right()
def zoom_in(self):
self.viewing_window.zoom_in()
def zoom_out(self):
self.viewing_window.zoom_out()
def _update_corners(self, img):
alpha = self.corners.get_alpha(img.size)
img.putalpha(alpha)
return img
    PhotoImage = property()
    corner = property()
    ViewingWindow = property()

    # note: because the decorated getter below is named PIL, the resulting property is
    # bound to the name PIL (used as image.PIL), while PhotoImage above stays an empty placeholder
    @PhotoImage.getter
    def PIL(self):
        return self._update_corners(self.viewing_window.get())
@corner.getter
def corner(self):
return self.viewing_window.corner
@ViewingWindow.getter
def ViewingWindow(self):
return self.viewing_window
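

# Usage sketch, not part of the original module: the corner creator below is an assumed
# stand-in that only needs to expose get_alpha(size); the image paths are illustrative.
class _NoCorners:
    def get_alpha(self, size):
        # a fully opaque alpha channel, i.e. no rounded corners are cut away
        return Image.new("L", size, 255)


if __name__ == "__main__":
    img = safe_open_image("photo.jpg", _NoCorners())
    if img is not None:
        img.resize((400, 300))  # set the viewport size before reading the view
        img.zoom_in()
        img.PIL.save("photo_view.png")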
| 3.125
| 3
|
article/tests/test_urls.py
|
mohitkhatri611/mumbleapi
| 1
|
12775744
|
from django.test import SimpleTestCase
from django.urls import reverse , resolve
from article.views import *
class TestUrls(SimpleTestCase):
def test_articles_url_is_resolved(self):
url = reverse('mumbles-api-articles:articles')
self.assertEquals(resolve(url).func,articles)
def test_articles_created_url_is_resolved(self):
url = reverse('mumbles-api-articles:create-article')
self.assertEquals(resolve(url).func,createArticle)
def test_articles_vote_url_is_resolved(self):
url = reverse('mumbles-api-articles:article-vote')
self.assertEquals(resolve(url).func,updateVote)
def test_get_article_url_is_resolved(self):
url = reverse('mumbles-api-articles:get-article',args=['sOmE-iD'])
self.assertEquals(resolve(url).func,getArticle)
def test_edit_article_url_is_resolved(self):
url = reverse('mumbles-api-articles:edit-article',args=['sOmE-iD'])
self.assertEquals(resolve(url).func,editArticle)
def test_delete_article_url_is_resolved(self):
url = reverse('mumbles-api-articles:delete-article',args=['sOmE-iD'])
self.assertEquals(resolve(url).func,deleteArticle)
def test_edit_article_comment_url_is_resolved(self):
url = reverse('mumbles-api-articles:edit-article-comment',args=['sOmE-iD'])
self.assertEquals(resolve(url).func,editArticleComment)
def test_delete_article_comment_url_is_resolved(self):
url = reverse('mumbles-api-articles:delete-article-comment',args=['sOmE-iD'])
self.assertEquals(resolve(url).func,deleteArticleComment)
| 2.484375
| 2
|
game/game.py
|
petia2009/BOT
| 0
|
12775745
|
from game.field import FieldXO
from game.player import Player, Bot
class Game:
__field__: FieldXO = None
__status__: bool = False
__active_player__: Player
def __init__(self, player_x_id: str, player_o_id: str):
self.__playerX__ = self.__create_player(player_x_id, 'X')
self.__playerO__ = self.__create_player(player_o_id, 'O')
self.__active_player__ = self.__playerX__
@staticmethod
def __create_player(player_id: str, player_type: str) -> Player:
if player_id == 'bot':
return Bot(player_type=player_type)
else:
return Player(player_id=player_id, player_type=player_type)
def change_status(self, status: bool):
self.__status__ = status
def get_status(self):
return self.__status__
def get_field(self):
return self.__field__
def new_game(self):
self.__field__ = FieldXO()
self.__status__ = True
def end_game(self):
self.__field__ = None
self.__status__ = False
def __player_move(self, x: int, y: int, player: Player) -> bool:
if player.get_player_id() == self.__active_player__.get_player_id():
if self.__field__.fill_cell(x, y, player.get_player_type()):
self.__change_active_player()
return True
else:
return False
else:
return False
def __change_active_player(self):
if self.__active_player__ == self.__playerX__:
self.__active_player__ = self.__playerO__
else:
self.__active_player__ = self.__playerX__
def provide_player_move(self, x: int, y: int, player_id: str) -> str:
player = self.__define_player(player_id)
if not self.__player_move(x, y, player):
            return "That cell is taken, or it is not your turn"
        if self.get_field().is_winner(player_type=player.get_player_type()):
            self.change_status(status=False)
            return "You won!"
        if not self.get_field().has_empty_cell():
            self.change_status(status=False)
            return "Draw"
else:
if self.__active_player__.get_player_id() == 'bot':
player = self.__active_player__
self.__bot_move(player)
if self.get_field().is_winner(player_type=player.get_player_type()):
self.change_status(status=False)
                    return "You lost!"
                if not self.get_field().has_empty_cell():
                    self.change_status(status=False)
                    return "Draw"
                return "Your move"
            return "Waiting for the other player to move"
def __define_player(self, player_id: str) -> Player:
if self.__playerX__.__player_id__ == player_id:
return self.__playerX__
elif self.__playerO__.__player_id__ == player_id:
return self.__playerO__
else:
            raise Exception('No player found with id={0}'.format(player_id))
def __bot_move(self, player: Player):
cell = player.get_move_cell(self.__field__)
if not self.__player_move(cell.x, cell.y, player):
            raise Exception('The game bot is broken: it returned invalid move coordinates, '
                            'cell={0}, or it is not its turn, active_player={1}'.format(cell, self.__active_player__))
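

# Usage sketch, not part of the original module: player ids are arbitrary strings, and
# passing 'bot' makes that side a Bot (see __create_player above); the coordinates are
# assumed to be the 0-based cell indices that FieldXO.fill_cell expects.
if __name__ == "__main__":
    match = Game(player_x_id="alice", player_o_id="bot")
    match.new_game()
    print(match.provide_player_move(0, 0, "alice"))  # X moves, then the bot answers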
| 3.03125
| 3
|
ClusterFind.py
|
LasTAD/VAST-2017-MC-1
| 1
|
12775746
|
import numpy as np
from SOM import SOM
data = np.loadtxt('Data/output.txt', delimiter=';', usecols=range(40))
###SOM
som = SOM(10, 10) # initialize the SOM
som.fit(data, 10000, decay='hill')
# som = SOM(10, 10) # initialize the SOM
# som.load('Data/SOM')
targets = np.loadtxt('Data/target.txt', dtype='int')
targets = targets - 1
names = ['Car',
         'Truck 2',
         'Truck 3',
         'Truck 4+',
         'Bus 2',
         'Bus 3',
         '<NAME>'
         ]
# now visualize the learned representation with the class labels
som.plot_point_map(data, targets, names, filename='images/SOM/som.png')
# for name in names:
# som.plot_class_density(data, targets, t=names.index(name), name=name, filename='images/SOM/density ' + name + '.png')
# som.save('SOM')
| 3.421875
| 3
|
LAB03/02-S3/backend/cloudalbum/database/model_ddb.py
|
liks79/moving-to-serverless-renew
| 6
|
12775747
|
<reponame>liks79/moving-to-serverless-renew
"""
cloudalbum/database/model_ddb.py
~~~~~~~~~~~~~~~~~~~~~~~
Application data model defined here.
:description: CloudAlbum is a fully featured sample application for 'Moving to AWS serverless' training course
:copyright: © 2019 written by <NAME>, <NAME>.
:license: MIT, see LICENSE for more details.
"""
from datetime import datetime
from pynamodb.models import Model
from pynamodb.attributes import UnicodeAttribute, NumberAttribute, UTCDateTimeAttribute
from pynamodb.indexes import GlobalSecondaryIndex, IncludeProjection
from tzlocal import get_localzone
import json
import boto3
AWS_REGION = boto3.session.Session().region_name
class EmailIndex(GlobalSecondaryIndex):
"""
This class represents a global secondary index
"""
class Meta:
index_name = 'user-email-index'
read_capacity_units = 2
write_capacity_units = 1
projection = IncludeProjection(['password'])
# This attribute is the hash key for the index
# Note that this attribute must also exist
# in the model
email = UnicodeAttribute(hash_key=True)
class User(Model):
"""
User table for DynamoDB
"""
class Meta:
table_name = 'User'
region = AWS_REGION
id = UnicodeAttribute(hash_key=True)
email_index = EmailIndex()
email = UnicodeAttribute(null=False)
username = UnicodeAttribute(null=False)
password = UnicodeAttribute(null=False)
class Photo(Model):
"""
Photo table for DynamoDB
"""
class Meta:
table_name = 'Photo'
region = AWS_REGION
user_id = UnicodeAttribute(hash_key=True)
id = UnicodeAttribute(range_key=True)
tags = UnicodeAttribute(null=True)
desc = UnicodeAttribute(null=True)
filename_orig = UnicodeAttribute(null=True)
filename = UnicodeAttribute(null=True)
filesize = NumberAttribute(null=True)
geotag_lat = UnicodeAttribute(null=True)
geotag_lng = UnicodeAttribute(null=True)
    upload_date = UTCDateTimeAttribute(default=lambda: datetime.now(get_localzone()))  # callable default so the timestamp is computed per item, not once at import time
taken_date = UTCDateTimeAttribute(null=True)
make = UnicodeAttribute(null=True)
model = UnicodeAttribute(null=True)
width = UnicodeAttribute(null=True)
height = UnicodeAttribute(null=True)
city = UnicodeAttribute(null=True)
nation = UnicodeAttribute(null=True)
address = UnicodeAttribute(null=True)
class ModelEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, 'attribute_values'):
return obj.attribute_values
elif isinstance(obj, datetime):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
def photo_deserialize(photo):
photo_json = {}
photo_json['id'] = photo.id
photo_json['filename'] = photo.filename
photo_json['filename_orig'] = photo.filename_orig
photo_json['filesize'] = photo.filesize
photo_json['upload_date'] = photo.upload_date
photo_json['tags'] = photo.tags
photo_json['desc'] = photo.desc
photo_json['geotag_lat'] = photo.geotag_lat
photo_json['geotag_lng'] = photo.geotag_lng
photo_json['taken_date'] = photo.taken_date
photo_json['make'] = photo.make
photo_json['model'] = photo.model
photo_json['width'] = photo.width
photo_json['height'] = photo.height
photo_json['city'] = photo.city
photo_json['nation'] = photo.nation
photo_json['address'] = photo.address
return photo_json
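

# Usage sketch, not part of the original module: PynamoDB global secondary indexes are
# queried through the index attribute declared on the model; the e-mail address is illustrative.
if __name__ == "__main__":
    for user in User.email_index.query("someone@example.com"):
        print(user.id, user.email)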
| 2.171875
| 2
|
main.py
|
AbijahKaj/DONT
| 20
|
12775748
|
'''
##### DO Not Touch your face ver.0.2
### Medical Imaging & Intelligent Reality Lab (MI2RL) @ Asan Medical Center(AMC)
# MI2RL website : https://www.mi2rl.co/
# AMC : http://www.amc.seoul.kr/asan/main.do
### Developer
# <NAME> : <EMAIL>
# <NAME> : <EMAIL>
# <NAME> : <EMAIL>
# <NAME> : <EMAIL>
# <NAME> : <EMAIL>
### Data contributor
# MI2RL researchers
# <NAME>, Emergency Medicine@AMC
# <NAME>, Convergence Medicine@AMC
### references
# I3D Network (https://github.com/hassony2/kinetics_i3d_pytorch)
#####
'''
import cv2
import sys
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from gui_viewer import GUIViewer
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
viewer = GUIViewer()
viewer.show()
app.exec_()
| 2
| 2
|
tests/test_satellite/test_time.py
|
JaiWillems/SatPy
| 6
|
12775749
|
<filename>tests/test_satellite/test_time.py
"""Testing module for the `Time` class."""
from astropy import coordinates, time
from celest.satellite.time import Time
from unittest import TestCase
import julian as jd
import unittest
import numpy as np
class TestTime(TestCase):
    def setUp(self):
        """Test fixture for test method execution."""
self.julData = np.array([2455368.75, 2459450.85, 2456293.5416666665])
self.astropy_time = time.Time(self.julData, format="jd")
def test_true_solar_time(self):
"""Test `Time.true_solar_time`.
Notes
-----
        Test cases are generated from the Global Monitoring Laboratory and the
Government of Australia.[1]_[2]_
References
----------
.. [1] NOAA US Department of Commerce. ESRL Global Monitoring
Laboratory -Global Radiation and Aerosols. url:
https://gml.noaa.gov/grad/solcalc/.
.. [2] Time Conventions. url:http://www.bom.gov.au/climate/data-services/solar/content/data-time.html.
"""
julian = np.array([2455368.75, 2459450.85, 2456293.5416666665])
lon = np.array([-105, -118.24, 147.46])
tst = np.array([23.0715, 0.378059, 10.76556])
calc_tst = Time(julian=julian).true_solar_time(longitude=lon)
for i in range(calc_tst.size):
with self.subTest(i=i):
self.assertAlmostEqual(tst[i], calc_tst[i], delta=0.11)
def test_mean_solar_time(self):
"""Test `Time.mean_solar_time`.
Notes
-----
Test cases are generated from the Government of Australia.[1]_
References
----------
.. [1] Time Conventions. url: http://www.bom.gov.au/climate/data-services/solar/content/data-time.html.
"""
julData = np.array([2456293.5416666665])
lon = np.array([147.46])
tst = np.array([10.83056])
calc_tst = Time(julian=julData).mean_solar_time(longitude=lon)
for i in range(calc_tst.size):
with self.subTest(i=i):
self.assertAlmostEqual(tst[i], calc_tst[i], delta=0.001)
def test_true_hour_angle(self):
"""Test `Time.true_hour_angle`.
Notes
-----
        Test cases are taken from the Global Monitoring Laboratory.[1]_
References
----------
.. [1] NOAA US Department of Commerce. ESRL Global Monitoring
Laboratory -Global Radiation and Aerosols. url:
https://gml.noaa.gov/grad/solcalc/.
"""
julData = np.array([2455368.75, 2459450.85, 2456293.5416666665])
lon = np.array([-105, -118.24, 147.46])
hour_angle = np.array([166.0734, 185.671, 341.53]) / 15 % 24
calc_hour_angle = Time(julian=julData).true_hour_angle(longitude=lon)
for i in range(calc_hour_angle.size):
with self.subTest(i=i):
self.assertAlmostEqual(hour_angle[i], calc_hour_angle[i], delta=1.51)
def test_mean_hour_angle(self):
"""Test `Time.true_hour_angle`.
Notes
-----
Test cases are self generated using the definition of the mean hour
angle.
"""
julData = np.array([2456293.5416666665])
lon = np.array([147.46])
hour_angle = np.array([342.4584]) / 15 % 24
calc_hour_angle = Time(julian=julData).mean_hour_angle(longitude=lon)
for i in range(calc_hour_angle.size):
with self.subTest(i=i):
self.assertAlmostEqual(hour_angle[i], calc_hour_angle[i], delta=0.01)
def test_ut1(self):
"""Test `Time.UT1`.
Notes
-----
Test cases are generated using the `Astropy` Python package.
"""
dt = self.astropy_time.get_delta_ut1_utc().value / 3600
ut1 = self.astropy_time.to_value("decimalyear") % 1
ut1 = (ut1 * 365 * 24) % 24 + dt
calc_ut1 = Time(julian=self.julData).ut1()
for i in range(calc_ut1.size):
with self.subTest(i=i):
self.assertAlmostEqual(ut1[i], calc_ut1[i], delta=0.001)
def test_julian(self):
"""Test `Time.julian`."""
calc_julian = Time(julian=self.julData).julian()
for i in range(calc_julian.size):
with self.subTest(i=i):
self.assertEqual(self.julData[i], calc_julian[i])
def test_datetime(self):
"""Test `Time.datetime`."""
calc_datetime = Time(julian=self.julData).datetime()
for i in range(calc_datetime.size):
with self.subTest(i=i):
dt = jd.from_jd(self.julData[i])
self.assertEqual(calc_datetime[i].year, dt.year)
self.assertEqual(calc_datetime[i].month, dt.month)
self.assertEqual(calc_datetime[i].day, dt.day)
self.assertEqual(calc_datetime[i].second, dt.second)
self.assertAlmostEqual(calc_datetime[i].microsecond, dt.microsecond, delta=1)
def test_gmst(self):
"""Test `Time.gmst`.
Notes
-----
Test cases are generated using the `Astropy` Python package.
"""
gmst = self.astropy_time.sidereal_time("mean", "greenwich")
gmst = coordinates.Angle(gmst).hour
calc_gmst = Time(julian=self.julData).gmst()
for i in range(calc_gmst.size):
with self.subTest(i=i):
self.assertAlmostEqual(gmst[i], calc_gmst[i], delta=0.0001)
def test_lmst(self):
"""Test `Time.lmst`.
Notes
-----
Test cases are generated using the `Astropy` Python package.
"""
lmst = self.astropy_time.sidereal_time("mean", longitude="150")
lmst = coordinates.Angle(lmst).hour
calc_lmst = Time(julian=self.julData).lmst(longitude=150)
for i in range(calc_lmst.size):
with self.subTest(i=i):
self.assertAlmostEqual(lmst[i], calc_lmst[i], delta=0.1)
def test_gast(self):
"""Test `Time.gast`.
Notes
-----
Test cases are generated using the `Astropy` Python package.
"""
gast = self.astropy_time.sidereal_time("apparent", "greenwich")
gast = coordinates.Angle(gast).hour
calc_gast = Time(julian=self.julData).gast()
for i in range(calc_gast.size):
with self.subTest(i=i):
self.assertAlmostEqual(gast[i], calc_gast[i], delta=0.0001)
def test_last(self):
"""Test `Time.last`.
Notes
-----
Test cases are generated using the `Astropy` Python package.
"""
last = self.astropy_time.sidereal_time("apparent", longitude="150")
last = coordinates.Angle(last).hour
calc_last = Time(julian=self.julData).last(longitude=150)
for i in range(calc_last.size):
with self.subTest(i=i):
self.assertAlmostEqual(last[i], calc_last[i], delta=0.1)
if __name__ == "__main__":
unittest.main()
| 2.8125
| 3
|
application.py
|
JPGarCar/HORS
| 1
|
12775750
|
<filename>application.py
#!/usr/bin/python
## Things to improve:
# - return redirect(url_for(""))
# - database INDEX ids to be faster at searching
# DONE - SELECT * FROM users JOIN zipcodes ON users.zipcode = zipcodes.id 59:00
# DONE - use foreign keys to avoid recurring data in a table; the data lives in a different table and its id is stored in the original table
# DONE - use of classes ORM, in minute 1hr 30 min week 9 using SQLAlchemy
# DONE - hashed values for passwords
# - use javascript code or other people's code to make sign up better, as well as sign in and the old user page
# - javascript warning for special functions
# - javascript for the admin page search filters
# - javascript edit teacher info warnings
# - javascript generate special code for teachers
# - use localStorage in javascript for add committee country etc, pages so no need to use session in python
# DONE - Get GITHUB working here
# - use ajax to get new admin data every so time or after click of button
# DONE - add num of available assignments on user_newTeacherPage
## javascript is not an alternative to checks and validation ##
## Thus you need to do it in both python and javascript ##
## mix javascript with bootstrap ##
# import multiple packages
# flask for web service
from flask import Flask, redirect, render_template, request, url_for, session, flash, send_file
# flask-login
# from flask_login import LoginManager, login_required
# for the random assignment generator
import random
# basic math needed
import math
# for sentry issue resolver
# from raven.contrib.flask import Sentry
# for password hashing
from passlib.apps import custom_app_context as pwd_context
# for the random string in the special code
import string
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import or_
# create the Flask application object (bound to both 'application' and 'app')
application = app = Flask(__name__)
app.secret_key = "<KEY>"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///immuns.db"
app.config["SQLALCHEMY_ECHO"] = False
db = SQLAlchemy(app)
import helpers
import model_helpers as modHelpers
from typeOfCommittee import TypeOfCom
from Important import Important
from advanced import Advanced
from models import Teacher, Committee, Assignment, Delegate
# if there is any session data on the users computer then clear it
if session:
session.clear()
session.pop('_flashes', None)
# login_manager = LoginManager()
# login_manager.init_app(app)
# give Sentry its DSN so it can connect to the account
# sentry = Sentry(app, dsn='https://effbb4c86ee440eebbb9f8d2de12cd6f:e32dca8980a440e699031e80789d3a06@sentry.io/1238266')
##### no globals are used, instead the session module from flask is used, it uses cookies so that is way better.
###############################################################################################################################################################
###############################################################################################################################################################
####################################################### Routes for main Page ###############################################################################
###############################################################################################################################################################
###############################################################################################################################################################
# @login_manager.user_loader
# def load_user(user_id):
# return Teacher.query.get(user_id)
### / (GET POST -> templateRendered)
### main route to the sign in page,
### GET: returns the registration template
### POST: signs in a teacher if email and password match or sign admin
@app.route("/", methods=["GET", "POST"])
def user_registration():
### GET
if request.method == "GET":
return render_template("user_registration.html")
### POST
elif request.method == "POST":
        # restart the session
session["currentTeacher"] = None
### Check In Admin ###
if request.form["signInEmail"] == "<EMAIL>" and pwd_context.verify(request.form["signInPassword"],
pwd_context.hash("<PASSWORD>")):
session["adminIn"] = True
return modHelpers.returnAdminPage("", None)
### Sign In Teacher ###
# grabs the Teacher from table teacher with the inputed email
teachers = Teacher.query.filter(Teacher.email == request.form["signInEmail"]).all()
        # loop over the matching teachers; if no teacher matched, the loop body is skipped and the same page is returned with a flash message
for teacher in teachers:
# check the hashed password
if pwd_context.verify(request.form["signInPassword"], teacher.password):
if teacher.name != "":
# assign session variables
session["currentTeacher"] = teacher.getTeacherSession()
session["currentUserId"] = teacher.id
# log in user for Flask-Login
# login_user(teacher)
                    # "numDelOfTeacher" holds the number of delegates allowed by the special code
numDelOfTeacher = teacher.getNumOfMaxStudents()
# assign assignments in current teacher
numNow = teacher.getNumOfStudents()
# if the teacher has same assignments as his code permits then go to teacher page old
if numNow == numDelOfTeacher:
return teacher.returnUserPageOld()
# else go get more delgates
else:
                        # assign 'numRem' the number of delegates remaining
numRem = numDelOfTeacher - numNow
return modHelpers.renderNewTeacherPage(teacher, numRem)
            flash(
                "You have entered an incorrect password, please try again. If the problem persists, call your HOSPITALITY member for assistance.")
return render_template("user_registration.html")
        flash(
            "You have entered an incorrect email, please try again. If the problem persists, call your HOSPITALITY member for assistance.")
return render_template("user_registration.html")
### /user_signUp (GET POST -> templateRendered)
### user_signUp route to sign up a new teacher
### GET: returns the user_signUp template
### POST: checks if all fields are filled and correct and makes the new teacher
@app.route("/user_signUp", methods=["POST", "GET"])
def user_signUp():
### GET
if request.method == "GET":
return render_template("user_signUp.html")
### POST
elif request.method == "POST":
### Validate confirmation code ###
# checks confirmation code validity using getSpecial(); if not valid, return the same page with a flash error
if helpers.getSpecial(request.form["confirmationCode"]) == None:
flash("You have entered an incorrect confirmation code.")
flash("Please enter a valid confirmation code, if the problem persists, contact your HOSPITALITY member.")
return render_template("user_signUp.html")
else:
### Check Email Availability ###
# checks if email is already in table
email = Teacher.query.filter_by(email=request.form["email"]).first()
# if the email entered is already in use, return the same page with a flash error
if email is not None:
flash(
"The email you have entered is already in use. If you do not remember your password please contact your HOSPITALITY member.")
return render_template("user_signUp.html")
### Check Passwords Match ###
if not request.form["password"] == request.form["<PASSWORD>"]:
flash("The passwords that you have entered do not match, please try again.")
return render_template("user_signUp.html")
### Adding teacher ###
teacher = Teacher(request.form["personName"], request.form["email"],
pwd_context.hash(request.form["password"]), request.form["school"],
request.form["confirmationCode"])
db.session.add(teacher)
db.session.commit()
# return template user_signUpSuccess
return render_template("user_signUpSuccess.html")
### /user_signUpSuccess (GET POST -> templateRendered)
### user_signUpSuccess route, simple page
### GET: returns the user_signUpSuccess template
### POST: returns the teacher to the registration template
@app.route("/user_signUpSuccess", methods=["POST", "GET"])
def user_signUpSuccess():
### POST
if request.method == "POST":
return render_template("user_registration.html")
### GET
else:
return render_template("user_signUpSuccess.html")
# Helper Function
# USAGE: Will assign a country to a teacher or return an error message
def assign_helper(looking_for, type_of_committee, is_important, is_advanced, teacher):
if looking_for != 0:
available = modHelpers.stillAvailable(typeOfCom=type_of_committee, important=is_important, advanced=is_advanced)
if available >= looking_for:
modHelpers.randomCountry(looking_for, type_of_committee, is_important, teacher, is_advanced)
return None
elif available != 0:
# assign the available assignments
modHelpers.randomCountry(available, type_of_committee, is_important, teacher, is_advanced)
return "We were only able to assign " + str(available) + " " + TypeOfCom.to_string(type_of_committee) + \
(' Important' if is_important == Important.YES.value else '') + \
(' Advanced' if is_advanced == Advanced.YES.value else '') + " assignments. The remaining " + \
str(looking_for - available) + " assignments are still at your disposal."
else:
return "There are not enough " + TypeOfCom.to_string(type_of_committee) + \
(' Important' if is_important == Important.YES.value else '') + \
(' Advanced' if is_advanced == Advanced.YES.value else '') + " assignments, there are only " + \
str(available) + " available. You asked for: " + str(looking_for)
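# Illustrative usage of assign_helper (values are hypothetical; modHelpers does the
# actual assigning). Asking for 3 regular MS EN assignments when only 2 remain assigns
# the 2 available ones and returns a partial-fulfilment message; asking when enough are
# available assigns them all and returns None:
#
#     msg = assign_helper(looking_for=3, type_of_committee=TypeOfCom.MSEN.value,
#                         is_important=Important.NO.value, is_advanced=Advanced.NO.value,
#                         teacher=teacher)
#     # msg is None on full success, otherwise a string describing the shortfall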
### /user_newTeacherPage (POST -> templateRendered)
### user_newTeacherPage route, for the new teachers that need to select the number of assignments
### POST: let teachers select number of assignments limited to their code limit
@app.route("/user_newTeacherPage", methods=["POST"])
# @login_required
def user_newTeacherPage():
if request.method == "POST" and not session["currentTeacher"] is None:
# grab teacher that is signed in
teacher = Teacher.query.get(session["currentUserId"])
# assigns the number of delegates requested in each variable
MSE = helpers.assignToInt(request.form["MSE"])
MSS = helpers.assignToInt(request.form["MSS"])
HSE = helpers.assignToInt(request.form["HSE"])
HSS = helpers.assignToInt(request.form["HSS"])
HSEI = helpers.assignToInt(request.form["HSEI"])
MSEI = helpers.assignToInt(request.form["MSEI"])
MSSI = helpers.assignToInt(request.form["MSSI"])
HSSI = helpers.assignToInt(request.form["HSSI"])
MSEA = helpers.assignToInt(request.form["MSEA"])
MSSA = helpers.assignToInt(request.form["MSSA"])
HSEA = helpers.assignToInt(request.form["HSEA"])
HSSA = helpers.assignToInt(request.form["HSSA"])
MSEAI = helpers.assignToInt(request.form["MSEAI"])
MSSAI = helpers.assignToInt(request.form["MSSAI"])
HSEAI = helpers.assignToInt(request.form["HSEAI"])
HSSAI = helpers.assignToInt(request.form["HSSAI"])
G6EN = helpers.assignToInt(request.form["G6EN"])
# 'number' is the total of requested delegates plus delegates already in the teacher's table
number = MSE + MSS + HSE + HSS + MSEI + HSEI + MSSI + HSSI + MSEA + MSSA + \
HSEA + HSSA + G6EN + MSEAI + MSSAI + HSEAI + HSSAI + teacher.getNumOfStudents()
# grabs the maximum number of students the teacher's code allows
target = teacher.getNumOfMaxStudents()
# goes over all the requested assignment types, checking that some were requested
# and that enough remain in generalList
# if so, randomCountry() is called (via assign_helper) to assign them
if number == target:
# list for possible error messages
error_list = []
# if there are not enough assignments available, assign_helper assigns what it can
# and adds an error message for the shortfall to the list
# regular committees
error_list.append(assign_helper(looking_for=MSE, type_of_committee=TypeOfCom.MSEN.value,
is_important=Important.NO.value,
is_advanced=Advanced.NO.value, teacher=teacher))
error_list.append(assign_helper(looking_for=MSS, type_of_committee=TypeOfCom.MSSP.value,
is_important=Important.NO.value,
is_advanced=Advanced.NO.value, teacher=teacher))
error_list.append(assign_helper(looking_for=HSE, type_of_committee=TypeOfCom.HSEN.value,
is_important=Important.NO.value,
is_advanced=Advanced.NO.value, teacher=teacher))
error_list.append(assign_helper(looking_for=HSS, type_of_committee=TypeOfCom.HSSP.value,
is_important=Important.NO.value,
is_advanced=Advanced.NO.value, teacher=teacher))
# important assignments in regular committees
error_list.append(assign_helper(looking_for=MSEI, type_of_committee=TypeOfCom.MSEN.value,
is_important=Important.YES.value,
is_advanced=Advanced.NO.value, teacher=teacher))
error_list.append(assign_helper(looking_for=MSSI, type_of_committee=TypeOfCom.MSSP.value,
is_important=Important.YES.value,
is_advanced=Advanced.NO.value, teacher=teacher))
error_list.append(assign_helper(looking_for=HSEI, type_of_committee=TypeOfCom.HSEN.value,
is_important=Important.YES.value,
is_advanced=Advanced.NO.value, teacher=teacher))
error_list.append(assign_helper(looking_for=HSSI, type_of_committee=TypeOfCom.HSSP.value,
is_important=Important.YES.value,
is_advanced=Advanced.NO.value, teacher=teacher))
# advanced committees
error_list.append(assign_helper(looking_for=MSEA, type_of_committee=TypeOfCom.MSEN.value,
is_important=Important.NO.value,
is_advanced=Advanced.YES.value, teacher=teacher))
error_list.append(assign_helper(looking_for=MSSA, type_of_committee=TypeOfCom.MSSP.value,
is_important=Important.NO.value,
is_advanced=Advanced.YES.value, teacher=teacher))
error_list.append(assign_helper(looking_for=HSEA, type_of_committee=TypeOfCom.HSEN.value,
is_important=Important.NO.value,
is_advanced=Advanced.YES.value, teacher=teacher))
error_list.append(assign_helper(looking_for=HSSA, type_of_committee=TypeOfCom.HSSP.value,
is_important=Important.NO.value,
is_advanced=Advanced.YES.value, teacher=teacher))
# important assignments in advanced committees
error_list.append(assign_helper(looking_for=MSEAI, type_of_committee=TypeOfCom.MSEN.value,
is_important=Important.YES.value,
is_advanced=Advanced.YES.value, teacher=teacher))
error_list.append(assign_helper(looking_for=MSSAI, type_of_committee=TypeOfCom.MSSP.value,
is_important=Important.YES.value,
is_advanced=Advanced.YES.value, teacher=teacher))
error_list.append(assign_helper(looking_for=HSEAI, type_of_committee=TypeOfCom.HSEN.value,
is_important=Important.YES.value,
is_advanced=Advanced.YES.value, teacher=teacher))
error_list.append(assign_helper(looking_for=HSSAI, type_of_committee=TypeOfCom.HSSP.value,
is_important=Important.YES.value,
is_advanced=Advanced.YES.value, teacher=teacher))
# 6th grade assignments
error_list.append(assign_helper(looking_for=G6EN, type_of_committee=TypeOfCom.G6EN.value,
is_important=Important.NO.value,
is_advanced=Advanced.NO.value, teacher=teacher))
# if the error list is not empty, return the same page with flash errors; else return returnUserPageOld()
# filter the list to drop any None values
error_list = list(filter(None, error_list))
if len(error_list) > 0:
flash("Some assignments have been added but we have some issues:")
for error in range(0, len(error_list)):
flash(error_list[error])
num_rem = target - teacher.getNumOfStudents()
return modHelpers.renderNewTeacherPage(session["currentTeacher"], num_rem)
else:
return teacher.returnUserPageOld()
else:
# if incorrect number of assignments, return same page with the number of assignments remaining
num_rem = target - teacher.getNumOfStudents()
flash(
"You have entered an incorrect number of assignments, please try again you have {} delegates to assign.".format(
num_rem))
return modHelpers.renderNewTeacherPage(session["currentTeacher"], num_rem)
flash("An error was encountered please log in again. If the error persists call your HOSPITALITY member.")
return render_template("user_registration.html")
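# Worked example of the request-count check above (hypothetical numbers): a teacher
# whose special code allows 12 students and who already holds 4 assignments must request
# exactly 12 - 4 = 8 more across all the form fields combined; any other total re-renders
# the page with the remaining count (8) in the flash message.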
### /goTo (POST -> templateRendered)
### goTo route, returns the sign-up template; used after the teacher registers; only handles POST for the button click
@app.route("/goTo", methods=["POST"])
def goTo():
### POST
return render_template("user_signUp.html")
### /userSettingsPage (POST -> templateRendered)
### page where teachers can edit their info
@app.route("/userSettingsPage", methods=["POST", "GET"])
def userSettingsPage():
if request.method == "POST" and not session["currentTeacher"] is None:
teacher_id = request.form["submit"]
teacher = Teacher.query.get(teacher_id)
if request.form["password"] != "":
teacher.changePassword(request.form["password"])
teacher.email = request.form["email"]
teacher.name = request.form["name"]
teacher.school = request.form["school"]
flash("Changes have been made successfully!")
db.session.commit()
return render_template("user_settingsPage.html", teacher=teacher)
elif request.method == "GET" and not session["currentTeacher"] is None:
teacher = Teacher.query.get(session["currentUserId"])
return render_template("user_settingsPage.html", teacher=teacher)
### /user_oldTeacherPage (POST GET -> templateRendered)
### user page old route
### POST: a student's name is updated if anything is entered in the input bar, otherwise the name stays as it was
### GET: the program returns the user_oldTeacherPage()
@app.route("/user_oldTeacherPage", methods=["POST", "GET"])
# @login_required
def user_oldTeacherPage():
### POST
if request.method == "POST" and not session["currentTeacher"] is None:
# grab teacher that is logged in
teacher = Teacher.query.get(session["currentUserId"])
# gets all assignments from the table of the teacher currently in session
delegates = teacher.delegates
for delegate in delegates:
# builds the form field ID (N_<assignment id>) that corresponds to this delegate's name input
nameID = "N_" + str(delegate.id)
# uses the delegate's nameID to read the name from the HTML input bar
delName = request.form[nameID]
# checks if the input bar has a valid name input or not
if delName == "" or delName == " ":
# name is set to a blank when the input field is empty
delegate.name = " "
else:
# name is updated to the input string from the bar in teacher's table
delegate.name = delName
# use the id to get grade drop down
gradeID = "G_" + str(delegate.id)
delGrade = request.form[gradeID]
if delGrade != "":
delegate.grade = delGrade
db.session.commit()
# return the user page old with returnUserPageOld()
flash("The names have been changed as requested.")
return teacher.returnUserPageOld()
### GET
elif request.method == "GET" and not session["currentTeacher"] is None:
teacher = Teacher.query.get(session["currentUserId"])
return teacher.returnUserPageOld()
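# The per-delegate form fields above follow a simple naming convention keyed by the
# delegate's database id: the name input is "N_<id>" and the grade drop-down is
# "G_<id>". Illustrative reads for a (hypothetical) delegate with id 17:
#
#     request.form["N_17"]   # the name typed for that delegate, may be ""
#     request.form["G_17"]   # the grade selected for that delegate, may be ""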
### /userDownload (POST -> templateRendered)
### return printable html file with all the teacher's info
### POST: render user_printAssignments.html with teacher's info
@app.route("/userDownload", methods=["POST"])
# @login_required
def userDownload():
### POST
if request.method == "POST" and not session["currentTeacher"] is None:
# grab teacher logged in
teacher = Teacher.query.get(session["currentUserId"])
# grabs the school name of teacher
school = teacher.school
# grabs all delegates of the teacher
delegates = teacher.delegates
# return the html file with the data
return render_template("user_printAssignments.html", school=school, delegates=delegates)
### /logOut (POST -> templateRendred)
### similar to signOut; clears the session and returns the user to the registration page
### POST: delete all session info and flashes, return user_registration.html
@app.route("/logOut", methods=["POST"])
##@login_required
def logOut():
if session:
session.clear()
session.pop('_flashes', None)
# logout_user()
return render_template("user_registration.html")
###############################################################################################################################################################
###############################################################################################################################################################
##################################################### Admin Pages ###################################################################################
###############################################################################################################################################################
###############################################################################################################################################################
### /adminOne (POST GET -> templateRendered)
### admin console route
### POST: check the button status and act accordingly
### GET: return returnAdminPage() for all info
@app.route("/adminOne", methods=["POST", "GET"])
def adminOne():
### GET
if request.method == "GET" and session["adminIn"] == True:
return modHelpers.returnAdminPage("", None)
### POST
if request.method == "POST" and session["adminIn"] == True:
app.jinja_env.globals.update(numOfAssignments=Committee.numOfAssignments)
app.jinja_env.globals.update(numOfImportantAssignments=Committee.numOfImportantAssignments)
app.jinja_env.globals.update(numOfDelegates=Committee.numOfDelegates)
app.jinja_env.globals.update(numOfImportantDelegates=Committee.numOfImportantDelegates)
app.jinja_env.globals.update(codeYellow=Committee.codeYellow)
app.jinja_env.globals.update(codeRed=Committee.codeRed)
#
session["admingCurrentTable"] = ""
# value tells what button was clicked
value = request.form["Button"]
### General Filter Buttons ###
if value == "MS":
assignments = db.session.query(Assignment).join(Committee).filter(
or_(Committee.typeOfCom == TypeOfCom.MSEN.value, Committee.typeOfCom == TypeOfCom.MSSP.value)).all()
session["admingCurrentTable"] = " AND (typeOfCom = 'MS EN' OR typeOfCom = 'MS SP')"
genFilter = "MS"
return modHelpers.returnAdminPage(assignments, genFilter)
elif value == "HS":
assignments = db.session.query(Assignment).join(Committee).filter(
or_(Committee.typeOfCom == TypeOfCom.HSEN.value, Committee.typeOfCom == TypeOfCom.HSSP.value)).all()
session["admingCurrentTable"] = " AND (typeOfCom = 'HS EN' OR typeOfCom = 'HS SP')"
genFilter = "HS"
return modHelpers.returnAdminPage(assignments, genFilter)
elif value == "ALL":
session["admingCurrentTable"] = ""
return modHelpers.returnAdminPage("", None)
elif value == "English":
assignments = db.session.query(Assignment).join(Committee).filter(
or_(Committee.typeOfCom == TypeOfCom.HSEN.value, Committee.typeOfCom == TypeOfCom.MSEN.value)).all()
session["admingCurrentTable"] = " AND (typeOfCom = 'HS EN' OR typeOfCom = 'MS EN')"
genFilter = "English"
return modHelpers.returnAdminPage(assignments, genFilter)
elif value == "Spanish":
assignments = db.session.query(Assignment).join(Committee).filter(
or_(Committee.typeOfCom == TypeOfCom.HSSP.value, Committee.typeOfCom == TypeOfCom.MSSP.value)).all()
session["admingCurrentTable"] = " AND (typeOfCom = 'HS SP' OR typeOfCom = 'MS SP')"
genFilter = "Spanish"
return modHelpers.returnAdminPage(assignments, genFilter)
elif value == "HSEN":
assignments = db.session.query(Assignment).join(Committee).filter(
Committee.typeOfCom == TypeOfCom.HSEN.value).all()
session["admingCurrentTable"] = " AND typeOfCom = 'HS EN'"
genFilter = TypeOfCom.HSEN.value
return modHelpers.returnAdminPage(assignments, genFilter)
elif value == "HSSP":
assignments = db.session.query(Assignment).join(Committee).filter(
Committee.typeOfCom == TypeOfCom.HSSP.value).all()
session["admingCurrentTable"] = " AND typeOfCom = 'HS SP'"
genFilter = TypeOfCom.HSSP.value
return modHelpers.returnAdminPage(assignments, genFilter)
elif value == "MSEN":
assignments = db.session.query(Assignment).join(Committee).filter(
Committee.typeOfCom == TypeOfCom.MSEN.value).all()
session["admingCurrentTable"] = " AND typeOfCom = 'MS EN'"
genFilter = TypeOfCom.MSEN.value
return modHelpers.returnAdminPage(assignments, genFilter)
elif value == "MSSP":
assignments = db.session.query(Assignment).join(Committee).filter(
Committee.typeOfCom == TypeOfCom.MSSP.value).all()
session["admingCurrentTable"] = " AND typeOfCom = 'MS SP'"
genFilter = TypeOfCom.MSSP.value
return modHelpers.returnAdminPage(assignments, genFilter)
elif value == "Taken":
assignments = Assignment.query.filter(or_(Assignment.delegate != None)).all()
session["adminCurrentTable"] = " AND delegate_name != ''"
genFilter = "Taken"
return modHelpers.returnAdminPage(assignments, genFilter)
elif value == "NotTaken":
assignments = Assignment.query.filter(or_(Assignment.delegate == None)).all()
session["adminCurrentTable"] = " AND delegate_name != ''"
genFilter = "Taken"
return modHelpers.returnAdminPage(assignments, genFilter)
### Table with teachers data ###
elif value == "Teachers":
teachers = Teacher.query.all()
return render_template("admin_teachersTable.html", teachers=teachers)
### Table with all delegates ###
elif value == "Delegates":
delegates = Delegate.query.all()
teachers = Teacher.query.order_by(Teacher.name.asc()).all()
return render_template("admin_delegatesTable.html", delegates=delegates, teachers=teachers)
### Table with all committees ###
elif value == "Committees":
committees = Committee.query.order_by(Committee.name.asc()).all()
return render_template("admin_committeeTable.html", committees=committees)
### Generate Code ###
elif value == "GenerateCode":
return render_template("admin_generateCode.html", code="")
### Change room info for committees ### !!! check this
elif value == "changeRooms":
committees = Committee.query.order_by(Committee.name.asc()).all()
return render_template("admin_changeRooms.html", committees=committees)
### Add New Committee ###
elif value == "AddNewCom":
typeOfCom = []
for com in TypeOfCom:
typeOfCom.append(com)
return render_template("admin_addNewCommittee.html", second=False, typeOfCom=typeOfCom)
### Add new Country to committee ###
elif value == "AddNewCon":
comID = int(request.form.get("toCommitteeDropDown"))
session["addNewComitteeID"] = comID
committee = Committee.query.get(comID)
assignments = db.session.query(Assignment).join(Committee).filter(Committee.id == comID)
return render_template("admin_addNewCountry.html", committee=committee, second=False,
assignments=assignments)
### Delete info of all selected rows(assignments) ###
elif value == "DeleteBulkInfo":
rowIds = request.form.getlist("Selected")
for row in rowIds:
assignment = Assignment.query.get(int(row))
if assignment.delegate is not None:
flash("The following committe/country has been stripped of delegate info: {} / {}".format(
assignment.committee.name, assignment.country))
db.session.delete(assignment.delegate)
# commit all deletes
db.session.commit()
### Delete the rows(assignments) selected ###
elif value == "DeleteBulkRow":
rowIds = request.form.getlist("Selected")
for row in rowIds:
assignment = Assignment.query.get(int(row))
flash("The following committe/country and its delegate has been deleted: {} / {}".format(
assignment.committee.name, assignment.country))
# if assignment is realted to delegate, must delete delegate first
modHelpers.deleteAssignment(assignment)
modHelpers.checkAutoCommitteeDelete()
# idArange(Assignment)
### Search parameters ###
elif value == "Search":
com = request.form.get("committeeDropDown")
if com == "None":
isCommitteeSelected = False
else:
comID = int(com)
isCommitteeSelected = True
conName = request.form["countryField"]
if conName != "":
isCountrySelected = True
else:
isCountrySelected = False
if request.form.get("Taken"):
isNotTaken = True
else:
isNotTaken = False
if isCommitteeSelected and isCountrySelected and isNotTaken:
assignments = db.session.query(Assignment).join(Committee).filter(Committee.id == comID,
Assignment.country == conName,
Assignment.delegate == None, ).all()
message = "Committee : {} , Country : {} , Not Taken".format(assignments[0].committee.name, conName)
elif not isCommitteeSelected and isCountrySelected and isNotTaken:
assignments = Assignment.query.filter(Assignment.country == conName, Assignment.delegate == None).all()
message = "Country : {} , Not Taken".format(conName)
elif isCommitteeSelected and not isCountrySelected and isNotTaken:
assignments = db.session.query(Assignment).join(Committee).filter(Committee.id == comID,
Assignment.delegate == None).all()
message = "Committee : {} , Not Taken".format(assignments[0].committee.name)
elif isCommitteeSelected and isCountrySelected and not isNotTaken:
assignments = db.session.query(Assignment).join(Committee).filter(Committee.id == comID,
Assignment.country == conName).all()
message = "Committee : {} , Country : {}".format(assignments[0].committee.name, conName)
elif not isCommitteeSelected and not isCountrySelected and isNotTaken:
assignments = Assignment.query.filter(Assignment.delegate == None).all()
message = "Not Taken"
elif isCommitteeSelected and not isCountrySelected and not isNotTaken:
assignments = db.session.query(Assignment).join(Committee).filter(Committee.id == comID).all()
message = "Committee : {}".format(assignments[0].committee.name)
elif (not isCommitteeSelected) and isCountrySelected and (not isNotTaken):
assignments = Assignment.query.filter(Assignment.country == conName).all()
message = "Country : {}".format(conName)
else:
assignments = Assignment.query.all()
return modHelpers.returnAdminPage(assignments, message)
### Single row buttons ###
# single row buttons only care about the first three characters of button value to decide
listValue = value[0:3]
# Delete Information
if (listValue == "DI_"):
deleteInfo = value[3:]
assignment = Assignment.query.get(int(deleteInfo))
if assignment.delegate is not None:
db.session.delete(assignment.delegate)
db.session.commit()
flash("The following committe/country has been stripped of delegate info: {} / {}".format(
assignment.committee.name, assignment.country))
# Edite Row
elif (listValue == "Ed_"):
edit = value[3:]
assignment = Assignment.query.filter(Assignment.id == int(edit)).first()
return render_template("admin_editAssignment.html", assignment=assignment)
# Delete complete row
elif (listValue == "DR_"):
deleteRow = value[3:]
assignment = Assignment.query.get(int(deleteRow))
flash("The following committe/country has been deleted: {} / {}".format(assignment.committee.name,
assignment.country))
modHelpers.deleteAssignment(assignment)
# idArange("generalList")
modHelpers.checkAutoCommitteeDelete()
return modHelpers.returnAdminPage("", None)
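# The filter buttons above all follow the same SQLAlchemy pattern: join Assignment to
# its Committee and filter on Committee.typeOfCom, optionally combining values with
# or_(). A minimal sketch of the "MS" filter (the same query the button handler runs,
# repeated here only for reference):
#
#     db.session.query(Assignment).join(Committee).filter(
#         or_(Committee.typeOfCom == TypeOfCom.MSEN.value,
#             Committee.typeOfCom == TypeOfCom.MSSP.value)).all()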
### goes with the adminOne function above
### /admin_editAssignment (POST -> templateRendered)
### path to admin_editAssignment, will edit the assignment information, button on main admin page
### POST: edit the assignment information as specified
@app.route("/admin_editAssignment", methods=["POST"])
def admin_editAssignment():
### POST
if request.method == "POST" and session["adminIn"] == True:
# get values from webpage
con = request.form["country"]
idx = int(request.form["Button"])
# grab assignment to deal with
assignment = Assignment.query.get(idx)
assignment.country = con
# use .get() because value might be None or not there
if request.form.get("Important") == "on":
assignment.important = Important.YES.value
flash("The following has changed: {} = {} , {} = {}.".format(assignment.country, con, assignment.important,
Important.YES.value))
else:
flash("The following has changed: {} = {} , {} = {} .".format(assignment.country, con, assignment.important,
Important.NO.value))
db.session.commit()
return modHelpers.returnAdminPage("", None)
### /admin_generateCode (POST GET -> templateRendered)
### code generator route admin_generateCode
### POST: math to generate code for teachers
### GET: return admin_generateCode.html
@app.route("/admin_generateCode", methods=["GET", "POST"])
def admin_generateCode():
### GET
if request.method == "GET" and session["adminIn"] == True:
return render_template("admin_generateCode.html", code="")
### POST
if request.method == "POST" and session["adminIn"] == True:
totalnum = int(request.form["numOfDel"])
if not totalnum == 0:
firstnum = int(totalnum % 10)
secondnum = int(totalnum / 10)
genfirst = math.ceil((firstnum + 10) * 3)
gensecond = math.ceil((secondnum + 10) * 3)
return render_template("admin_generateCode.html", code=(
str(gensecond) + "".join(random.choice(string.ascii_uppercase) for x in range(4)) + str(
genfirst) + "".join(random.choice(string.ascii_uppercase) for x in range(2))))
return render_template("admin_generateCode.html", code="")
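# Worked example of the code generation above (hypothetical input): for numOfDel = 25,
# firstnum = 25 % 10 = 5 and secondnum = int(25 / 10) = 2, so genfirst = ceil((5+10)*3) = 45
# and gensecond = ceil((2+10)*3) = 36. The emitted code therefore looks like
# "36QWER45TY" (the letters are random); helpers.getSpecial() presumably reverses this
# arithmetic to recover 25 when the teacher signs up with the code.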
### /admin_addNewCommittee (POST -> templateRendered) !!! might need to add options for room, GET SHOULD USE ENUM stuff
### add new committee, path to admin_addNewCommittee
### POST: two parts, first is committee creation, second is assignment creations
@app.route("/admin_addNewCommittee", methods=["POST"])
def admin_addNewCommittee():
### POST
if request.method == "POST" and session["adminIn"] == True:
value = request.form["Button"]
### Create committee ###
if value == "create":
typeCom = request.form.get("typeOfCom")
com = request.form["committee"]
advanced = request.form.get("advanced")
if advanced != None:
committee = Committee(com, typeCom, "", "Yes")
else:
committee = Committee(com, typeCom, "", "No")
db.session.add(committee)
db.session.commit()
session["committeeInSessionID"] = committee.id
x = int(request.form["number"])
session["numberOfAssignments"] = x
return render_template("admin_addNewCommittee.html", second=True, numberOfAssignments=x,
committee=committee.name, typeOfCom=committee.typeOfCom)
### Create assignments ###
if value == "populate":
committeeID = int(session["committeeInSessionID"])
committee = Committee.query.get(committeeID)
committeeAmount = committee.numOfAssignments()
for num in range(int(session["numberOfAssignments"])):
country = request.form.get(str(num))
important = request.form.get("I" + str(num))
if important != None:
db.session.add(Assignment(committee.id, country, committeeAmount + num + 1, Important.YES.value))
else:
db.session.add(Assignment(committee.id, country, committeeAmount + num + 1, Important.NO.value))
db.session.commit()
# idArange(Assignment)
return modHelpers.returnAdminPage("", None)
return modHelpers.returnAdminPage("", None)
### /admin_addNewCountry (POST -> templateRendered)
### path to admin_addNewCountry, add new assignments to existing committee
### POST: two stages, first gets info from committee, second creates the assignments
@app.route("/admin_addNewCountry", methods=["POST"])
def admin_addNewCountry():
### POST
if request.method == "POST" and session["adminIn"] == True:
value = request.form["Button"]
### Get info from committee ###
if value == "create":
numOfCountries = int(request.form["numOfCon"])
session["numOfCountries"] = numOfCountries
committee = Committee.query.get(int(session["addNewComitteeID"]))
assignments = db.session.query(Assignment).join(Committee).filter(Committee.id == committee.id)
return render_template("admin_addNewCountry.html", second=True, numOfAssignments=numOfCountries,
committee=committee, assigments=assignments)
### Create assignments ###
if value == "populate":
committeeID = int(session["addNewComitteeID"])
committee = Committee.query.get(committeeID)
committeeAmount = committee.numOfAssignments()
for num in range(session["numOfCountries"]):
country = request.form.get(str(num))
important = request.form.get("I" + str(num))
if important != None:
db.session.add(Assignment(committee.id, country, committeeAmount + num + 1, Important.YES.value))
else:
db.session.add(Assignment(committee.id, country, committeeAmount + num + 1, Important.NO.value))
db.session.commit()
# idArange("generalList")
return modHelpers.returnAdminPage("", None)
return modHelpers.returnAdminPage("", None)
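# Both committee-population routes above number new assignments sequentially: the third
# positional argument to Assignment() is committeeAmount + num + 1, so a committee that
# already holds 10 assignments gets its new countries numbered 11, 12, ... (hypothetical
# example; the exact meaning of that field is defined by the Assignment model elsewhere).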
### /admin_teachersTable (POST -> templateRendered)
### path to admin_teachersTable, buttons in the teacher information table with all the teachers, edit or delete teacher info
### POST: two types, delete teacher and edit teacher info
@app.route("/admin_teachersTable", methods=["POST"])
def admin_teachersTable():
### POST
if request.method == "POST" and session["adminIn"] == True:
value = request.form["Button"]
listValue = value[0:3]
### Delete teacher row(teacher) ###
if (listValue == "DE_"):
deleteInfo = value[3:]
teacher = Teacher.query.get(int(deleteInfo))
delegates = Delegate.query.filter(Delegate.teacher_id == teacher.id).all()
for delegate in delegates:
db.session.delete(delegate)
db.session.delete(teacher)
flash(
"The table of {} from school {} has been deleted, her info wiped and all her assignments destroyed.".format(
teacher.name, teacher.school))
db.session.commit()
teachers = Teacher.query.all()
return render_template("admin_teachersTable.html", teachers=teachers)
### Edit teacher row(teacher) ###
elif (listValue == "ED_"):
edit = value[3:]
teacher = Teacher.query.filter(Teacher.id == int(edit)).first()
return render_template("admin_teachersTableEdit.html", teacher=teacher)
# return admin_teachersTable.html with all the teachers
teachers = Teacher.query.all()
return render_template("admin_teachersTable.html", teachers=teachers)
### goes together with the /admin_teachersTable function above
### /admin_teachersTableEdit (POST -> templateRendered)
### path to admin_teachersTableEdit
### POST: edit a teacher's information !!!
@app.route("/admin_teachersTableEdit", methods=["POST"])
def admin_teachersTableEdit():
### POST
if request.method == "POST" and session["adminIn"] == True:
teacherID = request.form["Button"]
if teacherID[0:2] == "NP":
teacher = Teacher.query.get(teacherID[3:])
teacher.changePassword(request.form["password"])
flash("The password has been changed succesfully to {}.".format(request.form["password"]))
else:
teacher = Teacher.query.get(teacherID)
teacher.email = request.form["email"]
teacher.confirmationCode = request.form["ConfCode"]
teacher.school = request.form["school"]
flash(
"The following has changed: {} = {} , {} = {} , {} = {} .".format(teacher.email, request.form["email"],
teacher.school,
request.form["school"],
teacher.confirmationCode,
request.form["ConfCode"]))
db.session.commit()
teachers = Teacher.query.all()
return render_template("admin_teachersTable.html", teachers=teachers)
### /admin_specialFunctions (POST GET -> templateRendered)
### route to /admin_specialFunctions,
### POST: code for the special functions
### GET: return returnAdminSpecialFunctions()
### !!! implement flash info and improve functions
@app.route("/admin_specialFunctions", methods=["GET", "POST"])
def admin_specialFunctions():
### GET
if request.method == "GET" and session["adminIn"] == True:
return modHelpers.returnAdminSpecialFunctions()
### POST
elif request.method == "POST" and session["adminIn"] == True:
# button value to determine which function to call
value = request.form["Button"]
### Delete all the information ###
if value == "DeleteAll":
Delegate.query.delete()
Teacher.query.delete()
Assignment.query.delete()
Committee.query.delete()
### Delete all the info in assignment (countries, committees, delegate data) ###
elif value == "DeleteAllCountryInfo":
Delegate.query.delete()
Assignment.query.delete()
Committee.query.delete()
### Delete all teachers and delegate info in assignment ###
elif value == "DeleteAllUserTables":
Delegate.query.delete()
Teacher.query.delete()
### Delete all delegate info in assignment ###
elif value == "DeleteAllDelegateInfo":
Delegate.query.delete()
### Delete an entire committee ###
elif value == "DeleteEntireCommittee":
committeeID = int(request.form.get("committeeDropDown"))
committee = Committee.query.get(committeeID)
modHelpers.deleteAssignments(committee.assignments)
db.session.delete(committee)
db.session.commit()
return modHelpers.returnAdminSpecialFunctions()
### /admin_editDelegate (POST -> templateRendered)
### path to admin_editDelegate, edit delegate information from the delegates table
### POST: edit the delegate information as specified !!!
@app.route("/admin_editDelegate", methods=["POST"])
def admin_editDelegate():
if request.method == "POST" and session["adminIn"] == True:
com = request.form["committee"]
con = request.form["country"]
name = request.form["delegateName"]
school = request.form["delegateSchool"]
idx = request.form["Button"]
password = request.form["password"]
delegate = Delegate.query.get(idx)
if password == "<PASSWORD>.":
flash("The following has changed: {} = {} , {} = {} , {} = {} , {} = {} .".format(
delegate.assignment.committee.name, com, delegate.assignment.country, con, delegate.name, name,
delegate.teacher.school, school))
delegate.assignment.committee.name = com
delegate.assignment.country = con
delegate.name = name
delegate.teacher.school = school
db.session.commit()
else:
flash("The changes were not succesful, wrong password")
return render_template("admin_editDelegate.html", delegate=delegate)
@app.route("/admin_takeMeToDelegate", methods=["POST"])
def admin_takeMeToDelegate():
if request.method == "POST" and session["adminIn"] == True:
idx = request.form["editDelegate"]
delegate = Delegate.query.get(int(idx))
return render_template("admin_editDelegate.html", delegate=delegate)
### /admin_delegatesTables (POST -> templateRendered)
### path to admin_delegatesTables
### POST: !!!
@app.route("/admin_delegatesTables", methods=["POST"])
def admin_delegatesTables():
if request.method == "POST" and session["adminIn"] == True:
value = request.form["Button"]
if value == "Search":
teacherSchoolID = request.form["schoolDropDown"]
delegateName = request.form["delegateName"]
if teacherSchoolID == "None" and delegateName.strip() != "":
delegates = Delegate.query.filter(Delegate.name.contains(delegateName)).all()
flash("Searching for delegate with name {}".format(delegateName))
elif teacherSchoolID != "None" and delegateName.strip() == "":
delegates = db.session.query(Delegate).join(Teacher).filter(Teacher.id == teacherSchoolID).all()
schoolName = db.session.query(Teacher).filter(Teacher.id == teacherSchoolID).first().school
flash("Searching for delegate with school {}".format(schoolName))
elif teacherSchoolID != "None" and delegateName.strip() != "":
delegates = db.session.query(Delegate).join(Teacher).filter(Teacher.id == teacherSchoolID,
Delegate.name.contains(delegateName))
schoolName = db.session.query(Teacher).filter(Teacher.id == teacherSchoolID).first().school
flash("Searching for delegate with name {} in school {}".format(delegateName, schoolName))
else:
delegates = Delegate.query.all()
teachers = Teacher.query.order_by(Teacher.name.asc()).all()
return render_template("admin_delegatesTable.html", delegates=delegates, teachers=teachers)
listValue = value[0:3]
if (listValue == "ED_"):
edit = value[3:]
delegate = Delegate.query.filter(Delegate.id == int(edit)).first()
return render_template("admin_editDelegate.html", delegate=delegate)
elif listValue == "DE_":
delete = int(value[3:])
db.session.delete(Delegate.query.get(delete))
db.session.commit()
delegates = Delegate.query.all()
teachers = Teacher.query.order_by(Teacher.name.asc()).all()
return render_template("admin_delegatesTable.html", delegates=delegates, teachers=teachers)
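# The delegate search above uses SQLAlchemy's contains(), which compiles to a
# LIKE '%<value>%' filter, so a partial name is enough to match. Illustrative sketch of
# the name-only branch (the search value is hypothetical):
#
#     Delegate.query.filter(Delegate.name.contains("Mar")).all()
#     # matches names such as "Maria" or "Marco"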
### /admin_committeeTable (POST -> templateRendered)
### path to admin_committeeTable
### POST: !!!
@app.route("/admin_committeeTable", methods=["POST"])
def admin_committeeTable():
if request.method == "POST" and session["adminIn"] == True:
value = request.form["Button"]
listValue = value[0:3]
if (listValue == "ED_"):
edit = value[3:]
committee = Committee.query.filter(Committee.id == int(edit)).first()
return render_template("admin_editCommittee.html", committee=committee)
elif listValue == "DE_":
delete = int(value[3:])
committee = Committee.query.get(delete)
modHelpers.deleteAssignments(committee.assignments)
db.session.delete(committee)
db.session.commit()
committees = Committee.query.all()
return render_template("admin_committeeTable.html", committees=committees)
### /admin_editCommittee (POST -> templateRendered)
### path to admin_editCommittee, edit committee information
### POST: edit the committee information as specified !!!
@app.route("/admin_editCommittee", methods=["POST"])
def admin_editCommittee():
if request.method == "POST" and session["adminIn"] == True:
name = request.form["committee"]
typeOfCom = request.form["typeOfCom"]
room = request.form["room"]
idx = request.form["Button"]
committee = Committee.query.get(idx)
if request.form.get("advanced"):
committee.advanced = "Yes"
else:
flash("The following has changed: {} = {} , {} = {} , {} = {} , {} = {} .".format(committee.name, name,
committee.typeOfCom,
typeOfCom, committee.room,
room, committee.advanced,
"No"))
committee.advanced = "No"
committee.name = name
committee.typeOfCom = typeOfCom
committee.room = room
db.session.commit()
return render_template("admin_editCommittee.html", committee=committee)
@app.route("/admin_takeMeToCommittee", methods=["POST"])
def admin_takeMeToCommittee():
if request.method == "POST" and session["adminIn"] == True:
idx = request.form["editCommittee"]
committee = Committee.query.get(int(idx))
return render_template("admin_editCommittee.html", committee=committee)
### /admin_manualRegister (POST GET -> templateRendered)
### path to admin_manualRegister, button in admin page
### POST: two parts, the first gets teacher and committee info, the second adds the assignment to the teacher's table
### GET: return admin_manualRegister.html with teachers and committees
@app.route("/admin_manualRegister", methods=["GET", "POST"])
def admin_manualRegister():
### GET
if request.method == "GET" and session["adminIn"] == True:
teachers = Teacher.query.all()
committees = Committee.query.all()
return render_template("admin_manualRegister.html", teachers=teachers, committees=committees, second=False)
### POST
elif request.method == "POST" and session["adminIn"] == True:
value = request.form["Button"]
if value == "next":
session["addingTeacherId"] = request.form.get("toTeacher")
teacherID = session["addingTeacherId"]
committeeID = int(request.form["committee"])
session["manualCommitteeID"] = committeeID
assignments = db.session.query(Assignment).join(Committee).filter(Committee.id == committeeID,
Assignment.delegate == None).all()
return render_template("admin_manualRegister.html", second=True, assignments=assignments,
teacher=Teacher.query.get(teacherID), committee=assignments[0].committee)
if value == "assign":
teacherID = session["addingTeacherId"]
comID = int(session["manualCommitteeID"])
committee = Committee.query.get(comID)
countryID = int(request.form.get("country"))
teacher = Teacher.query.get(teacherID)
if teacher.canAddDelegate():
assignment = Assignment.query.get(countryID)
delegate = Delegate("", assignment.id, teacher.id, "")
flash(
"You have assigned {} {} {} to {} .".format(committee.name, committee.typeOfCom, assignment.country,
teacher.name))
db.session.add(delegate)
db.session.commit()
else:
flash(
"Unable to assign, teacher has no spots remaining, unasing another delegate or change Special Code")
return redirect(url_for("admin_manualRegister"))
return modHelpers.returnAdminPage("", None)
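# Illustrative two-step flow for the manual register above (ids are hypothetical):
# step one ("next") stores the chosen teacher and committee ids in the session and lists
# that committee's unclaimed assignments; step two ("assign") checks
# teacher.canAddDelegate() (whether the teacher still has spots under their special-code
# limit) and, if there is room, creates a blank-named Delegate linking the teacher to the
# chosen Assignment.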
### /admin_stats (GET -> templateRendered)
### path to admin_stats, only gives information
### GET: return number of assignments available and total by type
@app.route("/admin_stats", methods=["GET"])
def admin_stats():
### GET
if request.method == "GET" and session["adminIn"] == True:
### Assignments available ###
# regular assignments
hsenA = modHelpers.stillAvailable(TypeOfCom.HSEN.value, Important.NO.value, Advanced.NO.value)
hsspA = modHelpers.stillAvailable(TypeOfCom.HSSP.value, Important.NO.value, Advanced.NO.value)
msenA = modHelpers.stillAvailable(TypeOfCom.MSEN.value, Important.NO.value, Advanced.NO.value)
msspA = modHelpers.stillAvailable(TypeOfCom.MSSP.value, Important.NO.value, Advanced.NO.value)
# important assignments
mseniA = modHelpers.stillAvailable(TypeOfCom.MSEN.value, Important.YES.value, Advanced.NO.value)
hseniA = modHelpers.stillAvailable(TypeOfCom.HSEN.value, Important.YES.value, Advanced.NO.value)
msspiA = modHelpers.stillAvailable(TypeOfCom.MSSP.value, Important.YES.value, Advanced.NO.value)
hsspiA = modHelpers.stillAvailable(TypeOfCom.HSSP.value, Important.YES.value, Advanced.NO.value)
# advanced assignments
msenaA = modHelpers.stillAvailable(TypeOfCom.MSEN.value, Important.NO.value, Advanced.YES.value)
hsenaA = modHelpers.stillAvailable(TypeOfCom.HSEN.value, Important.NO.value, Advanced.YES.value)
msspaA = modHelpers.stillAvailable(TypeOfCom.MSSP.value, Important.NO.value, Advanced.YES.value)
hsspaA = modHelpers.stillAvailable(TypeOfCom.HSSP.value, Important.NO.value, Advanced.YES.value)
# advanced and important assignments
msenaiA = modHelpers.stillAvailable(TypeOfCom.MSEN.value, Important.YES.value, Advanced.YES.value)
hsenaiA = modHelpers.stillAvailable(TypeOfCom.HSEN.value, Important.YES.value, Advanced.YES.value)
msspaiA = modHelpers.stillAvailable(TypeOfCom.MSSP.value, Important.YES.value, Advanced.YES.value)
hsspaiA = modHelpers.stillAvailable(TypeOfCom.HSSP.value, Important.YES.value, Advanced.YES.value)
# summation of all the available assignments
totalA = hsenA + hsspA + msenA + msspA + mseniA + hseniA + msspiA + hsspiA + hsenaA + hsspaA + msenaA + msspaA + msenaiA + hsenaiA + msspaiA + hsspaiA
### Assignments total ###
# regular assignments
hsenT = modHelpers.maxAssignInGen(TypeOfCom.HSEN.value, Important.NO.value, Advanced.NO.value)
hsspT = modHelpers.maxAssignInGen(TypeOfCom.HSSP.value, Important.NO.value, Advanced.NO.value)
msenT = modHelpers.maxAssignInGen(TypeOfCom.MSEN.value, Important.NO.value, Advanced.NO.value)
msspT = modHelpers.maxAssignInGen(TypeOfCom.MSSP.value, Important.NO.value, Advanced.NO.value)
# important assignments
mseniT = modHelpers.maxAssignInGen(TypeOfCom.MSEN.value, Important.YES.value, Advanced.NO.value)
hseniT = modHelpers.maxAssignInGen(TypeOfCom.HSEN.value, Important.YES.value, Advanced.NO.value)
hsspiT = modHelpers.maxAssignInGen(TypeOfCom.HSSP.value, Important.YES.value, Advanced.NO.value)
msspiT = modHelpers.maxAssignInGen(TypeOfCom.MSSP.value, Important.YES.value, Advanced.NO.value)
# advanced assignments
msenaT = modHelpers.maxAssignInGen(TypeOfCom.MSEN.value, Important.NO.value, Advanced.YES.value)
hsenaT = modHelpers.maxAssignInGen(TypeOfCom.HSEN.value, Important.NO.value, Advanced.YES.value)
msspaT = modHelpers.maxAssignInGen(TypeOfCom.MSSP.value, Important.NO.value, Advanced.YES.value)
hsspaT = modHelpers.maxAssignInGen(TypeOfCom.HSSP.value, Important.NO.value, Advanced.YES.value)
# advanced and important assignments
msenaiT = modHelpers.maxAssignInGen(TypeOfCom.MSEN.value, Important.YES.value, Advanced.YES.value)
hsenaiT = modHelpers.maxAssignInGen(TypeOfCom.HSEN.value, Important.YES.value, Advanced.YES.value)
msspaiT = modHelpers.maxAssignInGen(TypeOfCom.MSSP.value, Important.YES.value, Advanced.YES.value)
hsspaiT = modHelpers.maxAssignInGen(TypeOfCom.HSSP.value, Important.YES.value, Advanced.YES.value)
totalT = hsenT + hsspT + msenT + msspT + mseniT + hseniT + msspiT + hsspiT + hsenaT + hsspaT + msenaT + msspaT + msenaiT + hsenaiT + msspaiT + hsspaiT
committees = Committee.query.all()
# return the template with data
return render_template("admin_stats.html", hsenA=hsenA, hsspA=hsspA, msenA=msenA, msspA=msspA, hsenT=hsenT,
hsspT=hsspT, msspT=msspT, msenT=msenT, totalT=totalT, totalA=totalA, mseniA=mseniA,
hseniA=hseniA, mseniT=mseniT,
hseniT=hseniT, msspiA=msspiA, hsspiA=hsspiA, msspiT=msspiT, hsspiT=hsspiT,
committees=committees,
hsenaA=hsenaA, hsspaA=hsspaA, msenaA=msenaA, msspaA=msspaA, hsenaT=hsenaT,
hsspaT=hsspaT, msspaT=msspaT, msenaT=msenaT, msenaiA=msenaiA, hsenaiA=hsenaiA,
msenaiT=msenaiT,
hsenaiT=hsenaiT, msspaiA=msspaiA, hsspaiA=hsspaiA, msspaiT=msspaiT, hsspaiT=hsspaiT)
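# The stats above pair each "available" counter (modHelpers.stillAvailable) with a
# "total" counter (modHelpers.maxAssignInGen) for the same (type, important, advanced)
# combination, e.g. for regular HS EN seats (numbers are hypothetical):
#
#     hsenA = modHelpers.stillAvailable(TypeOfCom.HSEN.value, Important.NO.value, Advanced.NO.value)  # e.g. 12 left
#     hsenT = modHelpers.maxAssignInGen(TypeOfCom.HSEN.value, Important.NO.value, Advanced.NO.value)  # e.g. 40 total
#     # taken so far = hsenT - hsenA = 28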
### /admin_printCommittee (POST GET -> templateRendered)
### path to admin_printCommittee
### POST: render the printable list of assignments for the selected committee
### GET: show the committee drop-down to choose which committee to print
@app.route("/admin_printCommittee", methods=["POST", "GET"])
def admin_printCommittee():
### GET (select committee to print)
if request.method == "GET" and session["adminIn"] == True:
lista = modHelpers.getComDropDownOpt()
return render_template("admin_printCommittee.html", first=True, second=False, committees=lista)
### POST (print the selected committee)
elif request.method == "POST" and session["adminIn"] == True:
comName = request.form.get("committeeDropDown")
committee = Committee.query.filter(Committee.name == comName).first()
return render_template("admin_printCommittee.html", first=False, second=True, committee=committee,
assignments=committee.assignments)
### /admin_changeRooms (POST -> templateRendered)
### path to admin_changeRooms
### POST: change the room info of committees in generalList and teacher tables
@app.route("/admin_changeRooms", methods=["POST"])
def admin_changeRooms():
### POST
if request.method == "POST" and session["adminIn"] == True:
committees = Committee.query.all()
for committee in committees:
newRoom = request.form[str(committee.id)]
committee.room = newRoom
db.session.commit()
flash("Rooms have been successfully changed.")
return render_template("admin_changeRooms.html", committees=committees)
###############################################################################################################################################################
###############################################################################################################################################################
##################################################### ERROR HANDLERS ##################################################################################
###############################################################################################################################################################
###############################################################################################################################################################
@app.errorhandler(404)
def page_not_found(e):
# note that we set the 404 status explicitly
return render_template("_errorPage.html", number="404"), 404
@app.errorhandler(405)
def error405(e):
# note that we set the 405 status explicitly
return render_template("_errorPage.html", number="405"), 405
@app.errorhandler(403)
def error403(e):
# note that we set the 403 status explicitly
return render_template("_errorPage.html", number="403"), 403
@app.errorhandler(500)
def error500(e):
# note that we set the 500 status explicitly
return render_template("_errorPage.html", number="500"), 500
@app.errorhandler(502)
def error502(e):
# note that we set the 502 status explicitly
return render_template("_errorPage.html", number="502"), 502
if __name__ == "__main__":
app.run()
debug = True
# Made by <NAME>
| 2.265625
| 2
|