max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
tap/tests/test_main.py | cans/tappy-pkg | 0 | 12769751 | # Copyright (c) 2015, <NAME>
import os
from tap.main import main
from tap.tests import TestCase
class TestMain(TestCase):
    """Tests for tap.main.main"""
    def test_exits_with_error(self):
        """The main function returns an error status if there were failures."""
        argv = ['/bin/fake', 'fake.tap']
        # Use a context manager so the devnull handle is closed rather than
        # leaked (the original left the file object open for the GC to find).
        with open(os.devnull, 'w') as stream:
            status = main(argv, stream=stream)
        self.assertEqual(1, status)
| 2.78125 | 3 |
tests/fixtures/CKANDataSet_fixtures.py | bdolor/bcdc2bcdc | 0 | 12769752 | """[summary]
:return: [description]
:rtype: [type]
"""
import os
import constants
import pytest
import json
import logging
import CKANData
import tests.helpers.CKANDataHelpers as CKANDataHelpers
LOGGER = logging.getLogger(__name__)
@pytest.fixture(scope="session")
def CKANData_User_Data_Raw():
    """Yield the raw test user data loaded via the CKAN test-data helper."""
    yield CKANDataHelpers.CKAN_Test_Data().getTestUserData()
@pytest.fixture(scope="session")
def CKANData_Test_User_Data_Raw(CKANData_User_Data_Raw):
    """Yield a single sanitized user record for test use.

    Picks the record at ``constants.TEST_USER_DATA_POSITION``, overwrites the
    password, and strips server-generated / volatile fields so comparisons
    between environments are stable.
    """
    UserData = CKANData_User_Data_Raw[constants.TEST_USER_DATA_POSITION]
    UserData['password'] = '<PASSWORD>'
    # pop() with a default never raises, unlike the previous bare `del`,
    # so a record missing one of these fields no longer breaks the fixture
    for volatile_field in ('id', 'number_of_edits', 'email_hash', 'created', 'apikey'):
        UserData.pop(volatile_field, None)
    LOGGER.debug("user: %s", UserData)
    yield UserData
@pytest.fixture(scope="session")
def CKANData_User_Data_Set(CKANData_User_Data_Raw):
    """Wrap the raw user data in a CKANUsersDataSet for the whole session."""
    yield CKANData.CKANUsersDataSet(CKANData_User_Data_Raw)
@pytest.fixture(scope="session")
def CKANData_User_Data_Record(CKANData_User_Data_Set):
    """Yield a single user record pulled from the user data set fixture."""
    ckanUserRecord = CKANData_User_Data_Set.next()
    # lazy %-style args skip string formatting when debug logging is disabled
    LOGGER.debug("ckanUserRecord:%s", ckanUserRecord)
    yield ckanUserRecord
@pytest.fixture(scope="session")
def CKAN_Cached_Prod_User_Data(TestProdUserCacheJsonfile, CKANWrapperProd):
    """Yield PROD *user* data, using a local JSON cache when available.
    If the cache file does not exist, pull the users from the PROD CKAN API
    and write them to the cache so subsequent runs skip the network call.
    """
    if not os.path.exists(TestProdUserCacheJsonfile):
        userDataProd = CKANWrapperProd.getUsers(includeData=True)
        # cache the API response for the next test run
        with open(TestProdUserCacheJsonfile, 'w') as outfile:
            json.dump(userDataProd, outfile)
    else:
        with open(TestProdUserCacheJsonfile) as json_file:
            userDataProd = json.load(json_file)
    yield userDataProd
@pytest.fixture(scope="session")
def CKAN_Cached_Test_User_Data(TestTestUserCacheJsonfile, CKANWrapperTest):
    """Yield TEST *user* data, using a local JSON cache when available.
    If the cache file does not exist, pull the users from the TEST CKAN API
    and write them to the cache so subsequent runs skip the network call.
    """
    if not os.path.exists(TestTestUserCacheJsonfile):
        userDataTest = CKANWrapperTest.getUsers(includeData=True)
        # cache the API response for the next test run
        with open(TestTestUserCacheJsonfile, 'w') as outfile:
            json.dump(userDataTest, outfile)
    else:
        with open(TestTestUserCacheJsonfile) as json_file:
            userDataTest = json.load(json_file)
    yield userDataTest
@pytest.fixture(scope="session")
def CKAN_Cached_Test_User_Data_Set(CKAN_Cached_Test_User_Data):
    """Session-scoped CKANUsersDataSet built from the cached TEST user data."""
    yield CKANData.CKANUsersDataSet(CKAN_Cached_Test_User_Data)
@pytest.fixture(scope="session")
def CKAN_Cached_Prod_User_Data_Set(CKAN_Cached_Prod_User_Data):
    """Session-scoped CKANUsersDataSet built from the cached PROD user data."""
    yield CKANData.CKANUsersDataSet(CKAN_Cached_Prod_User_Data)
@pytest.fixture(scope="session")
def CKAN_Cached_Prod_Org_Data(TestProdOrgCacheJsonFile, CKANWrapperProd):
    """Yield PROD *organization* data, using a local JSON cache when available.
    If the cache file does not exist, pull the organizations from the PROD
    CKAN API and write them to the cache so subsequent runs skip the network
    call.
    """
    if not os.path.exists(TestProdOrgCacheJsonFile):
        orgDataProd = CKANWrapperProd.getOrganizations(includeData=True)
        # cache the API response for the next test run
        with open(TestProdOrgCacheJsonFile, 'w') as outfile:
            json.dump(orgDataProd, outfile)
    else:
        with open(TestProdOrgCacheJsonFile) as json_file:
            orgDataProd = json.load(json_file)
    yield orgDataProd
@pytest.fixture(scope="session")
def CKAN_Cached_Test_Org_Data(TestTestOrgCacheJsonFile, CKANWrapperTest):
    """Yield TEST *organization* data, using a local JSON cache when available.
    If the cache file does not exist, pull the organizations from the TEST
    CKAN API and write them to the cache so subsequent runs skip the network
    call.
    """
    if not os.path.exists(TestTestOrgCacheJsonFile):
        orgDataTest = CKANWrapperTest.getOrganizations(includeData=True)
        # cache the API response for the next test run
        with open(TestTestOrgCacheJsonFile, 'w') as outfile:
            json.dump(orgDataTest, outfile)
    else:
        with open(TestTestOrgCacheJsonFile) as json_file:
            orgDataTest = json.load(json_file)
    yield orgDataTest
@pytest.fixture(scope="session")
def CKAN_Cached_Test_Org_Data_Set(CKAN_Cached_Test_Org_Data):
    """Session-scoped CKANOrganizationDataSet from the cached TEST org data."""
    yield CKANData.CKANOrganizationDataSet(CKAN_Cached_Test_Org_Data)
@pytest.fixture(scope="session")
def CKAN_Cached_Test_Org_Record(CKAN_Cached_Test_Org_Data_Set):
    """Yield a single organization record from the cached TEST org data set."""
    yield CKAN_Cached_Test_Org_Data_Set.next()
| 2.1875 | 2 |
src/main.py | lyw1217/scraping-news | 0 | 12769753 | from app import *
from threading import Thread
def get_morning_news() :
    """Worker loop that posts each paper's morning briefing once per day.

    Every minute it checks the current hour; when it equals ``SEND_HOUR`` it
    fetches and sends each source still flagged ``True`` in ``f_send``, then
    marks it done until the hour rolls over.  Never returns; intended to run
    as a :class:`Thread` target.
    """
    root_logger.critical("< NEWS > get_morning_new Thread Started ... ")
    while True :
        # Take ONE timestamp per pass: the original called datetime.now()
        # three times, which could straddle a midnight/hour boundary and
        # yield an inconsistent month/day/hour combination.
        now = datetime.now()
        d_month = now.month
        d_day = now.day
        d_hour = now.hour
        # Send the news at the scheduled hour (once per source per day)
        for key, flag in f_send.items() :
            child_logger.debug("< NEWS > running... ")
            if d_hour == SEND_HOUR and flag :
                # Maeil Business Newspaper (maekyung)
                if key == 'maekyung' :
                    status, maekyung = get_maekyung_msg(d_month, d_day)
                    if status == 200 :
                        dbout('\r\n' + maekyung)
                        parent_logger.info("< NEWS > Success get_maekyung_msg()... ")
                    else :
                        dbout(f'\r\nStatus : {status}\nMessage : {maekyung}\n')
                        root_logger.warning(f'Status : {status}\nMessage : {maekyung}')
                    # mark as handled (success or failure) for today
                    f_send[key] = False
                # Korea Economic Daily (hankyung)
                elif key == 'hankyung' :
                    status, hankyung = get_hankyung_issue_today(d_month, d_day)
                    if status == 200 :
                        dbout('\r\n' + hankyung)
                        parent_logger.info("< NEWS > Success get_hankyung_issue_today()... ")
                    else :
                        dbout(f'\r\nStatus : {status}\nMessage : {hankyung}\n')
                        root_logger.warning(f'Status : {status}\nMessage : {hankyung}')
                    f_send[key] = False
                else :
                    dbout('Err. Wrong Key.')
                    root_logger.warning('< NEWS > Err. Wrong Key.')
                time.sleep(1)
            elif d_hour != SEND_HOUR :
                # re-arm every source once the send hour has passed
                f_send[key] = True
        time.sleep(60)
def scraping_news() :
    """Launch the morning-news worker on a thread and block until it exits."""
    worker = Thread(target=get_morning_news)
    worker.start()
    worker.join()
# Script entry point: emit a start-up banner, then run the scraper loop
# (which blocks indefinitely).
if __name__ == '__main__' :
    root_logger.critical("============================================")
    root_logger.critical("")
    root_logger.critical("     < S C R A P E R >   S T A R T     ")
    root_logger.critical("              written by ywlee")
    root_logger.critical("============================================")
scraping_news() | 2.515625 | 3 |
jupiter/domain/smart_lists/service/__init__.py | horia141/jupiter | 15 | 12769754 | <gh_stars>10-100
"""Smart list service classes."""
| 1.0625 | 1 |
isi_mip/core/migrations/0006_auto_20170306_1515.py | ISI-MIP/isimip | 4 | 12769755 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-03-06 14:15
from __future__ import unicode_literals
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
    """Auto-generated (Django 1.10.4): rebuilds ``HeaderLink.menu_items`` as a
    StreamField of ``jump_link`` / ``page_link`` blocks.  Generated migrations
    should not be hand-edited beyond documentation."""
    dependencies = [
        ('core', '0005_headerlink_menu_items'),
    ]
    operations = [
        migrations.AlterField(
            model_name='headerlink',
            name='menu_items',
            field=wagtail.core.fields.StreamField((('jump_link', wagtail.core.blocks.StructBlock((('name', wagtail.core.blocks.CharBlock()), ('link', wagtail.core.blocks.CharBlock())))), ('page_link', wagtail.core.blocks.StructBlock((('name', wagtail.core.blocks.CharBlock()), ('page', wagtail.core.blocks.PageChooserBlock()))))), blank=True, null=True),
        ),
    ]
| 1.65625 | 2 |
lib/gui/custom_widgets.py | aaman123/faceswap | 2 | 12769756 | <reponame>aaman123/faceswap
#!/usr/bin/env python3
""" Custom widgets for Faceswap GUI """
import logging
import platform
import re
import sys
import tkinter as tk
from tkinter import ttk, TclError
import numpy as np
from .utils import get_config
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class ContextMenu(tk.Menu): # pylint: disable=too-many-ancestors
    """ A Pop up menu to be triggered when right clicking on widgets that this menu has been
    applied to.
    This widget provides a simple right click pop up menu to the widget passed in with `Cut`,
    `Copy`, `Paste` and `Select all` menu items.
    Parameters
    ----------
    widget: tkinter object
        The widget to apply the :class:`ContextMenu` to
    Example
    -------
    >>> text_box = ttk.Entry(parent)
    >>> text_box.pack()
    >>> right_click_menu = ContextMenu(text_box)
    >>> right_click_menu.cm_bind()
    """
    def __init__(self, widget):
        logger.debug("Initializing %s: (widget_class: '%s')",
                     self.__class__.__name__, widget.winfo_class())
        super().__init__(tearoff=0)
        self._widget = widget
        self._standard_actions()
        logger.debug("Initialized %s", self.__class__.__name__)
    def _standard_actions(self):
        """ Standard menu actions """
        # each entry fires the widget's own virtual clipboard event
        self.add_command(label="Cut", command=lambda: self._widget.event_generate("<<Cut>>"))
        self.add_command(label="Copy", command=lambda: self._widget.event_generate("<<Copy>>"))
        self.add_command(label="Paste", command=lambda: self._widget.event_generate("<<Paste>>"))
        self.add_separator()
        self.add_command(label="Select all", command=self._select_all)
    def cm_bind(self):
        """ Bind the menu to the given widgets Right Click event
        After associating a widget with this :class:`ContextMenu` this function should be called
        to bind it to the right click button
        """
        # macOS reports the secondary (right) mouse button as Button-2;
        # all other platforms use Button-3
        button = "<Button-2>" if platform.system() == "Darwin" else "<Button-3>"
        logger.debug("Binding '%s' to '%s'", button, self._widget.winfo_class())
        self._widget.bind(button, lambda event: self.tk_popup(event.x_root, event.y_root))
    def _select_all(self):
        """ Select all for Text or Entry widgets """
        logger.debug("Selecting all for '%s'", self._widget.winfo_class())
        # Text and Entry widgets expose different selection APIs, so branch
        # on the underlying Tk widget class
        if self._widget.winfo_class() == "Text":
            self._widget.focus_force()
            self._widget.tag_add("sel", "1.0", "end")
        else:
            self._widget.focus_force()
            self._widget.select_range(0, tk.END)
class RightClickMenu(tk.Menu): # pylint: disable=too-many-ancestors
    """ A Pop up menu that can be bound to a right click mouse event to bring up a context menu
    Parameters
    ----------
    labels: list
        A list of label titles that will appear in the right click menu
    actions: list
        A list of python functions that are called when the corresponding label is clicked on
    hotkeys: list, optional
        The hotkeys corresponding to the labels. If using hotkeys, then there must be an entry in
        the list for every label even if they don't all use hotkeys. Labels without a hotkey can be
        an empty string or ``None``. Passing ``None`` instead of a list means that no actions will
        be given hotkeys. NB: The hotkey is not bound by this class, that needs to be done in code.
        Giving hotkeys here means that they will be displayed in the menu though. Default: ``None``
    """
    # TODO This should probably be merged with Context Menu
    def __init__(self, labels, actions, hotkeys=None):
        logger.debug("Initializing %s: (labels: %s, actions: %s)", self.__class__.__name__, labels,
                     actions)
        super().__init__(tearoff=0)
        self._labels = labels
        self._actions = actions
        self._hotkeys = hotkeys
        self._create_menu()
        logger.debug("Initialized %s", self.__class__.__name__)
    def _create_menu(self):
        """ Create the menu based on :attr:`_labels` and :attr:`_actions`. """
        for idx, (label, action) in enumerate(zip(self._labels, self._actions)):
            kwargs = dict(label=label, command=action)
            # the accelerator only *displays* the hotkey text next to the
            # label; binding the actual key press is the caller's job
            if isinstance(self._hotkeys, (list, tuple)) and self._hotkeys[idx]:
                kwargs["accelerator"] = self._hotkeys[idx]
            self.add_command(**kwargs)
    def popup(self, event):
        """ Pop up the right click menu.
        Parameters
        ----------
        event: class:`tkinter.Event`
            The tkinter mouse event calling this popup
        """
        self.tk_popup(event.x_root, event.y_root)
class ConsoleOut(ttk.Frame): # pylint: disable=too-many-ancestors
    """ The Console out section of the GUI.
    A Read only text box for displaying the output from stdout/stderr.
    All handling is internal to this method. To clear the console, the stored tkinter variable in
    :attr:`~lib.gui.Config.tk_vars` ``consoleclear`` should be triggered.
    Parameters
    ----------
    parent: tkinter object
        The Console's parent widget
    debug: bool
        ``True`` if console output should not be directed to this widget otherwise ``False``
    """
    def __init__(self, parent, debug):
        logger.debug("Initializing %s: (parent: %s, debug: %s)",
                     self.__class__.__name__, parent, debug)
        super().__init__(parent)
        self.pack(side=tk.TOP, anchor=tk.W, padx=10, pady=(2, 0),
                  fill=tk.BOTH, expand=True)
        self._console = _ReadOnlyText(self)
        rc_menu = ContextMenu(self._console)
        rc_menu.cm_bind()
        self._console_clear = get_config().tk_vars['consoleclear']
        self._set_console_clear_var_trace()
        self._debug = debug
        self._build_console()
        self._add_tags()
        logger.debug("Initialized %s", self.__class__.__name__)
    def _set_console_clear_var_trace(self):
        """ Set a trace on the consoleclear tkinter variable to trigger :func:`_clear` """
        logger.debug("Set clear trace")
        # NOTE(review): Variable.trace("w", ...) is the legacy API;
        # trace_add("write", ...) is the modern equivalent — confirm the
        # minimum supported Python/Tk before migrating.
        self._console_clear.trace("w", self._clear)
    def _build_console(self):
        """ Build and place the console and add stdout/stderr redirection """
        logger.debug("Build console")
        self._console.config(width=100, height=6, bg="gray90", fg="black")
        self._console.pack(side=tk.LEFT, anchor=tk.N, fill=tk.BOTH, expand=True)
        scrollbar = ttk.Scrollbar(self, command=self._console.yview)
        scrollbar.pack(side=tk.LEFT, fill="y")
        self._console.configure(yscrollcommand=scrollbar.set)
        self._redirect_console()
        logger.debug("Built console")
    def _add_tags(self):
        """ Add tags to text widget to color based on output """
        logger.debug("Adding text color tags")
        # tag names correspond to the log-level tags emitted by _SysOutRouter
        self._console.tag_config("default", foreground="#1E1E1E")
        self._console.tag_config("stderr", foreground="#E25056")
        self._console.tag_config("info", foreground="#2B445E")
        self._console.tag_config("verbose", foreground="#008140")
        self._console.tag_config("warning", foreground="#F77B00")
        self._console.tag_config("critical", foreground="red")
        self._console.tag_config("error", foreground="red")
    def _redirect_console(self):
        """ Redirect stdout/stderr to console Text Box """
        logger.debug("Redirect console")
        if self._debug:
            logger.info("Console debug activated. Outputting to main terminal")
        else:
            # replaces the process-wide stdout/stderr, so all subsequent
            # print/stream output is routed into this widget
            sys.stdout = _SysOutRouter(self._console, "stdout")
            sys.stderr = _SysOutRouter(self._console, "stderr")
        logger.debug("Redirected console")
    def _clear(self, *args): # pylint: disable=unused-argument
        """ Clear the console output screen """
        logger.debug("Clear console")
        if not self._console_clear.get():
            logger.debug("Console not set for clearing. Skipping")
            return
        self._console.delete(1.0, tk.END)
        # reset the trigger so the next clear request re-fires the trace
        self._console_clear.set(False)
        logger.debug("Cleared console")
class _ReadOnlyText(tk.Text): # pylint: disable=too-many-ancestors
    """ A read only text widget.
    Standard tkinter Text widgets are read/write by default. As we want to make the console
    display writable by the Faceswap process but not the user, we need to redirect its insert and
    delete attributes.
    Source: https://stackoverflow.com/questions/3842155
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.redirector = _WidgetRedirector(self)
        # register() masks the Tk-side operation with a no-op (returning
        # "break" swallows key-press inserts/deletes) and returns an
        # _OriginalCommand.  Re-assigning that to self.insert/self.delete
        # means *Python* callers still reach the original Tcl command, so
        # code can write to the widget while the user cannot.
        self.insert = self.redirector.register("insert", lambda *args, **kw: "break")
        self.delete = self.redirector.register("delete", lambda *args, **kw: "break")
class _SysOutRouter():
""" Route stdout/stderr to the given text box.
Parameters
----------
console: tkinter Object
The widget that will receive the output from stderr/stdout
out_type: ['stdout', 'stderr']
The output type to redirect
"""
def __init__(self, console, out_type):
logger.debug("Initializing %s: (console: %s, out_type: '%s')",
self.__class__.__name__, console, out_type)
self._console = console
self._out_type = out_type
self._recolor = re.compile(r".+?(\s\d+:\d+:\d+\s)(?P<lvl>[A-Z]+)\s")
logger.debug("Initialized %s", self.__class__.__name__)
def _get_tag(self, string):
""" Set the tag based on regex of log output """
if self._out_type == "stderr":
# Output all stderr in red
return self._out_type
output = self._recolor.match(string)
if not output:
return "default"
tag = output.groupdict()["lvl"].strip().lower()
return tag
def write(self, string):
""" Capture stdout/stderr """
self._console.insert(tk.END, string, self._get_tag(string))
self._console.see(tk.END)
@staticmethod
def flush():
""" If flush is forced, send it to normal terminal """
sys.__stdout__.flush()
class _WidgetRedirector:
    """Support for redirecting arbitrary widget sub-commands.
    Some Tk operations don't normally pass through tkinter. For example, if a
    character is inserted into a Text widget by pressing a key, a default Tk
    binding to the widget's 'insert' operation is activated, and the Tk library
    processes the insert without calling back into tkinter.
    Although a binding to <Key> could be made via tkinter, what we really want
    to do is to hook the Tk 'insert' operation itself. For one thing, we want
    a text.insert call in idle code to have the same effect as a key press.
    When a widget is instantiated, a Tcl command is created whose name is the
    same as the path name widget._w. This command is used to invoke the various
    widget operations, e.g. insert (for a Text widget). We are going to hook
    this command and provide a facility ('register') to intercept the widget
    operation. We will also intercept method calls on the tkinter class
    instance that represents the tk widget.
    In IDLE, WidgetRedirector is used in Percolator to intercept Text
    commands. The function being registered provides access to the top
    of a Percolator chain. At the bottom of the chain is a call to the
    original Tk widget operation.
    Attributes
    -----------
    _operations: dict
        Dictionary mapping operation name to new function. widget: the widget whose tcl command
        is to be intercepted.
    tk: widget.tk
        A convenience attribute, probably not needed.
    orig: str
        new name of the original tcl command.
    Notes
    -----
    Since renaming to orig fails with TclError when orig already exists, only one
    WidgetDirector can exist for a given widget.
    """
    def __init__(self, widget):
        self._operations = {}
        self.widget = widget # widget instance
        self.tk_ = tk_ = widget.tk # widget's root
        wgt = widget._w # pylint:disable=protected-access # widget's (full) Tk pathname
        self.orig = wgt + "_orig"
        # Rename the Tcl command within Tcl:
        tk_.call("rename", wgt, self.orig)
        # Create a new Tcl command whose name is the widget's path name, and
        # whose action is to dispatch on the operation passed to the widget:
        tk_.createcommand(wgt, self.dispatch)
    def __repr__(self):
        return "%s(%s<%s>)" % (self.__class__.__name__,
                               self.widget.__class__.__name__,
                               self.widget._w) # pylint:disable=protected-access
    def close(self):
        "Unregister operations and revert redirection created by .__init__."
        for operation in list(self._operations):
            self.unregister(operation)
        widget = self.widget
        tk_ = widget.tk
        wgt = widget._w # pylint:disable=protected-access
        # Restore the original widget Tcl command.
        tk_.deletecommand(wgt)
        tk_.call("rename", self.orig, wgt)
        del self.widget, self.tk_ # Should not be needed
        # if instance is deleted after close, as in Percolator.
    def register(self, operation, function):
        """Return _OriginalCommand(operation) after registering function.
        Registration adds an operation: function pair to ._operations.
        It also adds a widget function attribute that masks the tkinter
        class instance method. Method masking operates independently
        from command dispatch.
        If a second function is registered for the same operation, the
        first function is replaced in both places.
        """
        self._operations[operation] = function
        setattr(self.widget, operation, function)
        return _OriginalCommand(self, operation)
    def unregister(self, operation):
        """Return the function for the operation, or None.
        Deleting the instance attribute unmasks the class attribute.
        """
        if operation in self._operations:
            function = self._operations[operation]
            del self._operations[operation]
            try:
                delattr(self.widget, operation)
            except AttributeError:
                pass
            return function
        return None
    def dispatch(self, operation, *args):
        """Callback from Tcl which runs when the widget is referenced.
        If an operation has been registered in self._operations, apply the
        associated function to the args passed into Tcl. Otherwise, pass the
        operation through to Tk via the original Tcl function.
        Note that if a registered function is called, the operation is not
        passed through to Tk. Apply the function returned by self.register()
        to *args to accomplish that. For an example, see colorizer.py.
        """
        op_ = self._operations.get(operation)
        try:
            if op_:
                return op_(*args)
            return self.tk_.call((self.orig, operation) + args)
        except TclError:
            # the widget may be mid-destruction (e.g. app teardown); report
            # the failure back to Tcl as an empty result rather than raising
            return ""
class _OriginalCommand:
    """Callable for original tk command that has been redirected.
    Returned by .register; can be used in the function registered.
    redir = WidgetRedirector(text)
    def my_insert(*args):
        print("insert", args)
        original_insert(*args)
    original_insert = redir.register("insert", my_insert)
    """
    def __init__(self, redir, operation):
        """Create .tk_call and .orig_and_operation for .__call__ method.
        .redir and .operation store the input args for __repr__.
        .tk and .orig copy attributes of .redir (probably not needed).
        """
        self.redir = redir
        self.operation = operation
        self.tk_ = redir.tk_ # redundant with self.redir
        self.orig = redir.orig # redundant with self.redir
        # These two could be deleted after checking recipient code.
        self.tk_call = redir.tk_.call
        self.orig_and_operation = (redir.orig, operation)
    def __repr__(self):
        return "%s(%r, %r)" % (self.__class__.__name__,
                               self.redir, self.operation)
    def __call__(self, *args):
        # invoke the original (renamed) Tcl command directly, bypassing any
        # registered interception, with the operation prepended to the args
        return self.tk_call(self.orig_and_operation + args)
class StatusBar(ttk.Frame): # pylint: disable=too-many-ancestors
    """ Status Bar for displaying the Status Message and Progress Bar at the bottom of the GUI.
    Parameters
    ----------
    parent: tkinter object
        The parent tkinter widget that will hold the status bar
    hide_status: bool, optional
        ``True`` to hide the status message that appears at the far left hand side of the status
        frame otherwise ``False``. Default: ``False``
    """
    def __init__(self, parent, hide_status=False):
        super().__init__(parent)
        self.pack(side=tk.BOTTOM, padx=10, pady=2, fill=tk.X, expand=False)
        self._message = tk.StringVar()
        self._pbar_message = tk.StringVar()
        self._pbar_position = tk.IntVar()
        self._message.set("Ready")
        self._status(hide_status)
        self._pbar = self._progress_bar()
    @property
    def message(self):
        """:class:`tkinter.StringVar`: The variable to hold the status bar message on the left
        hand side of the status bar. """
        return self._message
    def _status(self, hide_status):
        """ Place Status label into left of the status bar.
        Parameters
        ----------
        hide_status: bool, optional
            ``True`` to hide the status message that appears at the far left hand side of the
            status frame otherwise ``False``
        """
        if hide_status:
            return
        statusframe = ttk.Frame(self)
        statusframe.pack(side=tk.LEFT, anchor=tk.W, fill=tk.X, expand=False)
        lbltitle = ttk.Label(statusframe, text="Status:", width=6, anchor=tk.W)
        lbltitle.pack(side=tk.LEFT, expand=False)
        lblstatus = ttk.Label(statusframe,
                              width=40,
                              textvariable=self._message,
                              anchor=tk.W)
        lblstatus.pack(side=tk.LEFT, anchor=tk.W, fill=tk.X, expand=True)
    def _progress_bar(self):
        """ Place progress bar into right of the status bar. """
        progressframe = ttk.Frame(self)
        progressframe.pack(side=tk.RIGHT, anchor=tk.E, fill=tk.X)
        lblmessage = ttk.Label(progressframe, textvariable=self._pbar_message)
        lblmessage.pack(side=tk.LEFT, padx=3, fill=tk.X, expand=True)
        pbar = ttk.Progressbar(progressframe,
                               length=200,
                               variable=self._pbar_position,
                               maximum=100,
                               mode="determinate")
        pbar.pack(side=tk.LEFT, padx=2, fill=tk.X, expand=True)
        # start hidden; :func:`start` shows the bar and :func:`stop` hides it
        pbar.pack_forget()
        return pbar
    def start(self, mode):
        """ Set progress bar mode and display,
        Parameters
        ----------
        mode: ["indeterminate", "determinate"]
            The mode that the progress bar should be executed in
        """
        self._set_mode(mode)
        self._pbar.pack()
    def stop(self):
        """ Reset progress bar and hide """
        self._pbar_message.set("")
        self._pbar_position.set(0)
        self._set_mode("determinate")
        self._pbar.pack_forget()
    def _set_mode(self, mode):
        """ Set the progress bar mode """
        self._pbar.config(mode=mode)
        if mode == "indeterminate":
            # indeterminate mode animates a marquee; start() begins it
            self._pbar.config(maximum=100)
            self._pbar.start()
        else:
            self._pbar.stop()
            self._pbar.config(maximum=100)
    def progress_update(self, message, position, update_position=True):
        """ Update the GUIs progress bar and position.
        Parameters
        ----------
        message: str
            The message to display next to the progress bar
        position: int
            The position that the progress bar should be set to
        update_position: bool, optional
            If ``True`` then the progress bar will be updated to the position given in
            :attr:`position`. If ``False`` the progress bar will not be updated. Default: ``True``
        """
        self._pbar_message.set(message)
        if update_position:
            self._pbar_position.set(position)
class Tooltip:
    """
    Create a tooltip for a given widget as the mouse goes on it.
    Parameters
    ----------
    widget: tkinter object
        The widget to apply the tool-tip to
    background: str, optional
        The hex code for the background color. Default:'#FFFFEA'
    pad: tuple, optional
        (left, top, right, bottom) padding for the tool-tip. Default: (5, 3, 5, 3)
    text: str, optional
        The text to be displayed in the tool-tip. Default: 'widget info'
    waittime: int, optional
        The time in milliseconds to wait before showing the tool-tip. Default: 400
    wraplength: int, optional
        The text length for each line before wrapping. Default: 250
    Example
    -------
    >>> button = ttk.Button(parent, text="Exit")
    >>> Tooltip(button, text="Click to exit")
    >>> button.pack()
    Notes
    -----
    Adapted from StackOverflow: http://stackoverflow.com/questions/3221956 and
    http://www.daniweb.com/programming/software-development/code/484591/a-tooltip-class-for-tkinter
    """
    def __init__(self, widget, *, background="#FFFFEA", pad=(5, 3, 5, 3), text="widget info",
                 waittime=400, wraplength=250):
        self._waittime = waittime # in milliseconds, originally 500
        self._wraplength = wraplength # in pixels, originally 180
        self._widget = widget
        self._text = text
        # show after hovering for waittime; hide on leave or any button press
        self._widget.bind("<Enter>", self._on_enter)
        self._widget.bind("<Leave>", self._on_leave)
        self._widget.bind("<ButtonPress>", self._on_leave)
        self._background = background
        self._pad = pad
        self._ident = None # pending `after` callback id (None when unscheduled)
        self._topwidget = None # the Toplevel holding the tip (None when hidden)
    def _on_enter(self, event=None): # pylint:disable=unused-argument
        """ Schedule on an enter event """
        self._schedule()
    def _on_leave(self, event=None): # pylint:disable=unused-argument
        """ remove schedule on a leave event """
        self._unschedule()
        self._hide()
    def _schedule(self):
        """ Show the tooltip after wait period """
        self._unschedule()
        self._ident = self._widget.after(self._waittime, self._show)
    def _unschedule(self):
        """ Hide the tooltip """
        # cancel any pending show callback so a tip scheduled before the
        # pointer left the widget never appears
        id_ = self._ident
        self._ident = None
        if id_:
            self._widget.after_cancel(id_)
    def _show(self):
        """ Show the tooltip """
        def tip_pos_calculator(widget, label,
                               *,
                               tip_delta=(10, 5), pad=(5, 3, 5, 3)):
            """ Calculate the tooltip position """
            s_width, s_height = widget.winfo_screenwidth(), widget.winfo_screenheight()
            width, height = (pad[0] + label.winfo_reqwidth() + pad[2],
                             pad[1] + label.winfo_reqheight() + pad[3])
            mouse_x, mouse_y = widget.winfo_pointerxy()
            x_1, y_1 = mouse_x + tip_delta[0], mouse_y + tip_delta[1]
            x_2, y_2 = x_1 + width, y_1 + height
            x_delta = x_2 - s_width
            if x_delta < 0:
                x_delta = 0
            y_delta = y_2 - s_height
            if y_delta < 0:
                y_delta = 0
            offscreen = (x_delta, y_delta) != (0, 0)
            if offscreen:
                # flip the tip to the other side of the pointer on the axis
                # that would overflow the screen
                if x_delta:
                    x_1 = mouse_x - tip_delta[0] - width
                if y_delta:
                    y_1 = mouse_y - tip_delta[1] - height
            offscreen_again = y_1 < 0 # out on the top
            if offscreen_again:
                # No further checks will be done.
                # TIP:
                # A further mod might auto-magically augment the wrap length when the tooltip is
                # too high to be kept inside the screen.
                y_1 = 0
            return x_1, y_1
        background = self._background
        pad = self._pad
        widget = self._widget
        # Creates a top level window
        self._topwidget = tk.Toplevel(widget)
        if platform.system() == "Darwin":
            # For Mac OS
            self._topwidget.tk.call("::tk::unsupported::MacWindowStyle",
                                    "style", self._topwidget._w, # pylint:disable=protected-access
                                    "help", "none")
        # Leaves only the label and removes the app window
        self._topwidget.wm_overrideredirect(True)
        win = tk.Frame(self._topwidget,
                       background=background,
                       borderwidth=0)
        label = tk.Label(win,
                         text=self._text,
                         justify=tk.LEFT,
                         background=background,
                         relief=tk.SOLID,
                         borderwidth=0,
                         wraplength=self._wraplength)
        label.grid(padx=(pad[0], pad[2]),
                   pady=(pad[1], pad[3]),
                   sticky=tk.NSEW)
        win.grid()
        xpos, ypos = tip_pos_calculator(widget, label)
        self._topwidget.wm_geometry("+%d+%d" % (xpos, ypos))
    def _hide(self):
        """ Hide the tooltip """
        topwidget = self._topwidget
        if topwidget:
            topwidget.destroy()
        self._topwidget = None
class MultiOption(ttk.Checkbutton): # pylint: disable=too-many-ancestors
    """ Similar to the standard :class:`ttk.Radio` widget, but with the ability to select
    multiple pre-defined options. Selected options are generated as `nargs` for the argument
    parser to consume.
    Parameters
    ----------
    parent: :class:`ttk.Frame`
        The tkinter parent widget for the check button
    value: str
        The raw option value for this check button
    variable: :class:`tkinter.StingVar`
        The master variable for the group of check buttons that this check button will belong to.
        The output of this variable will be a string containing a space separated list of the
        selected check button options
    """
    def __init__(self, parent, value, variable, **kwargs):
        self._tk_var = tk.BooleanVar()
        # pre-check the button when this option is the master variable's
        # current value
        self._tk_var.set(value == variable.get())
        super().__init__(parent, variable=self._tk_var, **kwargs)
        self._value = value
        self._master_variable = variable
        # keep this button and the space-separated master list in sync in
        # both directions
        self._tk_var.trace("w", self._on_update)
        self._master_variable.trace("w", self._on_master_update)
    @property
    def _master_list(self):
        """ list: The contents of the check box group's :attr:`_master_variable` in list form.
        Selected check boxes will appear in this list. """
        retval = self._master_variable.get().split()
        # NOTE(review): logging.Logger has no trace() by default — the project
        # presumably registers a custom TRACE level; confirm.
        logger.trace(retval)
        return retval
    @property
    def _master_needs_update(self):
        """ bool: ``True`` if :attr:`_master_variable` requires updating otherwise ``False``. """
        active = self._tk_var.get()
        retval = ((active and self._value not in self._master_list) or
                  (not active and self._value in self._master_list))
        logger.trace(retval)
        return retval
    def _on_update(self, *args): # pylint: disable=unused-argument
        """ Update the master variable on a check button change.
        The value for this checked option is added or removed from the :attr:`_master_variable`
        on a ``True``, ``False`` change for this check button.
        Parameters
        ----------
        args: tuple
            Required for variable callback, but unused
        """
        # guard prevents the two traces from ping-ponging updates forever
        if not self._master_needs_update:
            return
        new_vals = self._master_list + [self._value] if self._tk_var.get() else [
            val
            for val in self._master_list
            if val != self._value]
        val = " ".join(new_vals)
        logger.trace("Setting master variable to: %s", val)
        self._master_variable.set(val)
    def _on_master_update(self, *args): # pylint: disable=unused-argument
        """ Update the check button on a master variable change (e.g. load .fsw file in the GUI).
        The value for this option is set to ``True`` or ``False`` depending on it's existence in
        the :attr:`_master_variable`
        Parameters
        ----------
        args: tuple
            Required for variable callback, but unused
        """
        if not self._master_needs_update:
            return
        state = self._value in self._master_list
        logger.trace("Setting '%s' to %s", self._value, state)
        self._tk_var.set(state)
class PopupProgress(tk.Toplevel):
    """ A simple pop up progress bar that appears at the center of the root window.
    When this is called, the root will be disabled until the :func:`stop` method is called.
    Parameters
    ----------
    title: str
        The title to appear above the progress bar
    total: int or float
        The total count of items for the progress bar
    Example
    -------
    >>> total = 100
    >>> progress = PopupProgress("My title...", total)
    >>> for i in range(total):
    >>>     progress.step(1)
    >>> progress.stop()
    """
    def __init__(self, title, total):
        super().__init__()
        self._total = total
        if platform.system() == "Darwin":  # For Mac OS
            # Remove the window decorations via the unsupported macOS styling
            # hook so only the progress popup content shows.
            self.tk.call("::tk::unsupported::MacWindowStyle",
                         "style", self._w,  # pylint:disable=protected-access
                         "help", "none")
        # Leaves only the label and removes the app window
        self.wm_overrideredirect(True)
        self.attributes('-topmost', 'true')
        self.transient()
        self._lbl_title = self._set_title(title)
        self._progress_bar = self._get_progress_bar()
        # Screen coordinates of the root window's top-left corner.
        offset = np.array((self.master.winfo_rootx(), self.master.winfo_rooty()))
        # TODO find way to get dimensions of the pop up without it flicking onto the screen
        self.update_idletasks()
        # Center the popup over the root window.
        center = np.array((
            (self.master.winfo_width() // 2) - (self.winfo_width() // 2),
            (self.master.winfo_height() // 2) - (self.winfo_height() // 2))) + offset
        self.wm_geometry("+{}+{}".format(*center))
        get_config().set_cursor_busy()
        # Grab all events so the rest of the GUI is disabled while we run.
        self.grab_set()
    @property
    def progress_bar(self):
        """ :class:`tkinter.ttk.Progressbar`: The progress bar object within the pop up window. """
        return self._progress_bar
    def _set_title(self, title):
        """ Set the initial title of the pop up progress bar.
        Parameters
        ----------
        title: str
            The title to appear above the progress bar
        Returns
        -------
        :class:`tkinter.ttk.Label`
            The heading label for the progress bar
        """
        frame = ttk.Frame(self)
        frame.pack(side=tk.TOP, padx=5, pady=5)
        lbl = ttk.Label(frame, text=title)
        lbl.pack(side=tk.TOP, pady=(5, 0), expand=True, fill=tk.X)
        return lbl
    def _get_progress_bar(self):
        """ Set up the progress bar with the supplied total.
        Returns
        -------
        :class:`tkinter.ttk.Progressbar`
            The configured progress bar for the pop up window
        """
        frame = ttk.Frame(self)
        frame.pack(side=tk.BOTTOM, padx=5, pady=(0, 5))
        pbar = ttk.Progressbar(frame,
                               length=400,
                               maximum=self._total,
                               mode="determinate")
        pbar.pack(side=tk.LEFT)
        return pbar
    def step(self, amount):
        """ Increment the progress bar.
        Parameters
        ----------
        amount: int or float
            The amount to increment the progress bar by
        """
        self._progress_bar.step(amount)
        self._progress_bar.update_idletasks()
    def stop(self):
        """ Stop the progress bar, re-enable the root window and destroy the pop up window. """
        self._progress_bar.stop()
        get_config().set_cursor_default()
        # Release the event grab so the root window is usable again.
        self.grab_release()
        self.destroy()
    def update_title(self, title):
        """ Update the title that displays above the progress bar.
        Parameters
        ----------
        title: str
            The title to appear above the progress bar
        """
        self._lbl_title.config(text=title)
        self._lbl_title.update_idletasks()
| 2.421875 | 2 |
rcj_soccer/views/competition.py | rcjaustralia/rcj-soccer-platform | 1 | 12769757 | from rcj_soccer.base import app, db
from rcj_soccer.models import Competition
from flask import render_template, jsonify, request
from datetime import datetime
from dateutil.parser import parse
from rcj_soccer.util import config, obj_to_dict
import logging
logger = logging.getLogger(__name__)
@app.route("/")
def list_competitions():
competitions = Competition.query.filter_by(is_active=True)\
.order_by(Competition.start_date.desc(), Competition.name).all()
return render_template("competitions.html", competitions=competitions,
year=datetime.utcnow().year)
@app.route("/api/competitions")
def api_list_competitions():
competitions = Competition.query.order_by(Competition.start_date).all()
data = []
for competition in competitions:
logger.warn("{0}".format(str(dir(competition))))
data.append(obj_to_dict(competition))
return jsonify(data)
@app.route("/api/competitions/<comp>/<token>",
methods=["GET", "POST", "DELETE", "PUT"])
def api_competition(comp, token):
if request.method == "GET":
competition = Competition.query.filter_by(id=comp).one()
return jsonify(obj_to_dict(competition))
if token != config.get("api", "token"):
return jsonify({"error": "invalid token"})
if request.method == "POST":
body = request.get_json()
competition = Competition()
competition.id = comp
competition.name = body["name"]
competition.fb_link = body["fb_link"]
competition.twitter_link = body["twitter_link"]
competition.event_sponsor_link = body["event_sponsor"]["link"]
competition.event_sponsor_img = body["event_sponsor"]["img"]
competition.is_active = True
competition.start_date = parse(body["start_date"])
db.session.add(competition)
db.session.commit()
return jsonify({"status": "created"})
elif request.method == "DELETE":
competition = Competition.query.filter_by(id=comp).one()
db.session.delete(competition)
db.session.commit()
return jsonify({"status": "deleted"})
elif request.method == "PUT":
competition = Competition.query.filter_by(id=comp).one()
body = request.get_json()
if "name" in body:
competition.name = body["name"]
if "fb_link" in body:
competition.fb_link = body["fb_link"]
if "twitter_link" in body:
competition.twitter_link = body["twitter_link"]
if "active" in body:
competition.is_active = body["active"]
if "start_date" in body:
competition.start_date = parse(body["start_date"])
if "event_sponsor" in body:
if "link" in body["event_sponsor"]:
competition.event_sponsor_link = body["event_sponsor"]["link"]
if "img" in body["event_sponsor"]:
competition.event_sponsor_img = body["event_sponsor"]["img"]
db.session.commit()
return jsonify(obj_to_dict(competition))
def get_competition(id):
    """Return the active ``Competition`` with the given id, or ``None``."""
    return Competition.query.filter_by(id=id, is_active=True).first()
| 2.359375 | 2 |
toy/hello.py | bheavner/python_toy | 0 | 12769758 | """Hello world module"""
def hello_func():
    """Return the greeting string ``'Hello!'``."""
    greeting = "Hello!"
    return greeting
def main():
    """Print the greeting produced by :func:`hello_func`."""
    greeting = hello_func()
    print(greeting)


if __name__ == '__main__':
    main()
| 2.46875 | 2 |
jax_cfd/spectral/time_stepping_test.py | rainwangphy/jax-cfd | 0 | 12769759 | <filename>jax_cfd/spectral/time_stepping_test.py<gh_stars>0
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for time_stepping."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import tree_util
from jax.config import config
import jax.numpy as jnp
from jax_cfd.base import funcutils
from jax_cfd.spectral import time_stepping
import numpy as np
def harmonic_oscillator(x0, t):
  """Closed-form harmonic oscillator trajectory from initial state ``x0``.

  Interprets ``x0`` as (position, velocity)-like coordinates, recovers the
  amplitude and phase, and evaluates the solution at time(s) ``t``.
  """
  phase = jnp.arctan(x0[0] / x0[1])
  amplitude = jnp.linalg.norm(x0, ord=2, axis=0)
  sin_part = jnp.sin(t + phase)
  cos_part = jnp.cos(t + phase)
  return amplitude * jnp.stack([sin_part, cos_part])
class CustomODE(time_stepping.ImplicitExplicitODE):
  """Concrete ImplicitExplicitODE assembled from user-supplied callables."""
  def __init__(self, explicit_terms, implicit_terms, implicit_solve):
    # explicit_terms / implicit_terms: state -> time-derivative contribution.
    # implicit_solve(x, eta): solves y - eta * implicit_terms(y) = x for y
    # (this contract is checked by test_implicit_solve below).
    self.explicit_terms = explicit_terms
    self.implicit_terms = implicit_terms
    self.implicit_solve = implicit_solve
# Each problem supplies the three ODE callables, integration settings, the
# initial state, the analytic solution, and per-time-stepper error tolerances
# (one tolerance per entry of ALL_TIME_STEPPERS, in the same order).
ALL_TEST_PROBLEMS = [
    # x(t) = np.ones(10)
    dict(testcase_name='_zero_derivative',
         explicit_terms=lambda x: 0 * x,
         implicit_terms=lambda x: 0 * x,
         implicit_solve=lambda x, eta: x,
         dt=1e-2,
         inner_steps=10,
         outer_steps=5,
         initial_state=np.ones(10),
         closed_form=lambda x0, t: x0,
         tolerances=[1e-12] * 5),
    # x(t) = 5 * t * np.ones(3)
    dict(testcase_name='_constant_derivative',
         explicit_terms=lambda x: 5 * jnp.ones_like(x),
         implicit_terms=lambda x: 0 * x,
         implicit_solve=lambda x, eta: x,
         dt=1e-2,
         inner_steps=10,
         outer_steps=5,
         initial_state=np.ones(3),
         closed_form=lambda x0, t: x0 + 5 * t,
         tolerances=[1e-12] * 5),
    # x(t) = np.arange(3) * np.exp(t)
    # Uses explicit terms only.
    dict(testcase_name='_linear_derivative_explicit',
         explicit_terms=lambda x: x,
         implicit_terms=lambda x: 0 * x,
         implicit_solve=lambda x, eta: x,
         dt=1e-2,
         inner_steps=20,
         outer_steps=5,
         initial_state=np.arange(3.0),
         closed_form=lambda x0, t: np.arange(3) * jnp.exp(t),
         tolerances=[5e-2, 1e-4, 1e-6, 1e-9, 1e-6]),
    # x(t) = np.arange(3) * np.exp(t)
    # Uses implicit terms only.
    dict(testcase_name='_linear_derivative_implicit',
         explicit_terms=lambda x: 0 * x,
         implicit_terms=lambda x: x,
         implicit_solve=lambda x, eta: x / (1 - eta),
         dt=1e-2,
         inner_steps=20,
         outer_steps=5,
         initial_state=np.arange(3.0),
         closed_form=lambda x0, t: np.arange(3) * jnp.exp(t),
         tolerances=[5e-2, 5e-5, 1e-5, 1e-5, 3e-5]),
    # x(t) = np.arange(3) * np.exp(t)
    # Splits the equation into an implicit and explicit term.
    dict(testcase_name='_linear_derivative_semi_implicit',
         explicit_terms=lambda x: x / 2,
         implicit_terms=lambda x: x / 2,
         implicit_solve=lambda x, eta: x / (1 - eta / 2),
         dt=1e-2,
         inner_steps=20,
         outer_steps=5,
         initial_state=np.arange(3) * np.exp(0),
         closed_form=lambda x0, t: np.arange(3.0) * jnp.exp(t),
         tolerances=[1e-4, 2e-5, 2e-6, 1e-6, 2e-5]),
    dict(testcase_name='_harmonic_oscillator_explicit',
         explicit_terms=lambda x: jnp.stack([x[1], -x[0]]),
         implicit_terms=jnp.zeros_like,
         implicit_solve=lambda x, eta: x,
         dt=1e-2,
         inner_steps=20,
         outer_steps=5,
         initial_state=np.ones(2),
         closed_form=harmonic_oscillator,
         tolerances=[1e-2, 3e-5, 6e-8, 5e-11, 6e-8]),
    dict(testcase_name='_harmonic_oscillator_implicit',
         explicit_terms=jnp.zeros_like,
         implicit_terms=lambda x: jnp.stack([x[1], -x[0]]),
         implicit_solve=lambda x, eta: jnp.stack(  # pylint: disable=g-long-lambda
             [x[0] + eta * x[1], x[1] - eta * x[0]]) / (1 + eta ** 2),
         dt=1e-2,
         inner_steps=20,
         outer_steps=5,
         initial_state=np.ones(2),
         closed_form=harmonic_oscillator,
         tolerances=[1e-2, 2e-5, 2e-6, 1e-6, 6e-6]),
]
# Order matters: the per-problem `tolerances` lists above are zipped against
# this sequence.
ALL_TIME_STEPPERS = [
    time_stepping.backward_forward_euler,
    time_stepping.crank_nicolson_rk2,
    time_stepping.crank_nicolson_rk3,
    time_stepping.crank_nicolson_rk4,
    time_stepping.imex_rk_sil3,
]
class TimeSteppingTest(parameterized.TestCase):
  """Accuracy and API tests for the implicit-explicit time steppers."""
  @parameterized.named_parameters(ALL_TEST_PROBLEMS)
  def test_implicit_solve(
      self,
      explicit_terms,
      implicit_terms,
      implicit_solve,
      dt,
      inner_steps,
      outer_steps,
      initial_state,
      closed_form,
      tolerances,
  ):
    """Tests that time integration is accurate for a range of test cases."""
    del dt, explicit_terms, inner_steps, outer_steps, closed_form  # unused
    del tolerances  # unused
    # Verifies that `implicit_solve` solves (y - eta * F(y)) = x
    # This does not test the integrator, but rather verifies that the test
    # case is valid.
    eta = 0.3
    solved_state = implicit_solve(initial_state, eta)
    reconstructed_state = solved_state - eta * implicit_terms(solved_state)
    np.testing.assert_allclose(reconstructed_state, initial_state)
  @parameterized.named_parameters(ALL_TEST_PROBLEMS)
  def test_integration(
      self,
      explicit_terms,
      implicit_terms,
      implicit_solve,
      dt,
      inner_steps,
      outer_steps,
      initial_state,
      closed_form,
      tolerances,
  ):
    # Compute closed-form solution.
    time = dt * inner_steps * (1 + np.arange(outer_steps))
    expected = jax.vmap(closed_form, in_axes=(None, 0))(
        initial_state, time)
    # Compute trajectory using time-stepper.
    # Each stepper is checked against its own tolerance from the test problem.
    for atol, time_stepper in zip(tolerances, ALL_TIME_STEPPERS):
      with self.subTest(time_stepper.__name__):
        equation = CustomODE(explicit_terms, implicit_terms, implicit_solve)
        semi_implicit_step = time_stepper(equation, dt)
        integrator = funcutils.trajectory(
            funcutils.repeated(semi_implicit_step, inner_steps), outer_steps)
        _, actual = integrator(initial_state)
        np.testing.assert_allclose(expected, actual, atol=atol, rtol=0)
  def test_pytree_state(self):
    # Steppers must accept arbitrary pytrees as state, not just arrays.
    equation = CustomODE(
        explicit_terms=lambda x: tree_util.tree_map(jnp.zeros_like, x),
        implicit_terms=lambda x: tree_util.tree_map(jnp.zeros_like, x),
        implicit_solve=lambda x, eta: x,
    )
    u0 = {'x': 1.0, 'y': 1.0}
    for time_stepper in ALL_TIME_STEPPERS:
      with self.subTest(time_stepper.__name__):
        u1 = time_stepper(equation, 1.0)(u0)
        self.assertEqual(u0, u1)
if __name__ == '__main__':
  # Use 64-bit floats so the tight tolerances above are achievable.
  config.update('jax_enable_x64', True)
  absltest.main()
| 2.25 | 2 |
mangadex/routes/base.py | mansuf/mangadex.py | 0 | 12769760 | import sys
import aiohttp
from .. import __version__
# Public API of this module.
__all__ = (
    'BaseRoute', 'RequireLogin', 'GET',
    'POST', 'PUT', 'DELETE',
)
class BaseRoute:
    """Base class for MangaDex API routes.

    Subclasses set :attr:`method` to the HTTP verb they represent.
    """
    BASE_URL = 'https://api.mangadex.org'
    # Identify the library, Python and aiohttp versions to the API.
    user_agent = 'mangadex.py (https://github.com/mansuf/mangadex.py {0}) Python/{1[0]}.{1[1]} aiohttp/{2}'.format(
        __version__, sys.version_info, aiohttp.__version__
    )
    headers = {"User-Agent": user_agent}

    def build_request(self, path: str):
        """Return the keyword arguments for an aiohttp request to ``path``."""
        request_kwargs = {
            "method": self.method,
            "url": self.BASE_URL + path,
            "headers": self.headers,
        }
        return request_kwargs
class RequireLogin:
    """Mixin that adds bearer-token authentication to a route.

    Expects the consuming class to provide a ``headers`` dict.
    """

    def set_auth_token(self, token: str):
        """Store ``token`` as the ``Authorization`` header.

        Raises ``ValueError`` if ``token`` is not a string.
        """
        if isinstance(token, str):
            self.headers['Authorization'] = f'Bearer {token}'
        else:
            raise ValueError('token must be str')
# One route subclass per HTTP verb; each only fixes the ``method`` attribute
# consumed by ``BaseRoute.build_request``.
class GET(BaseRoute):
    method = 'GET'
class POST(BaseRoute):
    method = 'POST'
class PUT(BaseRoute):
    method = 'PUT'
class DELETE(BaseRoute):
    method = 'DELETE'
Python/Fundamentals/Extras/Stores_And_Products/store_test.py | handtjaxon1/Coding-Dojo-Development | 0 | 12769761 | <filename>Python/Fundamentals/Extras/Stores_And_Products/store_test.py<gh_stars>0
from store import *
from product import *
# Create a store and some products
convenience_store = Store("Kwik E-Mart")
donut = Product("Donut", 1.99, ProductCategory.Food)
beer = Product("Beer", 6.99, ProductCategory.Beverages)
pacifier = Product("Pacifier", 4.99, ProductCategory.Baby)
broom = Product("Broom", 14.99, ProductCategory.Cleaning)
laptop = Product("Laptop", 499.99, ProductCategory.Electronics)
# TODO Could create a function that takes a list of products so we could add products in one function call
# Add products
convenience_store.add_product(donut)
convenience_store.add_product(beer)
convenience_store.add_product(pacifier)
convenience_store.add_product(broom)
convenience_store.add_product(laptop)
# Test the various store functionalities and display the changes at each stage
convenience_store.display_products()
# Apply a 2% price increase across the store, then put electronics on a
# 25% clearance discount.
convenience_store.inflation(0.02)
convenience_store.display_products()
convenience_store.set_clearance(ProductCategory.Electronics, 0.25)
convenience_store.display_products()
# Sell the last product (the laptop) and the second product (the beer).
convenience_store.sell_product(len(convenience_store.products) - 1)
convenience_store.sell_product(1)
convenience_store.display_products()
mac/pyobjc-core/libffi-src/tests/dejagnu.py | albertz/music-player | 132 | 12769762 | #!/usr/bin/python
"""
A very crude emulator of dejagnu, just enough to integrate the libbfi
unittests into the pyobjc ones.
"""
import os
import re
import sys
import signal
from fnmatch import fnmatch
import unittest
from distutils.util import get_platform
# Matches the two dejagnu directives embedded in testcase sources:
#   {dg-do run {options}}  -> captured in groups 1 and 2
#   {dg-output "text"}     -> captured in groups 3 and 4
gDgCommands=re.compile(r'''
(?:{\s*(dg-do)\s*run\s*({[^}]*})?\s*})
|
(?:{\s*(dg-output)\s*"([^"]*)"\s*})
''',
re.VERBOSE|re.MULTILINE)
def signame(code):
    """Return the ``SIG*`` constant name for a numeric signal.

    Falls back to returning ``code`` unchanged when no matching constant
    exists. Names like ``SIG_SETMASK`` (underscore after ``SIG``) are
    excluded on purpose.
    """
    matches = (name for name in dir(signal)
               if name.startswith('SIG') and name[3] != '_'
               and getattr(signal, name) == code)
    return next(matches, code)
def exitCode2Description(code):
    """
    Convert the exit code as returned by os.popen().close() to a string
    """
    if os.WIFEXITED(code):
        return 'exited with status %s' % (os.WEXITSTATUS(code),)
    if os.WIFSIGNALED(code):
        signum = os.WTERMSIG(code)
        return 'crashed with signal %s [%s]' % (signame(signum), signum)
    # Neither a normal exit nor a signal (e.g. a stopped process).
    return 'exit code %s' % (code,)
def platform_matches(matchstr):
    """Fnmatch ``matchstr`` against the host's darwin platform triple.

    This is a hack: the triple is inferred solely from the byte order.
    """
    little_endian = sys.byteorder == 'little'
    triple = 'i386-apple-darwin' if little_endian else 'powerpc-apple-darwin'
    return fnmatch(triple, matchstr)
def parseDG(fdata):
    """Parse dejagnu directives out of a testcase's source text.

    Returns a list of ``('run', options)`` and ``('expect', text)`` tuples in
    the order the directives appear in ``fdata``.
    """
    # Local import keeps the module's top-level imports untouched.
    import codecs

    result = []
    for item in gDgCommands.findall(fdata):
        if item[0] == 'dg-do':
            result.append(('run', item[1]))
        elif item[2] == 'dg-output':
            # FIX: str.decode('string_escape') exists only on Python 2 and
            # raises AttributeError on Python 3. codecs.decode with
            # 'unicode_escape' interprets the same backslash escapes on both
            # versions (they differ only for non-ASCII input, which these
            # ASCII testcase directives do not use).
            result.append(('expect', codecs.decode(item[3], 'unicode_escape')))
    return result
class DgTestCase (unittest.TestCase):
    """A unittest case that compiles and runs a single dejagnu C testcase.

    The testcase is compiled at several optimization levels and its stdout is
    compared against the ``dg-output`` directives embedded in the source.
    """
    def __init__(self, filename):
        unittest.TestCase.__init__(self)
        # Path to the .c testcase file to compile and run.
        self.filename = filename
    #archOption = "-arch ppc"
    #archOption = "-arch ppc64"
    #archOption = "-arch i386"
    archOption = "-arch x86_64"
    #archOption = ""
    compileOptionsBase = "-g -DMACOSX -Iinclude -o /tmp/test.bin -lffi"
    compileOptionsList = ( # HACK ALERT: Yes, there are better ways to do this, but this is easy and extremely flexible
        "%s %s %s" % (compileOptionsBase, archOption, "-O0"),
        "%s %s %s" % (compileOptionsBase, archOption, "-O1"),
        "%s %s %s" % (compileOptionsBase, archOption, "-O2"),
        "%s %s %s" % (compileOptionsBase, archOption, "-O3"),
        "%s %s %s" % (compileOptionsBase, archOption, "-Os"),
        "%s %s %s" % (compileOptionsBase, archOption, "-Oz"), # Note: Apple-Only, see gcc man page for details
    )
    def runTest(self):
        # Parse the dg-do/dg-output directives from the testcase source.
        script = parseDG(open(self.filename).read())
        output = []
        for command, data in script:
            if command == 'run':
                action = 'run'
                action_data = data
            if command == 'expect':
                output.append(data)
        output = ''.join(output)
        output = output.replace('\\', '')
        # NOTE(review): ``action_data`` is unbound if no dg-do directive was
        # found -- presumably every testcase has one; confirm.
        d = action_data.split()
        if d and d[1] == 'target':
            for item in d[2:]:
                if platform_matches(item):
                    break
            else:
                # Test shouldn't be run on this platform
                return
        # NOTE: We're ignoring the xfail data for now, none of the
        # testcases are supposed to fail on darwin.
        for compileOptions in self.compileOptionsList:
            self.compileTestCase(compileOptions)
            data = self.runTestCase()
            if output != '':
                self.assertEquals(data.rstrip(), output.rstrip())
        os.unlink('/tmp/test.bin')
    def shortDescription(self):
        # "dejagnu.<suite-dir>.<testcase-name>"
        fn = os.path.basename(self.filename)[:-2]
        dn = os.path.basename(os.path.dirname(self.filename))
        return "dejagnu.%s.%s"%(dn, fn)
    def compileTestCase(self, compileOptions):
        """Compile the testcase with ``compileOptions``; fail on any cc error."""
        # libdir = os.path.join('build', 'temp.%s-%d.%d'%(get_platform(), sys.version_info[0], sys.version_info[1]), 'libffi-src')
        # libffiobjects = self.object_files(libdir)
        commandline='cc %s %s 2>&1' % (compileOptions, self.filename)
        fp = os.popen(commandline)
        data = fp.read()
        xit = fp.close()
        if xit != None:
            self.fail("Compile failed[%s]:\n%s"%(xit, data))
    def runTestCase(self):
        """Run the compiled binary and return its stdout; fail on a crash."""
        # Force eager symbol binding so missing symbols fail at launch.
        os.environ['DYLD_BIND_AT_LAUNCH'] = '1'
        fp = os.popen('/tmp/test.bin', 'r')
        del os.environ['DYLD_BIND_AT_LAUNCH']
        data = fp.read()
        xit = fp.close()
        if xit != None:
            self.fail("Running failed (%s)"%(exitCode2Description(xit),))
        return data
    def object_files(self, basedir):
        """Return the paths of all .o files below ``basedir``."""
        result = []
        for dirpath, dirnames, filenames in os.walk(basedir):
            for fn in filenames:
                if fn.endswith('.o'):
                    result.append(os.path.join(dirpath, fn))
        return result
def testSuiteForDirectory(dirname):
    """Build a TestSuite from every ``.c`` testcase file in ``dirname``."""
    selected = []
    for name in os.listdir(dirname):
        if not name.endswith('.c'):
            continue
        case = DgTestCase(os.path.join(dirname, name))
        # A non-empty ``alltests`` list (from the command line) filters cases.
        if alltests and case.shortDescription() not in alltests:
            continue
        selected.append(case)
    return unittest.TestSuite(selected)
# Populated from the command line; when non-empty, only the named tests run.
alltests = []
if __name__ == "__main__":
    alltests = sys.argv[1:]
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(testSuiteForDirectory('tests/testsuite/libffi.call'))
| 2.5 | 2 |
nirvana/data/bin2d.py | kbwestfall/BarFit | 0 | 12769763 | <filename>nirvana/data/bin2d.py
"""
Two-dimensional binning routines.
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
import warnings
import numpy as np
from matplotlib import pyplot
from vorbin.voronoi_2d_binning import voronoi_2d_binning
from .util import get_map_bin_transformations, fill_matrix
class VoronoiBinning:
    """
    Class that wraps Voronoi binning code.
    This is (mostly) copied from the MaNGA Data Analysis Pipeline.
    """
    def __init__(self):
        # Covariance matrix used by sn_calculation_covariance_matrix; set by
        # bin_index when a 2D noise array is supplied.
        self.covar = None
    def sn_calculation_no_covariance(self, index, signal, noise):
        """
        S/N calculation for independent data.
        Args:
            index (`numpy.ndarray`_):
                Indices of the measurements in a single bin.
            signal (`numpy.ndarray`_):
                The signal measurements.
            noise (`numpy.ndarray`_):
                The noise measurements.
        Returns:
            :obj:`float`: The nominal signal-to-noise reached by
            summing the measurements selected by ``index``.
        """
        return np.sum(signal[index]) / np.sqrt(np.sum(noise[index]**2))
    def sn_calculation_covariance_matrix(self, index, signal, noise):
        """
        Calculate the S/N using a full covariance matrix.
        The method uses the internal :attr:`covar`.
        Args:
            index (`numpy.ndarray`_):
                Indices of the measurements in a single bin.
            signal (`numpy.ndarray`_):
                The signal measurements.
            noise (`numpy.ndarray`_):
                The noise measurements (unused here; the variance comes from
                :attr:`covar`).
        Returns:
            :obj:`float`: The nominal signal-to-noise reached by
            summing the measurements selected by ``index``, including
            any covariance.
        """
        return np.sum(signal[index]) / np.sqrt(np.sum(self.covar[np.ix_(index, index)]))
    @classmethod
    def bin_index(cls, x, y, signal, noise, target_snr, show=False):
        """
        Bin the data and return the indices of the bins.
        Args:
            x (`numpy.ndarray`_):
                Fiducial Cartesian X position. Shape must match ``signal``.
            y (`numpy.ndarray`_):
                Fiducial Cartesian Y position. Shape must match ``signal``.
            signal (`numpy.ndarray`_):
                The signal measurements. Shape must be 1D.
            noise (`numpy.ndarray`_, `scipy.sparse.csr_matrix`_):
                The noise measurements. Shape can be 1D or 2D. If 2D, assumed
                to be a covariance matrix. The 1D size or the length along one
                axis of a 2D array must match ``signal``.
            target_snr (:obj:`float`):
                Target minimum S/N for the bins.
            show (:obj:`bool`, optional):
                Show the default plot with the binning results.
        Returns:
            `numpy.ndarray`_: An integer bin index for each position.
        Raises:
            ValueError:
                Raised if the sizes of ``x`` and ``y`` do not match, or if
                various checks of the signal, noise, and/or covariance elements
                are incorrectly matched.
        """
        self = cls()
        # Check the position input
        if signal.ndim != 1:
            raise ValueError('Signal values must be in a 1D array.')
        if signal.shape != x.shape:
            raise ValueError('Shape of signal does not match coordinates.')
        if x.shape != y.shape:
            raise ValueError('Shape of x and y coordinates do not match!')
        if noise.ndim == 1:
            if noise.shape != x.shape:
                raise ValueError('Shape of noise does not match coordinates.')
            _noise = np.atleast_1d(noise)
            self.covar = None
            sn_func = self.sn_calculation_no_covariance
        if noise.ndim == 2:
            if noise.shape[0] != x.size:
                raise ValueError('Single axis length of covariance does not match coordinates.')
            if noise.shape[0] != noise.shape[1]:
                raise ValueError('Covariance arrays must be square.')
            _noise = np.sqrt(np.diag(noise))
            self.covar = noise.copy()
            sn_func = self.sn_calculation_covariance_matrix
        # All spaxels have S/N greater than threshold, so return each
        # spaxel in its own "bin"
        if np.min(signal/_noise) > target_snr:
            warnings.warn('All pixels have enough S/N. Binning is not needed')
            return np.arange(signal.size)
        # Cannot reach the S/N using all spaxels, so return all spaxels
        # in a single bin
        sn_total = sn_func(np.arange(signal.size), signal, _noise)
        if sn_total < target_snr:
            warnings.warn('Cannot reach target S/N using all data; all data included in one bin.')
            # FIX: use an integer array for consistency with the other returns
            # (was a float array).
            return np.zeros(signal.size, dtype=int)
        # Bin the data
        try:
            binid, xNode, yNode, xBar, yBar, sn, area, scale = \
                    voronoi_2d_binning(x, y, signal, _noise, target_snr, sn_func=sn_func,
                                       plot=show)
            if show:
                pyplot.show()
        # FIX: narrowed the bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit) to ``except Exception``.
        except Exception:
            warnings.warn('Binning algorithm has raised an exception.  Assume this is because '
                          'all the spaxels should be in the same bin.')
            # FIX: was ``numpy.zeros(...)`` which raised NameError -- the
            # module is imported as ``np``.
            binid = np.zeros(signal.size, dtype=int)
        return binid
# TODO: Include an optional weight map?
class Bin2D:
r"""
A utility class for handling two-dimensional binning.
The core functionality of the class is to compute a set of transformations
using :func:`~nirvana.data.util.get_map_bin_transformations` and provide
convenience methods that apply and revert those transformations. See that
function for further documentation and argument/attribute descriptions.
Args:
spatial_shape (:obj:`tuple`, optional):
The 2D spatial shape of the mapped data. Ignored if ``binid`` is
provided.
binid (`numpy.ndarray`_, optional):
The 2D array providing the 0-indexed bin ID number associated with
each map element. Bin IDs of -1 are assumed to be ignored; no bin ID
can be less than -1. Shape is ``spatial_shape`` and its size (i.e.
the number of grid points in the map) is :math:`N_{\rm spaxel}`.
Attributes:
spatial_shape (:obj:`tuple`):
2D array shape
ubinid (`numpy.ndarray`_):
1D vector with the sorted list of *unique* bin IDs. Shape is
:math:`(N_{\rm bin},)`. If ``binid`` is not provided on
instantiation, this is None.
nbin (`numpy.ndarray`_):
1D vector with the number of spaxels in each bin. Shape is
:math:`(N_{\rm bin},)`. If ``binid`` is not provided on
instantiation, this is just a vector of ones. The number of bins can
also be determined from :attr:`bin_transform`.
ubin_indx (`numpy.ndarray`_):
The index vector used to select the unique bin values from a
flattened map of binned data, *excluding* any element with ``binid
== -1``. Shape is :math:`(N_{\rm bin},)`. If ``binid`` is not
provided on instantiation, this is identical to :attr:`grid_indx`.
grid_indx (`numpy.ndarray`_):
The index vector used to select valid grid cells in the input maps;
i.e., any grid point with a valid bin ID (``binid != -1``). Shape is
:math:`(N_{\rm valid},)`.
bin_inverse (`numpy.ndarray`_):
The index vector applied to a recover the mapped data given the
unique quantities, when used in combination with :attr:`grid_indx`.
Shape is :math:`(N_{\rm valid},)`.
bin_transform (`scipy.sparse.csr_matrix`_):
A sparse matrix used to construct the binned (averaged) quantities
from a full 2D map. Shape is :math:`(N_{\rm bin}, N_{\rm spaxel})`.
unravel (:obj:`tuple`):
The :obj:`tuple` of `numpy.ndarray`_ objects that provide the
indices of :attr:`grid_indx` in the 2D array.
"""
    def __init__(self, spatial_shape=None, binid=None):
        # If a bin ID map is given, its shape defines the spatial grid;
        # otherwise use the explicitly provided shape.
        self.spatial_shape = spatial_shape if binid is None else binid.shape
        # Compute all map <-> bin transformations in one call.
        self.ubinid, self.nbin, self.ubin_indx, self.grid_indx, self.bin_inverse, \
                self.bin_transform = get_map_bin_transformations(spatial_shape=spatial_shape,
                                                                 binid=binid)
        # 2D indices of the valid grid cells (binid != -1).
        self.unravel = np.unravel_index(self.grid_indx, self.spatial_shape)
def bin(self, data):
"""
Provided a set of mapped data, bin it according to the internal bin ID
map.
Based on the construction of :attr:`bin_transform` by
:func:`~nirvana.data.util.get_map_bin_transformations`, this computes
the average value of the data in each bin.
Args:
data (`numpy.ndarray`_):
Data to bin. Shape must match :attr:`spatial_shape`.
Returns:
`numpy.ndarray`_: A vector with the binned data.
Raises:
ValueError:
Raised if the shape of the input array is incorrect.
"""
if data.shape != self.spatial_shape:
raise ValueError('Data to rebin has incorrect shape; expected {0}, found {1}.'.format(
self.spatial_shape, data.shape))
return self.bin_transform.dot(data.ravel())
def deriv_bin(self, data, deriv):
"""
Provided a set of mapped data, rebin it to match the internal vectors
and propagate the derivatives in the data.
This method is most often used to bin maps of model data to match the
binning of observed data.
This method is identical to :func:`bin`, except that it allows for
propagation of derivatives of the provided model with respect to its
parameters. The propagation of derivatives for any single parameter is
identical to calling :func:`bin` on that derivative map.
Args:
data (`numpy.ndarray`_):
Data to rebin. Shape must match :attr:`spatial_shape`.
deriv (`numpy.ndarray`_):
If the input data is a model, this provides the derivatives of
model w.r.t. its parameters. The shape must be 3D and the first
two axes of the array must have a shape that matches
:attr:`spatial_shape`.
Returns:
:obj:`tuple`: Two `numpy.ndarray`_ arrays. The first provides the
vector with the data rebinned to match the number of unique
measurements available, and the second is a 2D array with the binned
derivatives for each model parameter.
Raises:
ValueError:
Raised if the spatial shapes of the input arrays are incorrect.
"""
if data.shape != self.spatial_shape:
raise ValueError('Data to rebin has incorrect shape; expected {0}, found {1}.'.format(
self.spatial_shape, data.shape))
if deriv.shape[:2] != self.spatial_shape:
raise ValueError('Derivative shape is incorrect; expected {0}, found {1}.'.format(
self.spatial_shape, deriv.shape[:2]))
return self.bin_transform.dot(data.ravel()), \
np.stack(tuple([self.bin_transform.dot(deriv[...,i].ravel())
for i in range(deriv.shape[-1])]), axis=-1)
def bin_covar(self, covar):
"""
Calculate the covariance in the binned data provided the unbinned
covariance.
Args:
covar (`numpy.ndarray`_, `scipy.sparse.csr_matrix`_):
Covariance in the unbinned data.
Returns:
`scipy.sparse.csr_matrix`_: Covariance in the binned data.
"""
return self.bin_transform.dot(covar.dot(self.bin_transform.T))
# TODO: Include error calculations?
def bin_moments(self, norm, center, stddev):
r"""
Bin a set of Gaussian moments.
Assuming the provided data are the normalization, mean, and standard
deviation of a set of Gaussian profiles, this method performs a nominal
calculation of the moments of the summed Gaussian profile.
.. note::
Any of the input arguments can be None, but at least one of them
cannot be!
Args:
norm (`numpy.ndarray`_):
Gaussian normalization. Shape must match :attr:`spatial_shape`.
center (`numpy.ndarray`_):
Gaussian center. Shape must match :attr:`spatial_shape`.
stddev (`numpy.ndarray`_, optional):
Gaussian standard deviation. Shape must match
:attr:`spatial_shape`.
Returns:
:obj:`tuple`: A tuple of three `numpy.ndarray`_ objects with the
binned normalization, mean, and standard deviation of the summed
profile. If ``norm`` is None on input, the returned 0th moment is 1
everywhere. If ``center`` is None on input, the returned 1st moment
is 0 everywhere. If ``stddev`` is None on input, the returned 2nd
moment is 1 everywhere.
"""
if all([x is None for x in [norm, center, stddev]]):
raise ValueError('At least one parameter must not be None.')
shape = [x.shape for x in [norm, center, stddev] if x is not None][0]
if not all([x is None or x.shape == shape for x in [norm, center, stddev]]):
raise ValueError('Shape of all input arrays must match.')
_norm = np.ones(shape, dtype=float) if norm is None else norm
mom0 = self.bin(_norm)
if center is None and stddev is None:
return mom0, None, None
inv_mom0 = 1/(mom0 + (mom0 == 0.))
_center = np.zeros(shape, dtype=float) if center is None else center
mom1 = self.bin(_norm*_center) * inv_mom0
if stddev is None:
return mom0, mom1, None
_var = _center**2 + stddev**2
mom2 = self.bin(_norm*_var) * inv_mom0 - mom1**2
mom2[mom2 < 0] = 0.
return mom0, mom1, np.sqrt(mom2)
    def deriv_bin_moments(self, norm, center, stddev, dnorm, dcenter, dstddev):
        r"""
        Bin a set of Gaussian moments and propagate the calculation for the
        derivatives.

        This method is identical to :func:`bin_moments`, except it includes the
        propagation of the derivatives.

        .. note::

            Any of the first three input arguments can be None, but at least one of them
            cannot be!

        Args:
            norm (`numpy.ndarray`_):
                Gaussian normalization. Shape must match :attr:`spatial_shape`.
                Can be None.
            center (`numpy.ndarray`_):
                Gaussian center. Shape must match :attr:`spatial_shape`. Can
                be None.
            stddev (`numpy.ndarray`_, optional):
                Gaussian standard deviation. Shape must match
                :attr:`spatial_shape`. Can be None.
            dnorm (`numpy.ndarray`_):
                Derivative of the Gaussian normalization with respect to model
                parameters. Shape of the first two axes must match
                :attr:`spatial_shape`. Can be None.
            dcenter (`numpy.ndarray`_):
                Derivative of the Gaussian center with respect to model
                parameters. Shape of the first two axes must match
                :attr:`spatial_shape`. Can be None.
            dstddev (`numpy.ndarray`_, optional):
                Derivative of the Gaussian standard deviation with respect to
                model parameters. Shape of the first two axes must match
                :attr:`spatial_shape`. Can be None.

        Returns:
            :obj:`tuple`: A tuple of six objects: the binned 0th, 1st, and 2nd
            moments of the summed profile, followed by their derivatives with
            respect to the model parameters. Moments (and derivatives) that
            cannot be computed from the provided inputs are returned as None.
        """
        # Check input
        if all([x is None for x in [norm, center, stddev]]):
            raise ValueError('At least one parameter must not be None.')
        shape = [x.shape for x in [norm, center, stddev] if x is not None][0]
        if not all([x is None or x.shape == shape for x in [norm, center, stddev]]):
            raise ValueError('Shape of all input arrays must match.')
        # A derivative can only be propagated if its parent quantity exists.
        if norm is None and dnorm is not None:
            raise ValueError('Must provide normalization if providing its derivative.')
        if center is None and dcenter is not None:
            raise ValueError('Must provide center if providing its derivative.')
        if stddev is None and dstddev is not None:
            raise ValueError('Must provide standard deviation if providing its derivative.')
        # Number of model parameters, read from the last axis of any provided
        # derivative array; all derivative arrays must have shape
        # ``shape + (npar,)``.
        npar = [x.shape[-1] for x in [dnorm, dcenter, dstddev] if x is not None][0]
        if any([x.shape != shape + (npar,) for x in [dnorm, dcenter, dstddev] if x is not None]):
            raise ValueError('All derivative arrays must have the same shape.')
        _norm = np.ones(shape, dtype=float) if norm is None else norm
        mom0 = self.bin(_norm)
        # d(mom0)/dp: binning is linear, so bin each derivative plane.
        dmom0 = None if dnorm is None else \
                    np.stack(tuple([self.bin_transform.dot(dnorm[...,i].ravel())
                                    for i in range(npar)]), axis=-1)
        if center is None and stddev is None:
            # Everything else is None, so we're done.
            return mom0, None, None, dmom0, None, None
        # Guard against division by zero in empty bins.
        inv_mom0 = 1/(mom0 + (mom0 == 0.))
        _center = np.zeros(shape, dtype=float) if center is None else center
        mom1 = self.bin(_norm*_center) * inv_mom0
        # d(mom1)/dp via the quotient rule; the norm and center contributions
        # are accumulated separately.
        dmom1 = None
        if dnorm is not None:
            dmom1 = np.stack(tuple([(self.bin_transform.dot((_center*dnorm[...,i]).ravel())
                                        - mom1 * dmom0[...,i]) * inv_mom0
                                    for i in range(npar)]), axis=-1)
        if dcenter is not None:
            _dmom1 = np.stack(tuple([self.bin_transform.dot((_norm*dcenter[...,i]).ravel())
                                        * inv_mom0 for i in range(npar)]), axis=-1)
            dmom1 = _dmom1 if dmom1 is None else dmom1 + _dmom1
        if stddev is None:
            return mom0, mom1, None, dmom0, dmom1, None
        # 2nd moment: E[x^2] - E[x]^2, clipped at 0 against round-off.
        _var = _center**2 + stddev**2
        mom2 = self.bin(_norm*_var) * inv_mom0 - mom1**2
        mom2[mom2 < 0] = 0.
        _mom2 = np.sqrt(mom2)
        # d(mom2)/dp: accumulate the norm, center, and stddev contributions.
        dmom2 = None
        if dnorm is not None:
            dmom2 = np.stack(tuple([(self.bin_transform.dot((_var*dnorm[...,i]).ravel())
                                     - mom2 * dmom0[...,i]) * inv_mom0
                                    for i in range(npar)]), axis=-1)
        if dcenter is not None:
            _dmom2 = np.stack(tuple([self.bin_transform.dot(
                                        (2*_norm*_center*dcenter[...,i]).ravel()) * inv_mom0
                                     - 2 * mom1 * dmom1[...,i] for i in range(npar)]), axis=-1)
            dmom2 = _dmom2 if dmom2 is None else dmom2 + _dmom2
        if dstddev is not None:
            _dmom2 = np.stack(tuple([self.bin_transform.dot(
                                        (2*_norm*stddev*dstddev[...,i]).ravel()) * inv_mom0
                                     for i in range(npar)]), axis=-1)
            dmom2 = _dmom2 if dmom2 is None else dmom2 + _dmom2
        if dmom2 is not None:
            # Convert the variance derivative into the standard-deviation
            # derivative: d(sqrt(v)) = dv / (2 sqrt(v)), guarding v == 0.
            _inv_mom2 = 1./(_mom2 + (_mom2 == 0.0))
            dmom2 = dmom2 * _inv_mom2[...,None] / 2
        return mom0, mom1, _mom2, dmom0, dmom1, dmom2
    def remap(self, data, mask=None, masked=True, fill_value=0):
        """
        Provided the vector of binned data, reconstruct the 2D map filling each
        pixel in the same bin with the binned value.

        Args:
            data (`numpy.ndarray`_):
                The data to remap. Shape must match the number of unique bin
                IDs.
            mask (`numpy.ndarray`_, optional):
                Boolean mask with the same shape as ``data``. If None, all data
                are unmasked.
            masked (:obj:`bool`, optional):
                Return data as a masked array, where any pixel not associated
                with a bin is masked (in addition to the provided ``mask``.)
            fill_value (scalar-like, optional):
                Value used to fill the masked pixels, if a masked array is
                *not* requested. Warning: The value is automatically
                converted to be the same data type as the input array or
                attribute.

        Returns:
            `numpy.ndarray`_, `numpy.ma.MaskedArray`_: 2D array with
            the data remapped to a 2D array.

        Raises:
            ValueError:
                Raised if shape of ``data`` does not match the expected 1d
                shape.
        """
        # Check the shapes
        if data.shape != self.nbin.shape:
            raise ValueError('To remap, data must have the same shape as the internal data '
                             'attributes: {0}'.format(self.nbin.shape))
        if mask is not None and mask.shape != self.nbin.shape:
            raise ValueError('To remap, mask must have the same shape as the internal data '
                             'attributes: {0}'.format(self.nbin.shape))
        # Construct the output map
        # NOTE: np.ma.masked_all sets the initial data array to 2.17506892e-314,
        # which just leads to trouble. I've instead used the line below to make
        # sure that the initial value is just 0.
        _data = np.ma.MaskedArray(np.zeros(self.spatial_shape, dtype=data.dtype), mask=True)
        # Broadcast each binned value back to every pixel in its bin; pixels
        # covered by a bin are automatically unmasked by the assignment.
        _data[self.unravel] = data[self.bin_inverse]
        if mask is not None:
            np.ma.getmaskarray(_data)[self.unravel] = mask[self.bin_inverse]
        # Return a masked array if requested; otherwise, fill the masked values
        # with the type-cast fill value. WARNING: the default value of
        # fill_value=0 will mean fill_value=False for a boolean array.
        return _data if masked else _data.filled(_data.dtype.type(fill_value))
    def remap_covar(self, covar):
        """
        Remap a covariance matrix from the binned data to the unbinned map.
        Pixels in the same bin are perfectly correlated.

        Args:
            covar (`numpy.ndarray`_, `scipy.sparse.csr_matrix`_):
                Covariance matrix to remap.

        Returns:
            `scipy.sparse.csr_matrix`_: A sparse matrix for the remapped
            covariance.
        """
        # Good-pixel mask selecting pixels that are associated with a bin.
        # NOTE(review): ``self.nbin`` is used as an *array* elsewhere in this
        # class (cf. ``self.nbin.shape`` in :func:`remap`); passing it as the
        # shape argument of ``np.ones`` looks suspicious -- confirm whether
        # ``self.nbin.shape`` was intended here.
        gpm = np.ones(self.nbin, dtype=bool) if self.ubinid is None \
                else self.remap(self.ubinid, masked=False, fill_value=-1) > -1
        # Build a binary bin-to-pixel map (pixels perfectly correlated within
        # a bin), then project the covariance through it.
        _bt = self.bin_transform[:,gpm.ravel()].T
        _bt[_bt > 0] = 1.
        return fill_matrix(_bt.dot(covar.dot(_bt.T)), gpm.ravel())
    def unique(self, data):
        """
        Provided a 2D array of data binned according to :attr:`binid`, select
        and return the unique values from the map.

        The difference between this method and :func:`bin` is that, instead of
        averaging all the data within a bin, this method simply returns the
        value for a single pixel within the bin region.

        Args:
            data (`numpy.ndarray`_):
                The 2D data array from which to extract the unique data.
                Shape must be :attr:`spatial_shape`.

        Returns:
            `numpy.ndarray`_: The 1D vector with the unique data.

        Raises:
            ValueError:
                Raised if the input array shape is wrong.
        """
        if data.shape != self.spatial_shape:
            raise ValueError(f'Input has incorrect shape; found {data.shape}, '
                             f'expected {self.spatial_shape}.')
        # One representative flat-pixel index per unique bin.
        return data.flat[self.ubin_indx]
| 2.796875 | 3 |
binary_tree/tests/test_level_order_traversal.py | ahcode0919/python-ds-algorithms | 0 | 12769764 | <reponame>ahcode0919/python-ds-algorithms
from test_helpers.test_helpers import get_binary_tree
from binary_tree.level_order_traversal import level_order_traversal
def test_level_order_traversal():
    """Check that breadth-first traversal groups node values level by level."""
    # get_binary_tree() presumably builds the tree 1 / (2, 3) / (4, 5)
    # -- TODO confirm against test_helpers.
    tree = get_binary_tree()
    assert level_order_traversal(tree) == [[1], [2, 3], [4, 5]]
| 3.046875 | 3 |
examples/VECs/microbench/test_switch.py | UTexas-PSAAP/Parla.py | 11 | 12769765 | <reponame>UTexas-PSAAP/Parla.py
import time

# Time how long importing the multiload machinery itself takes.
t = time.perf_counter()
from parla.multiload import multiload_contexts
begin_multiload = time.perf_counter() - t

# Number of multiload contexts to exercise.
m = 12

# Import numpy once inside each of the first `m` contexts, recording the
# per-context import time.
import_times = []
for i in range(m):
    with multiload_contexts[i]:
        t = time.perf_counter()
        import numpy as np
        t = time.perf_counter() - t
        import_times.append(t)

# Allocate the matmul operands once, in context 0.
with multiload_contexts[0]:
    a = np.random.rand(1000, 1000)
    b = np.random.rand(1000, 1000)

# Measure only the cost of *entering* each context: the timer is stopped on
# the first statement inside the `with`, before the matmul runs.
switch_times = []
for i in range(m):
    t = time.perf_counter()
    with multiload_contexts[i]:
        t = time.perf_counter() - t
        c = a @ b
        switch_times.append(t)

print(np.mean(switch_times))
| 1.992188 | 2 |
helpers/org_files_by_date.py | rom1504/crawlingathome-gpu-hcloud | 0 | 12769766 | <reponame>rom1504/crawlingathome-gpu-hcloud
import os
import time
import shutil

# Organize files by modification date: every regular *.gz file in the rsync
# export directory that is more than an hour old is moved into a sub-folder
# named "YYYY-M-D" after its modification date.
os.chdir(r"/mnt/md1/export/rsync")

# Snapshot of the directory listing; it is NOT mutated while iterating.
# (The original code called ``files.remove(file)`` inside the loop, which
# silently skipped every other candidate file.)
files = os.listdir('.')

moved = 0
for file in files:
    # Only regular *.gz files whose mtime is more than one hour old.
    if not (os.path.isfile(file) and file.endswith("gz")
            and os.path.getmtime(file) < time.time() - 60 * 60):
        continue
    # Get all the details of the file creation and modification.
    time_format = time.gmtime(os.path.getmtime(file))
    # Folder name built from the modification date, e.g. "2021-7-4".
    dir_name = str(time_format.tm_year) + "-" + \
               str(time_format.tm_mon) + '-' + \
               str(time_format.tm_mday)
    # Create the date folder on first use.
    if not os.path.isdir(dir_name):
        os.mkdir(dir_name)
    # Move the file into its date folder.  Best effort: a file that cannot
    # be moved (permissions, name clash, ...) is skipped rather than fatal.
    try:
        shutil.move(file, dir_name)
    except (shutil.Error, OSError):
        continue
    moved += 1
    if moved % 1000 == 0:
        print("+1000 files")

print("successfully moved...")
gorilla/core/misc.py | sunjiahao1999/gorilla-core | 4 | 12769767 | <filename>gorilla/core/misc.py<gh_stars>1-10
# Copyright (c) Open-MMLab. All rights reserved.
import functools
import operator
import subprocess
import warnings
from collections import abc
from importlib import import_module
from inspect import getfullargspec
from six.moves import map, zip
from typing import Dict, List, Callable, Iterable, Optional, Sequence, Union
def import_modules_from_strings(imports, allow_failed_imports=False):
    """Import modules from the given list of strings.

    Args:
        imports (list | str | None): The given module names to be imported.
        allow_failed_imports (bool): If True, the failed imports will return
            None. Otherwise, an ImportError is raised. Default: False.

    Returns:
        list[module] | module | None: The imported modules. A single module
        (not a list) is returned when ``imports`` is a single string; None is
        returned when ``imports`` is empty or None.

    Raises:
        TypeError: If ``imports`` is neither a string nor a list of strings.
        ImportError: If a module fails to import and
            ``allow_failed_imports`` is False.

    Examples:
        >>> osp, sys = import_modules_from_strings(
        ...     ['os.path', 'sys'])
        >>> import os.path as osp_
        >>> import sys as sys_
        >>> assert osp == osp_
        >>> assert sys == sys_
    """
    if not imports:
        return
    single_import = False
    if isinstance(imports, str):
        single_import = True
        imports = [imports]
    if not isinstance(imports, list):
        raise TypeError(
            f'custom_imports must be a list but got type {type(imports)}')
    imported = []
    for imp in imports:
        if not isinstance(imp, str):
            raise TypeError(
                f'{imp} is of type {type(imp)} and cannot be imported.')
        try:
            imported_tmp = import_module(imp)
        except ImportError:
            if allow_failed_imports:
                warnings.warn(f'{imp} failed to import and is ignored.',
                              UserWarning)
                imported_tmp = None
            else:
                # Re-raise with the failing module name; the original bare
                # ``raise ImportError`` discarded all context about which
                # import failed.
                raise ImportError(f'Failed to import {imp}')
        imported.append(imported_tmp)
    if single_import:
        # Unwrap the one-element list so a string input yields a module.
        imported = imported[0]
    return imported
def convert_list(input_list: List, type: Callable):
    """Cast every element of *input_list* with *type* and return a new list."""
    return [type(element) for element in input_list]


# Convenience casters for the common element types.
convert_list_str = functools.partial(convert_list, type=str)
convert_list_int = functools.partial(convert_list, type=int)
convert_list_float = functools.partial(convert_list, type=float)
def iter_cast(inputs: Iterable,
              dst_type: Callable,
              return_type: Optional[Callable] = None):
    r"""Cast elements of an iterable object into some type.

    Args:
        inputs (Iterable): The input object.
        dst_type (type): Destination type applied to every element.
        return_type (type, optional): If specified, the output object will be
            converted to this type, otherwise an iterator is returned.

    Returns:
        iterator or specified type: The converted object.

    Raises:
        TypeError: If ``inputs`` is not iterable or ``dst_type`` is not a
            type.
    """
    if not isinstance(inputs, abc.Iterable):
        raise TypeError("inputs must be an iterable object")
    if not isinstance(dst_type, type):
        raise TypeError("`dst_type` must be a valid type")
    casted = map(dst_type, inputs)
    return casted if return_type is None else return_type(casted)


# Eagerly-materializing variants of :func:`iter_cast`.
list_cast = functools.partial(iter_cast, return_type=list)
tuple_cast = functools.partial(iter_cast, return_type=tuple)
def is_seq_of(seq: Sequence,
              expected_type: Callable,
              seq_type: Optional[Callable] = None) -> bool:
    r"""Check whether ``seq`` is a sequence whose items are all of one type.

    Args:
        seq (Sequence): The sequence to be checked.
        expected_type (type): Expected type of sequence items.
        seq_type (type, optional): Expected sequence container type; any
            :class:`collections.abc.Sequence` is accepted when omitted.

    Returns:
        bool: Whether the sequence is valid.
    """
    if seq_type is None:
        container = abc.Sequence
    else:
        assert isinstance(seq_type, type), "`seq_type` must be a valid type"
        container = seq_type
    if not isinstance(seq, container):
        return False
    return all(isinstance(item, expected_type) for item in seq)


# Container-specific shorthands.
is_list_of = functools.partial(is_seq_of, seq_type=list)
is_tuple_of = functools.partial(is_seq_of, seq_type=tuple)
def slice_list(in_list: List, lens: Union[int, List]) -> list:
    r"""Slice a list into several sub lists by a list of given lengths.

    Args:
        in_list (list): The list to be sliced.
        lens (int or list): The expected length of each sub list; an int means
            equal-sized chunks (and must divide ``len(in_list)`` evenly).

    Returns:
        list: A list of sliced lists.

    Raises:
        TypeError: If ``lens`` is neither an int nor a list.
        ValueError: If the lengths in ``lens`` do not sum to ``len(in_list)``.
    """
    if isinstance(lens, int):
        assert len(in_list) % lens == 0
        lens = [lens] * (len(in_list) // lens)
    if not isinstance(lens, list):
        raise TypeError("'indices' must be an integer or a list of integers")
    if sum(lens) != len(in_list):
        raise ValueError(f"sum of lens and list length does not "
                         f"match: {sum(lens)} != {len(in_list)}")
    sliced, start = [], 0
    for length in lens:
        sliced.append(in_list[start:start + length])
        start += length
    return sliced
def concat_list(in_list: List) -> list:
    r"""Concatenate a list of lists into a single flat list.

    Args:
        in_list (list): The list of lists to be merged.

    Returns:
        list: The concatenated flat list.
    """
    flattened = []
    for sub in in_list:
        flattened += sub
    return flattened
def check_prerequisites(prerequisites: Union[str, List[str]],
                        checker: Callable,
                        msg_tmpl: Optional[str] = None):
    r"""A decorator factory to check if prerequisites are satisfied.

    The decorated function runs only if every prerequisite passes ``checker``;
    otherwise the message template is printed and a RuntimeError is raised.

    Args:
        prerequisites (str of list[str]): Prerequisites to be checked.
        checker (callable): The checker method that returns True if a
            prerequisite is meet, False otherwise.
        msg_tmpl (str | None, optional): The message template with two
            ``{}`` slots: the missing prerequisites and the function name.

    Returns:
        decorator: A specific decorator.
    """
    if msg_tmpl is None:
        msg_tmpl = ("Prerequisites '{}' are required "
                    "in method '{}' but not found, "
                    "please install them first.")

    def wrap(func):

        @functools.wraps(func)
        def wrapped_func(*args, **kwargs):
            # Normalize to a list of prerequisite-name strings.
            requirements = [prerequisites] if isinstance(prerequisites, str) \
                else list_cast(prerequisites, str)
            missing = []
            for item in requirements:
                if not checker(item):
                    missing.append(item)
            if missing:
                # Report every missing prerequisite at once, then abort.
                print(msg_tmpl.format(", ".join(missing), func.__name__))
                raise RuntimeError("Prerequisites not meet.")
            else:
                return func(*args, **kwargs)

        return wrapped_func

    return wrap
def _check_py_package(package: str) -> bool:
r"""Check whether package can be import
Args:
package (str): Package"s name
Returns:
bool: The `package` can be import or not
"""
assert isinstance(package,
str), "`package` must be the string of package's name"
try:
import_module(package)
except ImportError:
return False
else:
return True
def _check_executable(cmd: str) -> bool:
    r"""Check whether cmd can be executed

    Args:
        cmd (str): Cmd content

    Returns:
        bool: The `cmd` can be executed or not
    """
    # ``which`` exits non-zero when the command is not found on PATH.
    if subprocess.call(f"which {cmd}", shell=True) != 0:
        return False
    else:
        return True


# Ready-made decorators: require importable python packages / shell
# executables before running the decorated function.
requires_package = functools.partial(check_prerequisites,
                                     checker=_check_py_package)
requires_executable = functools.partial(check_prerequisites,
                                        checker=_check_executable)
# NOTE: use to maintain
def deprecated_api_warning(name_dict: Dict, cls_name: Optional[str] = None):
    r"""A decorator to check if some argments are deprecate and try to replace
    deprecate src_arg_name to dst_arg_name.

    Args:
        name_dict(dict):
            key (str): Deprecate argument names.
            val (str): Expected argument names.
        cls_name (str, optional): Class name prepended to the function name
            in the warning message.

    Returns:
        func: New function.
    """

    def api_warning_wrapper(old_func):

        @functools.wraps(old_func)
        def new_func(*args, **kwargs):
            # get the arg spec of the decorated method
            args_info = getfullargspec(old_func)
            # get name of the function
            func_name = old_func.__name__
            if cls_name is not None:
                func_name = f"{cls_name}.{func_name}"
            if args:
                arg_names = args_info.args[:len(args)]
                for src_arg_name, dst_arg_name in name_dict.items():
                    if src_arg_name in arg_names:
                        warnings.warn(
                            f"'{src_arg_name}' is deprecated in `{func_name}`, "
                            f"please use `{dst_arg_name}` instead")
                        # NOTE(review): this rewrites the *local* name list
                        # only -- positional ``args`` are forwarded unchanged
                        # below, so no remapping of values happens here.
                        arg_names[arg_names.index(src_arg_name)] = dst_arg_name
            if kwargs:
                for src_arg_name, dst_arg_name in name_dict.items():
                    if src_arg_name in kwargs:
                        warnings.warn(
                            f"`{src_arg_name}` is deprecated in `{func_name}`, "
                            f"please use `{dst_arg_name}` ")
                        # Keyword arguments ARE remapped to the new name.
                        kwargs[dst_arg_name] = kwargs.pop(src_arg_name)

            # apply converted arguments to the decorated method
            output = old_func(*args, **kwargs)
            return output

        return new_func

    return api_warning_wrapper
def multi_apply(func: Callable, *args, **kwargs):
    r"""Apply ``func`` element-wise across the argument sequences.

    Note:
        ``func`` is called once per position across the sequences in
        ``args``; the k-th elements of every call's returned tuple are
        gathered into the k-th output list, i.e. the results are transposed.

    Args:
        func (Callable): A function that will be applied to a list of
            arguments

    Returns:
        tuple(list): A tuple containing multiple list, each list contains \
            a kind of returned results by the function
    """
    if kwargs:
        func = functools.partial(func, **kwargs)
    per_call_results = map(func, *args)
    return tuple(map(list, zip(*per_call_results)))
def is_power2(num: int) -> bool:
    r"""Author: liang.zhihao

    Args:
        num (int): input positive number

    Returns:
        bool: is the power of 2 or not
    """
    # A positive power of two has exactly one set bit.
    return num > 0 and bin(num).count('1') == 1
def is_multiple(num: Union[int, float], multiple: Union[int, float]) -> bool:
    r"""Author: liang.zhihao

    Args:
        num (int, float): input number
        multiple (int, float): input multiple number

    Returns:
        bool: can num be multiply by multiple or not (zero always fails)
    """
    if num == 0.:
        return False
    return num % multiple == 0.
| 2.21875 | 2 |
conclusion/models.py | DamienMAYAUX/pilot_LEEP_RS | 0 | 12769768 | <gh_stars>0
from otree.api import (
models,
widgets,
BaseConstants,
BaseSubsession,
BaseGroup,
BasePlayer,
Currency as c,
currency_range,
)
author = '<NAME>'
doc = """
"""
class Constants(BaseConstants):
    # oTree app configuration: URL segment, grouping, and round count.
    name_in_url = 'attention_test'
    players_per_group = None  # no grouping: each participant plays alone
    num_rounds = 1
class Subsession(BaseSubsession):
    # No subsession-level state for this app.
    pass
class Group(BaseGroup):
    # No group-level state for this app.
    pass
class Player(BasePlayer):
    # feedback = models.LongStringField(label = "Feel free to leave us a feedback on the experiment ")
    # Optional free-text feedback (label shown to participants in French).
    feedback = models.LongStringField( blank = True, label = """N'hésitez pas à donner ci-dessous votre ressenti\
 sur l'expérience,\n à expliquer comment vous avez procédé ou à\
 partager vos intuitions\n sur la pertinence de la recommandation.""")
    # Flag recording whether the "minus 5000" task was completed;
    # presumably set elsewhere in the app -- TODO confirm.
    minus5000done = models.BooleanField(initial=False)
| 2.1875 | 2 |
poll/migrations/0003_poll_expires.py | devs4v/opinify | 0 | 12769769 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated Django migration: re-adds the ``expires`` field that
    # migration 0002 removed, using a one-off historical default
    # (preserve_default=False means the default applies to existing rows
    # only and is not kept on the model).

    dependencies = [
        ('poll', '0002_remove_poll_expires'),
    ]

    operations = [
        migrations.AddField(
            model_name='poll',
            name='expires',
            field=models.DateTimeField(default=datetime.datetime(2015, 10, 25, 17, 52, 51, 968925, tzinfo=utc)),
            preserve_default=False,
        ),
    ]
| 1.632813 | 2 |
django_sqlalchemy/models/query.py | brosner/django-sqlalchemy | 4 | 12769770 | <gh_stars>1-10
from django.db.models.query import QuerySet
from django.db.models.sql.constants import ORDER_PATTERN
from sqlalchemy.orm import attributes
from sqlalchemy.orm.query import _ColumnEntity
from sqlalchemy.sql import operators
from django_sqlalchemy.models import query_utils as utils
from django.db.models.query_utils import Q
class SQLAlchemyQuerySet(QuerySet):
    """
    A SQLAlchemy implementation of the Django QuerySet class

    NOTE(review): this module is Python 2 code (``except X, e`` syntax,
    ``iteritems``), and several names used below (``sql``, ``IntegrityError``,
    ``FieldError``, ``DateField``, ``DateQuerySet``) are not imported at the
    top of this file -- confirm the intended imports before relying on the
    corresponding code paths.
    """
    def __init__(self, model=None, query=None):
        # Fall back to the model's default SQLAlchemy query when none given.
        self.model = model
        self.query = query or self.model.query

    def __and__(self, other):
        # NOTE(review): ``sql`` is undefined in this module; this raises
        # NameError if reached.
        combined = self._clone()
        combined.query.combine(other.query, sql.AND)
        return combined

    def __or__(self, other):
        # NOTE(review): see __and__; ``sql`` is undefined here as well.
        combined = self._clone()
        combined.query.combine(other.query, sql.OR)
        return combined

    def __repr__(self):
        return repr(self.query.all())

    def __len__(self):
        return self.query.count()

    def __iter__(self):
        return self.iterator()

    def __getitem__(self, k):
        # TODO: with 0.5 SA executes this immediately, Django doesn't
        return list(self.query)[k]

    ####################################
    # METHODS THAT DO DATABASE QUERIES #
    ####################################

    def iterator(self):
        """
        An iterator over the results from applying this QuerySet to the
        database.
        """
        return iter(self.query)

    def count(self):
        """
        Performs a SELECT COUNT() and returns the number of records as an
        integer.

        If the queryset is already cached (i.e. self._result_cache is set) this
        simply returns the length of the cached results set to avoid multiple
        SELECT COUNT(*) calls.
        """
        return self.query.count()

    def get(self, *args, **kwargs):
        """
        Performs the query and returns a single object matching the given
        keyword arguments.
        """
        # Fetch at most two rows: enough to distinguish zero / one / many.
        obj = list(self.filter(*args, **kwargs).query[0:2])
        count = len(obj)
        if count == 1:
            return obj[0]
        elif count == 0:
            raise self.model.DoesNotExist("%s matching query does not exist."
                    % self.model._meta.object_name)
        else:
            raise self.model.MultipleObjectsReturned("get() returned more than one %s -- it returned %s! Lookup parameters were %s"
                    % (self.model._meta.object_name, count, kwargs))

    def first(self):
        """
        Return the first result of the underlying ``Query`` or None if the result doesn't contain any row.
        This results in an execution of the underlying query.
        """
        return self.query.first()

    def create(self, **kwargs):
        """
        Create a new object with the given kwargs, saving it to the database
        and returning the created object.
        """
        obj = self.model(**kwargs)
        obj.save()
        return obj

    def get_or_create(self, **kwargs):
        """
        Looks up an object with the given kwargs, creating one if necessary.
        Returns a tuple of (object, created), where created is a boolean
        specifying whether an object was created.
        """
        assert kwargs, \
            'get_or_create() must be passed at least one keyword argument'
        defaults = kwargs.pop('defaults', {})
        try:
            return self.get(**kwargs), False
        except self.model.DoesNotExist:
            try:
                # Lookup separators ('__') are stripped: only direct field
                # values can be used to create an instance.
                params = dict([(k, v) for k, v in kwargs.items() if '__' not in k])
                params.update(defaults)
                obj = self.model(**params)
                obj.save()
                return obj, True
            # NOTE(review): Python 2 except syntax; ``IntegrityError`` is not
            # imported in this module. The race of two concurrent creates is
            # resolved by re-fetching the row.
            except IntegrityError, e:
                return self.get(**kwargs), False

    def latest(self, field_name=None):
        """
        Returns the latest object, according to the model's 'get_latest_by'
        option or optional given field_name.
        """
        latest_by = field_name or self.model._meta.get_latest_by
        assert bool(latest_by), "latest() requires either a field_name parameter or 'get_latest_by' in the model"
        return self.order_by('-%s' % latest_by).first()

    def in_bulk(self, id_list):
        """
        Returns a dictionary mapping each of the given IDs to the object with
        that ID.
        """
        assert isinstance(id_list, (tuple, list)), \
            "in_bulk() must be provided with a list of IDs."
        if not id_list:
            return {}
        qs = self.filter(**{'pk__in': id_list})
        return dict([(obj._get_pk_val(), obj) for obj in qs.iterator()])

    def delete(self):
        """
        Deletes the records in the current QuerySet.
        """
        # this approach although hackish results in one less
        # query, the select. This is more optimized than
        # Django's default. Hopefully it won't pressent a
        # problem.
        self.model.__table__.delete(self.query.compile()._whereclause).execute()
    delete.alters_data = True

    def update(self, **kwargs):
        """
        Updates all elements in the current QuerySet, setting all the given
        fields to the appropriate values.
        """
        values = self._parse_update_values(**kwargs)
        self.model.__table__.update(self.query.compile()._whereclause, values).execute()
    update.alters_data = True

    ##################################################
    # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
    ##################################################

    def values(self, *fields):
        """
        Returns a list of dicts with only the columns specified. This works
        by wrapping the SQLAlchemyQuerySet in a SQLAlchemyValuesQuerySet
        that modifies the setup and iterator behavior.
        """
        from django_sqlalchemy.models.query import SQLAlchemyValuesQuerySet
        return self._clone(klass=SQLAlchemyValuesQuerySet, setup=True, _fields=fields)

    def values_list(self, *fields, **kwargs):
        """
        Returns a list of tuples for each of the fields specified. This
        works by wrapping the SQLAlchemyQuerySet in a
        SQLAlchemyValuesListQuerySet that modifies the iterator behavior.
        The flat option is only available with one column and flattens
        out the tuples into a simple list.
        """
        flat = kwargs.pop('flat', False)
        if kwargs:
            raise TypeError('Unexpected keyword arguments to values_list: %s'
                    % (kwargs.keys(),))
        if flat and len(fields) > 1:
            raise TypeError("'flat' is not valid when values_list is called with more than one field.")
        from django_sqlalchemy.models.query import SQLAlchemyValuesListQuerySet
        return self._clone(klass=SQLAlchemyValuesListQuerySet, setup=True, flat=flat,
                _fields=fields)

    def dates(self, field_name, kind, order='ASC'):
        """
        TODO: Need to map dates

        Returns a list of datetime objects representing all available dates
        for the given field_name, scoped to 'kind'.
        """
        assert kind in ("month", "year", "day"), \
                "'kind' must be one of 'year', 'month' or 'day'."
        assert order in ('ASC', 'DESC'), \
                "'order' must be either 'ASC' or 'DESC'."
        # Let the FieldDoesNotExist exception propagate.
        field = self.model._meta.get_field(field_name, many_to_many=False)
        # NOTE(review): ``DateField`` and ``DateQuerySet`` are not imported
        # in this module.
        assert isinstance(field, DateField), "%r isn't a DateField." \
                % field_name
        return self._clone(klass=DateQuerySet, setup=True, _field=field,
                _kind=kind, _order=order)

    ##################################################################
    # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
    ##################################################################

    def all(self):
        """
        Returns a new QuerySet that is a copy of the current one. This allows a
        QuerySet to proxy for a model manager in some cases.
        """
        return self._clone()

    def filter(self, *args, **kwargs):
        """
        Returns a new QuerySet instance with the args ANDed to the existing
        set.
        """
        return self._filter_or_exclude(False, *args, **kwargs)

    def exclude(self, *args, **kwargs):
        """
        Returns a new QuerySet instance with NOT (args) ANDed to the existing
        set.
        """
        return self._filter_or_exclude(True, *args, **kwargs)

    def _filter_or_exclude(self, exclude, *args, **kwargs):
        """
        This does the actual filtering, either combined filtering or
        excluding depending on the exclude flag.
        """
        from django_sqlalchemy.models import query_utils
        return query_utils.parse_filter(self, exclude, **kwargs)

    def complex_filter(self, filter_obj):
        """
        TODO:need to map complex_filter

        Returns a new QuerySet instance with filter_obj added to the filters.
        filter_obj can be a Q object (or anything with an add_to_query()
        method) or a dictionary of keyword lookup arguments.

        This exists to support framework features such as 'limit_choices_to',
        and usually it will be more natural to use other methods.
        """
        if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
            return self._filter_or_exclude(None, filter_obj)
        else:
            return self._filter_or_exclude(None, **filter_obj)

    def options(self, *args):
        """Return a new QuerySet object, applying the given list of
        SQLAlchemy MapperOptions.
        """
        obj = self._clone()
        obj.query.options(*args)
        return obj

    def select_related(self, *fields, **kwargs):
        """
        TODO:need to map select_related

        Returns a new QuerySet instance that will select related objects. If
        fields are specified, they must be ForeignKey fields and only those
        related objects are included in the selection.
        """
        depth = kwargs.pop('depth', 0)
        if kwargs:
            raise TypeError('Unexpected keyword arguments to select_related: %s'
                    % (kwargs.keys(),))
        obj = self._clone()
        if fields:
            if depth:
                raise TypeError('Cannot pass both "depth" and fields to select_related()')
            obj.query.add_select_related(fields)
        else:
            obj.query.select_related = True
        if depth:
            obj.query.max_depth = depth
        return obj

    def order_by(self, *field_names):
        """
        Returns a new QuerySet instance with the ordering changed.

        These items are either field names (not column names) --
        possibly with a direction prefix ('-' or '?') -- or ordinals,
        corresponding to column positions in the 'select' list.
        """
        obj = self._clone()
        # django likes to clear the ordering, not sure why, because
        # this is inconsistent with the filter approach
        obj.query._order_by = False
        errors = []
        for item in field_names:
            if not ORDER_PATTERN.match(item):
                errors.append(item)
        if errors:
            # NOTE(review): ``FieldError`` is not imported in this module.
            raise FieldError('Invalid order_by arguments: %s' % errors)
        from django_sqlalchemy.models import query_utils
        return query_utils.parse_order_by(obj, *field_names)

    def distinct(self, true_or_false=True):
        """
        Returns a new QuerySet instance that will select only distinct results.
        """
        clone = self._clone()
        clone.query._distinct = true_or_false
        return clone

    def extra(self, select=None, where=None, params=None, tables=None,
              order_by=None, select_params=None):
        """
        TODO: Need to Map extra()

        Adds extra SQL fragments to the query.
        """
        #assert self.query.can_filter(), \
        #"Cannot change a query once a slice has been taken"
        clone = self._clone()
        #clone.query.add_extra(select, select_params, where, params, tables, order_by)
        return clone

    def reverse(self):
        """
        Reverses the ordering of the queryset.
        """
        clone = self._clone()
        # Flip each ORDER BY term between ascending and descending.
        for field in clone.query._order_by:
            if field.modifier == operators.desc_op:
                field.modifier = operators.asc_op
            else:
                field.modifier = operators.desc_op
        return clone

    ###################
    # PRIVATE METHODS #
    ###################

    def _parse_update_values(self, **kwargs):
        # Translate field-name keyword arguments into a column -> value
        # mapping usable by a SQLAlchemy UPDATE; foreign-key model values
        # are replaced with their primary keys.
        from django.db.models.base import Model
        values = {}
        # NOTE(review): ``iteritems`` is Python 2 only.
        for name, val in kwargs.iteritems():
            field, model, direct, m2m = self.model._meta.get_field_by_name(name)
            if not direct or m2m:
                # NOTE(review): ``FieldError`` is not imported in this module.
                raise FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field)
            if field.rel and isinstance(val, Model):
                val = val.pk
            values[field.column] = val
        return values

    def _clone(self, klass=None, setup=False, **kwargs):
        # Copy this queryset (optionally as a different subclass); the
        # subclass's _setup_query hook runs after attributes are applied.
        if klass is None:
            klass = self.__class__
        c = klass(model=self.model, query=self.query._clone())
        c.__dict__.update(kwargs)
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c
class SQLAlchemyValuesQuerySet(SQLAlchemyQuerySet):
    # QuerySet variant backing ``values()``: yields one dict per row,
    # keyed by the selected field names.

    def __init__(self, *args, **kwargs):
        self.field_names = []
        super(SQLAlchemyValuesQuerySet, self).__init__(*args, **kwargs)

    def __repr__(self):
        return repr([o for o in self.iterator()])

    def iterator(self):
        # Zip each SQLAlchemy result row with the selected field names.
        for row in self.query:
            yield dict(zip(self.field_names, row))

    def _setup_query(self):
        """
        Constructs the field_names list that the values query will be
        retrieving.

        Called by the _clone() method after initialising the rest of the
        instance.
        """
        if self._fields:
            self.field_names = list(self._fields) + self.field_names
        else:
            # Default to all fields.
            self.field_names = [f.attname for f in self.model._meta.fields]
        # Map Django field names onto SQLAlchemy columns and narrow the
        # underlying query to just those columns.
        field_names = utils.fields_to_sa_columns(self, *self.field_names)
        self.query = self.query.values(*field_names)
class SQLAlchemyValuesListQuerySet(SQLAlchemyValuesQuerySet):
    """QuerySet variant backing ``values_list()``: yields one tuple per row,
    or the bare value when ``flat`` is set with a single selected field."""

    def iterator(self):
        flatten = self.flat and len(self._fields) == 1
        for row in iter(self.query):
            yield row[0] if flatten else row
| 2.4375 | 2 |
module_test.py | chroblert/sourceCodeCheck | 6 | 12769771 | import os
import time
import re
# import lxml
from lxml import etree
from tool import read_txt_file_to_list
with open('ttttt.html','a',encoding = 'utf-8') as f:
f.write('盖雅放假啊')
f.write('\n')
with open('ttttt.html','r',encoding = 'utf-8') as f:
fileHtml = f.read()
sensitiveKeywordListUri = './config/sensitiveKeywords.txt'
sensitiveKeywordList = read_txt_file_to_list(sensitiveKeywordListUri)
print(sensitiveKeywordList)
isHaveSensitiveKeyword = True
fileWeight = 0
havedSensitiveKeywordList = []
for sensitiveKeyword in sensitiveKeywordList:
regPattern = re.compile(r'' + sensitiveKeyword + '')
result = regPattern.findall(fileHtml)
havedSensitiveKeywordList.extend(result)
if len(havedSensitiveKeywordList) == 0:
isHaveSensitiveKeyword = False
print("不包含一些敏感信息词汇")
else:
fileWeight = fileWeight + len(havedSensitiveKeywordList)
print("包含的敏感词汇如下:")
print(havedSensitiveKeywordList)
| 2.96875 | 3 |
docxx/oxml/notes.py | betasewer/python-docx-xtended | 1 | 12769772 | <reponame>betasewer/python-docx-xtended
# encoding: utf-8
"""
Custom element classes related to the numbering part
"""
from docxx.oxml import OxmlElement
from docxx.oxml.shared import CT_DecimalNumber
from docxx.oxml.simpletypes import ST_DecimalNumber, ST_FtnEdn, ST_OnOff
from docxx.oxml.xmlchemy import (
BaseOxmlElement, OneAndOnlyOne, RequiredAttribute, OptionalAttribute, ZeroOrMore, ZeroOrOne
)
class CT_Endnotes(BaseOxmlElement):
    """
    ``<w:endnotes>`` element, the root element of the endnotes part,
    containing zero or more ``<w:endnote>`` children.

    (The previous docstring was copy-pasted from the numbering module and
    described ``<w:num>`` instead.)
    """
    endnote = ZeroOrMore('w:endnote',successors=())
class CT_Footnotes(BaseOxmlElement):
    """
    ``<w:footnotes>`` element, the root element of the footnotes part,
    containing zero or more ``<w:footnote>`` children.

    (The previous docstring was copy-pasted from the numbering module and
    described ``<w:num>`` instead.)
    """
    footnote = ZeroOrMore('w:footnote',successors=())
class CT_FtnEdn(BaseOxmlElement):
    """
    A single footnote/endnote content element: holds block-level children
    (``w:p`` paragraphs, ``w:tbl`` tables, ``w:altChunk``) plus an optional
    ``w:type`` and a required decimal ``w:id`` attribute.
    """
    p = ZeroOrMore('w:p', successors=('w:altChunk',))
    tbl = ZeroOrMore('w:tbl', successors=('w:altChunk',))
    altChunk = ZeroOrMore('w:altChunk', successors=())
    type = OptionalAttribute('w:type', ST_FtnEdn)
    id = RequiredAttribute('w:id', ST_DecimalNumber)
class CT_FtnEdnRef(BaseOxmlElement):
    """
    Footnote/endnote reference element carrying a required decimal ``w:id``
    and an optional ``w:customMarkFollows`` flag.

    NOTE(review): presumably registered for ``w:footnoteReference`` /
    ``w:endnoteReference`` -- confirm against the element registration.
    """
    customMarkFollows = OptionalAttribute('w:customMarkFollows', ST_OnOff)
    id = RequiredAttribute('w:id', ST_DecimalNumber)
| 2.359375 | 2 |
tests/task.py | AlexGrig/pytorch-lr-finder | 0 | 12769773 | <reponame>AlexGrig/pytorch-lr-finder<gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Subset
import pytest
from model import LinearMLP
from dataset import XORDataset, ExtraXORDataset
def use_cuda():
    """Return True when CUDA should be exercised: available *and* the test
    run was not restricted to CPU via the custom ``cpu_only`` option."""
    cpu_only = pytest.custom_cmdopt.cpu_only
    return False if cpu_only else torch.cuda.is_available()
class TaskTemplate(type):
    """Metaclass that invokes ``__post_init__`` (when the instance defines
    one) immediately after normal construction."""
    def __call__(cls, *args, **kwargs):
        instance = super(TaskTemplate, cls).__call__(*args, **kwargs)
        if hasattr(instance, "__post_init__"):
            instance.__post_init__()
        return instance
class BaseTask(metaclass=TaskTemplate):
    """Container for everything a training task needs.  Subclasses fill in
    the attributes; ``__post_init__`` then normalizes ``device``."""
    def __init__(self):
        # Placeholders -- subclasses overwrite these in their __init__.
        self.batch_size = -1
        self.model = None
        self.optimizer = None
        self.criterion = None
        self.device = None
        self.train_loader = None
        self.val_loader = None
    def __post_init__(self):
        # Without usable CUDA the task always runs on CPU (device=None);
        # otherwise a string spec is coerced into a ``torch.device``.
        if not use_cuda():
            self.device = None
        if self.device is None:
            return
        if isinstance(self.device, torch.device):
            return
        if isinstance(self.device, str):
            self.device = torch.device(self.device)
            return
        raise TypeError("Invalid type of device.")
class XORTask(BaseTask):
    """XOR toy task.  With ``validate`` set, the last ``bs`` indices of the
    first ``steps`` samples are split off as a validation loader."""
    def __init__(self, validate=False):
        super(XORTask, self).__init__()
        bs, steps = 8, 64
        dataset = XORDataset(bs * steps)
        if validate:
            cut = steps - bs
            self.train_loader = DataLoader(Subset(dataset, range(cut)))
            self.val_loader = DataLoader(Subset(dataset, range(cut, steps)))
        else:
            self.train_loader = DataLoader(dataset)
            self.val_loader = None
        self.batch_size = bs
        self.model = LinearMLP([8, 4, 1])
        self.optimizer = optim.SGD(self.model.parameters(), lr=1e-3)
        self.criterion = nn.MSELoss()
        self.device = torch.device("cuda")
class ExtraXORTask(BaseTask):
    """XOR task with two extra response dimensions.

    Mirrors :class:`XORTask`; additionally sets ``batch_size``, which the
    original implementation forgot (it stayed at the -1 placeholder from
    :class:`BaseTask`).
    """
    def __init__(self, validate=False):
        super(ExtraXORTask, self).__init__()
        bs, steps = 8, 64
        dataset = ExtraXORDataset(bs * steps, extra_dims=2)
        if validate:
            self.train_loader = DataLoader(Subset(dataset, range(steps - bs)))
            self.val_loader = DataLoader(Subset(dataset, range(steps - bs, steps)))
        else:
            self.train_loader = DataLoader(dataset)
            self.val_loader = None
        # Consistency fix: expose the actual batch size like XORTask does.
        self.batch_size = bs
        self.model = LinearMLP([8, 4, 1])
        self.optimizer = optim.SGD(self.model.parameters(), lr=1e-3)
        self.criterion = nn.MSELoss()
        self.device = torch.device("cuda")
class DiscriminativeLearningRateTask(BaseTask):
    """XOR task whose optimizer assigns a different learning rate to each
    layer of the MLP (per-parameter-group SGD).

    Fixes over the original: sets ``batch_size`` like :class:`XORTask`
    (it stayed at -1), and drops a dead ``dataset = XORDataset(128)``
    re-assignment whose value was never used.
    """
    def __init__(self, validate=False):
        super(DiscriminativeLearningRateTask, self).__init__()
        bs, steps = 8, 64
        dataset = XORDataset(bs * steps)
        if validate:
            self.train_loader = DataLoader(Subset(dataset, range(steps - bs)))
            self.val_loader = DataLoader(Subset(dataset, range(steps - bs, steps)))
        else:
            self.train_loader = DataLoader(dataset)
            self.val_loader = None
        self.batch_size = bs
        self.model = LinearMLP([8, 4, 1])
        # One parameter group per layer -> discriminative learning rates.
        self.optimizer = optim.SGD(
            [
                {"params": self.model.net[0].parameters(), "lr": 0.01},
                {"params": self.model.net[1].parameters(), "lr": 0.001},
            ],
            lr=1e-3,
            momentum=0.5,
        )
        self.criterion = nn.MSELoss()
        self.device = torch.device("cuda")
| 2.234375 | 2 |
market/core/forms.py | katomaso/django-market | 0 | 12769774 | # coding: utf-8
import re
import logging
from decimal import Decimal
from allauth import account
from django import forms
from django.forms import widgets
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.forms.utils import ErrorList, ErrorDict
from autocomplete_light import shortcuts as autocomplete_light
from bitcategory.fields import HierarchicalField
from captcha import fields as recaptcha_fields
from market.utils.forms import (
ModelChoiceCreationField,
Model2CharField,
)
from market.utils.widgets import (
CurrencyInput,
ClearableImageInput,
)
from . import models
logger = logging.getLogger(__name__)
# register autocomplete stuff
# Autocomplete channel for Manufacturer: matches on ``name`` after at
# least 3 typed characters, at most 6 suggestions per widget.
autocomplete_light.register(
    models.Manufacturer,
    search_fields=['name', ],
    autocomplete_js_attributes={
        'minimum_characters': 3,
    },
    widget_js_attributes={
        'max_values': 6,
    }
)
# Same configuration for Product lookups.
autocomplete_light.register(
    models.Product,
    search_fields=['name', ],
    autocomplete_js_attributes={
        'minimum_characters': 3,
    },
    widget_js_attributes={
        'max_values': 6,
    }
)
class UserForm(forms.ModelForm):
    """Form to update basic user's information (only the ``name`` field)."""
    class Meta:
        """Bind the form to the ``User`` model, exposing only ``name``."""
        fields = ("name",)
        model = models.User
class SecuredEmailForm(forms.Form):
    """Form to get contact email from the user.

    With a password present the pair is authenticated; without one the
    email must not belong to an already confirmed account.  The resulting
    (possibly ``None``) user is stored in ``cleaned_data['user']``.
    """
    messages = {
        "email_exists": _("Email already registered."),
        "invalid_credentials": _("Incorrect email and password combination"),
    }
    # captcha = recaptcha_fields.ReCaptchaField()
    email = forms.EmailField(required=True)
    password = forms.CharField(label=_("Password"), required=False,
                               widget=widgets.PasswordInput)
    def __init__(self, *args, **kwargs):
        """Save `request` from kwargs for optional password validation."""
        self.request = kwargs.pop('request', None)
        super(SecuredEmailForm, self).__init__(*args, **kwargs)
    def clean(self):
        """Validate whether email has not been taken yet."""
        cleaned_data = super(SecuredEmailForm, self).clean()
        user = None
        if cleaned_data.get('email'):
            if cleaned_data.get('password'):
                # Password supplied: treat as a login attempt via allauth.
                user = (account.adapter.get_adapter(self.request)
                        .authenticate(
                            self.request,
                            email=cleaned_data['email'],
                            password=cleaned_data['password']))
                if user is None:
                    raise ValidationError(self.messages["invalid_credentials"])
            else:  # no password
                # Bare email: reject if it is already confirmed elsewhere.
                confirmed_email = account.models.EmailAddress.objects.filter(
                    email=cleaned_data['email'])
                if confirmed_email.exists():
                    raise ValidationError(self.messages["email_exists"])
        cleaned_data["user"] = user
        return cleaned_data
class BaseSignupForm(forms.Form):
    """Serves as a base class for (allauth.)account.forms.SignupForm.

    Its only purpose is to provide a single `full name` field, which is
    split into ``first_name``/``last_name`` during cleaning.
    """
    messages = {
        "name": _("Name is required")
    }
    name = forms.CharField(label=_("Full name"),
                           max_length=70, required=True,
                           widget=widgets.TextInput(
                               attrs={'placeholder': _('Full name'),
                                      'autofocus': 'autofocus'}))
    def signup(self, request, user):
        """Invoked at signup time to complete the signup of the user."""
        pass
    def clean(self):
        """Split ``name`` into ``first_name`` and ``last_name``.

        Bug fix: a one-word name used to raise ``ValueError`` because
        ``split(" ", 1)`` returns a single element that was tuple-unpacked
        into two targets.  Now ``last_name`` defaults to an empty string.
        """
        cleaned_data = super().clean()
        if 'name' not in cleaned_data:
            raise ValidationError(self.messages['name'])
        parts = cleaned_data['name'].strip().split(" ", 1)
        cleaned_data['first_name'] = parts[0]
        cleaned_data['last_name'] = parts[1] if len(parts) > 1 else ""
        return cleaned_data
class AddressForm(forms.ModelForm):
    """Mandatory address form.

    Ownership and map-position fields are excluded; the free-form
    ``extra`` field is rendered as a small textarea.
    """
    class Meta:
        """Bind to ``Address``, hiding ownership/position fields."""
        model = models.Address
        exclude = ("user_shipping", "user_billing", "state", "position", "position_x", "position_y")
        widgets = {
            "extra": forms.Textarea(attrs={"cols": 23, "rows": 5})
        }
class PositionForm(forms.ModelForm):
    """It is an Address form with fields necessary for location and is optional.

    Validation is deliberately bypassed -- the form reports itself valid
    and wipes its errors -- whenever ``address_visible`` is unchecked.
    """
    address_visible = forms.BooleanField(label=_("I have a physical vendor."),
                                         required=False)
    class Meta:
        """Keep only position-related fields; billing details excluded."""
        model = models.Address
        exclude = ("user_shipping", "user_billing", "state",
                   "name", "business_id", "tax_id", "zip_code")
        widgets = {
            'position': forms.HiddenInput,
            'position_x': forms.HiddenInput,
            'position_y': forms.HiddenInput,
        }
    class Media:
        """Load the Mapy.cz JS API used by the hidden position widgets."""
        js = ('https://api4.mapy.cz/loader.js', )
    def is_valid(self):
        """Decide whether to clean form based on visibility of the address."""
        if not hasattr(self, "cleaned_data"):
            self.full_clean()
        # An invisible address can never block form submission.
        if not self.cleaned_data.get("address_visible", False):
            return True
        return super(PositionForm, self).is_valid()
    def clean(self):
        # When the address is hidden, discard all accumulated field errors
        # so partial/empty data does not fail validation.
        data = self.cleaned_data
        if not data["address_visible"]:
            self._errors = ErrorDict()
        return data
class VendorAddressForm(forms.ModelForm):
    """Vendor address has more mandatory field than generic address.

    ``name`` and ``zip_code`` become required; business/tax IDs are
    offered but optional.
    """
    name = forms.CharField(max_length=255, required=True, label=_("Name"))
    business_id = forms.CharField(max_length=10, required=False, label=_("Business number"))
    tax_id = forms.CharField(max_length=12, required=False, label=_("Tax ID"))
    zip_code = forms.CharField(max_length=10, required=True, label=_("Zip code"))
    class Meta:
        """Bind to ``Address``, hiding ownership/position fields."""
        model = models.Address
        exclude = ("user_shipping", "user_billing", "state", "position", "position_x", "position_y")
class VendorForm(forms.ModelForm):
    """Form for creating and updating Vendor."""
    category = HierarchicalField(queryset=models.Category.objects.all(), label=_("Category"))
    bank_account = Model2CharField(
        models.BankAccount, max_length=30, label=_("Bank account"), required=False)
    _messages = {
        "bank_account_number": _("Bank account number should be PREFIX - NUMBER / BANK"),
    }
    class Meta:
        """Define model and fields to be handled."""
        model = models.Vendor
        fields = ("name", "category", "motto", "description", "ships", "logo", "openings", "bank_account")
        widgets = {
            'description': forms.Textarea(attrs={"class": "input-xxlarge"}),
        }
    def __init__(self, *args, **kwargs):
        """Initialize M2M with all possibilities."""
        kwargs.update(prefix="vendor")
        super(VendorForm, self).__init__(*args, **kwargs)
    def clean_bank_account(self):
        """Parse bank account number and construct an instance of BankAccount model.

        Accepts ``[PREFIX -] NUMBER / BANK`` and either updates the
        vendor's existing related account or creates a new one.

        :raises ValidationError: for malformed account numbers
        """
        if not self.cleaned_data.get('bank_account'):
            return None
        number = self.cleaned_data['bank_account']
        match_o = re.match(r'(?:(\d+)\s*\-\s*)?(\d+)\s*/\s*(\d{4})', number)
        if match_o is None:
            raise ValidationError(self._messages["bank_account_number"])
        try:
            int(match_o.group(2))
        except (TypeError, ValueError):
            # Narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.
            raise ValidationError(self._messages["bank_account_number"])
        try:
            # Reuse (and update) the related account when one exists.
            bank_account = self.instance.bank_account
            bank_account.prefix = match_o.group(1)
            bank_account.number = match_o.group(2)
            bank_account.bank = match_o.group(3)
            bank_account.save()
        except Exception:
            # No usable related account (missing instance or relation):
            # create a fresh one.  Narrowed from a bare ``except:``.
            bank_account = models.BankAccount.objects.create(
                prefix=match_o.group(1), number=match_o.group(2), bank=match_o.group(3))
        return bank_account
class ProductForm(forms.ModelForm):
    """Add product to a vendor.

    ``name`` and ``manufacturer`` use autocomplete widgets so existing
    records can be reused; unknown manufacturers are created on the fly
    via ``ModelChoiceCreationField``.
    """
    name = forms.CharField(
        widget=autocomplete_light.TextWidget(
            'ProductAutocomplete',
            attrs={"placeholder": _("select your product if we already know it")}),
        required=True, label=_("Name"))
    category = HierarchicalField(queryset=models.Category.objects.all(), label=_("Category"))
    manufacturer = ModelChoiceCreationField(
        label=_("Manufacturer"),
        queryset=models.Manufacturer.objects.all(),
        to_field_name="name", required=False,
        widget=autocomplete_light.TextWidget(
            'ManufacturerAutocomplete',
            attrs={"placeholder": _("select the manufacturer if we already know them")}))
    class Meta:
        """Bind to ``Product`` with a clearable photo upload widget."""
        model = models.Product
        fields = ("name", "category", "description", "manufacturer",
                  "photo", "expedition_days", "tax")
        widgets = {
            'description': forms.Textarea,
            'extra': forms.Textarea,
            "photo": ClearableImageInput,
        }
class OfferForm(forms.ModelForm):
    """Form for a vendor's price offer tied to a (hidden) product."""
    class Meta:
        model = models.Offer
        fields = ("product", "unit_price", "note", "shipping_price")
        widgets = {
            'product': forms.HiddenInput,
            'unit_price': CurrencyInput,
            'shipping_price': CurrencyInput,
            'note': widgets.TextInput,
        }
    class Media:
        js = list()
    def clean_shipping_price(self):
        """Treat an empty shipping price as zero."""
        shipping_price = self.cleaned_data.get('shipping_price', '')
        if not shipping_price:
            return Decimal('0.00')
        return Decimal(shipping_price)
    def _post_clean(self):
        # NOTE(review): converts a ValueError from ModelForm._post_clean
        # into a field error on ``product`` -- presumably triggered by a
        # missing hidden product value; confirm.  Returns None in that case.
        try:
            return super(OfferForm, self)._post_clean()
        except ValueError:
            self._errors['product'] = ErrorList([_("Field is required"), ])
class AddressesForm(forms.Form):
    """Uberform which manages shipping and billing addresses.

    It provides the option for the addresses to be the same.
    You can pass `billing` (resp. `shipping`) models instances to edit them.
    You can pass your own `billing_form_class` and `shipping_form_class` which
    have to be `ModelForm` subclasses.

    Validity of empty form is controlled by a checkbox field `necessary`. If the
    addresses are not necessary then forms data "shipping" and "billing" will
    be always empty dictionaries.

    This form contains one own field - `addresses_the_same` and two subforms -
    `billing`, `shipping`
    """
    error_messages = {
        'empty': _('Shipping address has to be filled when marked different'),
        'required': _('Billing address is required')
    }
    necessary = forms.BooleanField(
        label=_("Mark whether address is necessary"), required=False, initial=False)
    addresses_the_same = forms.BooleanField(
        label=_("Shipping is the same as billing"), required=False, initial=True)
    def __init__(self, data=None, files=None, billing=None, shipping=None,
                 billing_form_class=None, shipping_form_class=None,
                 auto_id='id_%s', prefix=None, initial=None, error_class=ErrorList,
                 label_suffix=None):
        """Initialize with two addresses for billing and shipping.

        Bug fix: ``initial`` previously defaulted to a *mutable* ``{}``
        shared between all instances (and ``.pop()``ed below); it now
        defaults to ``None`` and is normalized per call.
        NOTE(review): ``auto_id``/``prefix``/``error_class`` are accepted
        but never forwarded to ``super().__init__`` -- confirm intent.
        """
        if initial is None:
            initial = {}
        super(AddressesForm, self).__init__(data, files,
                                            initial={"addresses_the_same": (shipping == billing)},
                                            label_suffix=label_suffix)
        # At least one concrete form class is required; each defaults to
        # the other when missing.
        assert billing_form_class is not None or shipping_form_class is not None
        # TODO: construct a ModelForm from Address model instance
        bform = (billing_form_class or shipping_form_class)
        sform = (shipping_form_class or billing_form_class)
        self.billing = bform(data, files, instance=billing, prefix="billing",
                             initial=initial.pop("billing", None), label_suffix=label_suffix)
        self.shipping = sform(data, files, prefix="shipping",
                              instance=shipping if shipping != billing else None,
                              initial=initial.pop("shipping", None), label_suffix=label_suffix)
        self.billing_empty = False  # helper in save method (bcs Form does not have is_empty method)
    def clean(self):
        """The form is valid even when both addresses are empty."""
        data = self.cleaned_data
        is_necessary = data.get('necessary', False)
        # Billing is required (if `is_necessary`)
        if is_necessary and not self.billing.is_valid():
            raise ValidationError(self.error_messages['required'])
        # User marks addresses as different - check they both are valid
        if is_necessary and not data.get('addresses_the_same', True):
            if not all((self.shipping.is_valid(), self.billing.is_valid())):
                raise ValidationError(self.error_messages['empty'])
        # Mark as valid in case no addresses are necessary
        if not is_necessary:
            self.billing._errors = ErrorDict()
            self.shipping._errors = ErrorDict()
        data['billing'] = getattr(self.billing, "cleaned_data", {})
        data['shipping'] = getattr(self.shipping, "cleaned_data", {})
        return data
    def save(self, commit=True):
        """Return tuple with address models.

        In the case when empty form was allowed (`required=False` in the constructor)
        tuple `(None, None)` might be returned.
        """
        billing = None
        shipping = None
        if not self.cleaned_data['necessary']:
            return (billing, shipping)
        billing = self.billing.save(commit=commit)
        if self.cleaned_data['addresses_the_same']:
            shipping = billing
        else:
            # An address that previously doubled as shipping must be
            # detached before saving a separate shipping address.
            if billing.user_shipping is not None:
                billing.user_shipping = None
                billing.save()
            shipping = self.shipping.save(commit=commit)
        return (billing, shipping)
    def save_to_request(self, request):
        """Persist the addresses, attaching them to the authenticated
        user or -- for anonymous visitors -- to the session."""
        if request.user.is_authenticated():
            billing, shipping = self.save(commit=False)
            if shipping:
                shipping.user_shipping = request.user
                if not shipping.name:
                    shipping.name = request.user.get_full_name()
                if shipping != billing:
                    # reset billing address because it could have changed
                    shipping.user_billing = None
                shipping.save()
            if billing:
                billing.user_billing = request.user
                if not billing.name:
                    billing.name = request.user.get_full_name()
                billing.save()
        else:
            billing, shipping = self.save(commit=True)
            if shipping:
                request.session['shipping_address_id'] = shipping.pk
            if billing:
                request.session['billing_address_id'] = billing.pk
        return billing, shipping
| 2.03125 | 2 |
format_data.py | JohndeVostok/THU-Network-Management | 0 | 12769775 | import os
import argparse
import json
from utils import print_host
from xml.dom import minidom
def parse_port(port_node):
    """Convert one <port> DOM node into a plain dict (protocol, portid,
    state, service)."""
    return {
        "protocol": port_node.getAttribute("protocol"),
        "portid": port_node.getAttribute("portid"),
        "state": port_node.getElementsByTagName("state")[0].getAttribute("state"),
        "service": port_node.getElementsByTagName("service")[0].getAttribute("name"),
    }


def parse_os_match(os_node):
    """Convert one <osmatch> DOM node into a dict.

    The dict was previously bound to a local named ``os``, shadowing the
    ``os`` module; it is named ``os_info`` here.
    """
    osclass = os_node.getElementsByTagName("osclass")[0]
    os_info = {
        "name": os_node.getAttribute("name"),
        "type": osclass.getAttribute("type"),
        "vendor": osclass.getAttribute("vendor"),
        "family": osclass.getAttribute("osfamily"),
        "cpe": [c.childNodes[0].data for c in osclass.getElementsByTagName("cpe")],
    }
    return os_info


def parse_host(host_node):
    """Convert one <host> DOM node from an nmap XML report into a dict.

    Always contains addr/addr_type/hostnames/ports/os_list; ``fingers``
    is only present when an <os> section exists (same as the original).
    """
    host = {"addr": "", "addr_type": "", "hostnames": [], "ports": [], "os_list": []}
    addr_node = host_node.getElementsByTagName("address")[0]
    host["addr"] = addr_node.getAttribute("addr")
    host["addr_type"] = addr_node.getAttribute("addrtype")
    host["hostnames"] = [
        {"name": hn.getAttribute("name"), "type": hn.getAttribute("type")}
        for hn in host_node.getElementsByTagName("hostnames")[0].getElementsByTagName("hostname")
    ]
    ports_root = host_node.getElementsByTagName("ports")
    if ports_root:
        host["ports"] = [parse_port(p) for p in ports_root[0].getElementsByTagName("port")]
    os_root = host_node.getElementsByTagName("os")
    if os_root:
        host["os_list"] = [parse_os_match(m) for m in os_root[0].getElementsByTagName("osmatch")]
        host["fingers"] = [f.getAttribute("fingerprint")
                           for f in os_root[0].getElementsByTagName("osfingerprint")]
    return host


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", type=str, help="input dir")
    parser.add_argument("--output", type=str, help="output file")
    args = parser.parse_args()
    print(args)
    hosts = []
    # Parse every *.xml file in the input directory and collect all hosts.
    for filename in os.listdir(args.input):
        if filename.split(".")[-1] != "xml":
            continue
        domTree = minidom.parse(args.input + "/" + filename)
        for host_node in domTree.documentElement.getElementsByTagName("host"):
            hosts.append(parse_host(host_node))
    with open(args.output, "w") as f:
        json.dump(hosts, f)
external/kit_sch_ge/run_tracking.py | sjeknic/CellST | 0 | 12769776 | from pathlib import Path
import numpy as np
from tifffile import imread
from tracker.export import ExportResults
from tracker.extract_data import get_img_files
from tracker.extract_data import get_indices_pandas
from tracker.tracking import TrackingConfig, MultiCellTracker
def run_tracker(img_path, segm_path, res_path, delta_t=3, default_roi_size=2):
    """Run the multi-cell tracker over an image/segmentation sequence and
    export the resulting tracks.

    :param img_path: folder with the raw image frames
    :param segm_path: folder with the segmentation masks
    :param res_path: folder to write tracking results into
    :param delta_t: temporal window passed to the tracker config
    :param default_roi_size: multiplier for the estimated object size
    """
    img_path = Path(img_path)
    segm_path = Path(segm_path)
    res_path = Path(res_path)
    img_files = get_img_files(img_path)
    segm_files = get_img_files(segm_path, 'mask')
    # Read the last segmentation frame once and reuse it -- the original
    # code loaded the same file from disk twice.
    last_mask = imread(segm_files[max(segm_files.keys())])
    # set roi size
    # assume img shape z,x,y
    dummy = np.squeeze(last_mask)
    img_shape = dummy.shape
    masks = get_indices_pandas(last_mask)
    # Per-object bounding-box extents (max index - min index + 1).
    m_shape = np.stack(masks.apply(lambda x: np.max(np.array(x), axis=-1) - np.min(np.array(x), axis=-1) + 1))
    if len(img_shape) == 2:
        if len(masks) > 10:
            # Enough objects: derive ROI from the median object size.
            m_size = np.median(np.stack(m_shape)).astype(int)
            roi_size = (m_size * default_roi_size, m_size * default_roi_size)
        else:
            # Few objects: fall back to a tenth of the image extent.
            roi_size = tuple((np.array(dummy.shape) // 10).astype(int))
    else:
        roi_size = tuple((np.median(np.stack(m_shape), axis=0) * default_roi_size).astype(int))
    config = TrackingConfig(img_files, segm_files, roi_size, delta_t=delta_t, cut_off_distance=None)
    tracker = MultiCellTracker(config)
    tracks = tracker()
    exporter = ExportResults()
    exporter(tracks, res_path, tracker.img_shape, time_steps=sorted(img_files.keys()))
if __name__ == '__main__':
    from argparse import ArgumentParser
    parser = ArgumentParser(description='Tracking KIT-Sch-GE.')
    parser.add_argument('--image_path', type=str, help='path to the folder containing the raw images.')
    parser.add_argument('--segmentation_path', type=str, help='path to the folder containing the segmentation images.')
    parser.add_argument('--results_path', type=str, help='path where to store the tracking results. '
                                                         'If the results path is the same as the segmentation'
                                                         '_path the segmentation images will be overwritten.')
    parser.add_argument('--delta_t', type=int, default=3)
    parser.add_argument('--default_roi_size', type=int, default=2)
    cli_args = parser.parse_args()
    run_tracker(cli_args.image_path, cli_args.segmentation_path,
                cli_args.results_path, cli_args.delta_t,
                cli_args.default_roi_size)
| 2.3125 | 2 |
app/decorators.py | Andrew342/flask_study | 0 | 12769777 | from functools import wraps
from flask import abort
from flask_login import current_user
from .models import Permission
def permission_required(permission):
    """Build a view decorator that aborts with HTTP 403 unless the
    current user holds *permission*."""
    def decorator(view):
        @wraps(view)
        def decorated_view(*args, **kwargs):
            if not current_user.can(permission):
                abort(403)
            return view(*args, **kwargs)
        return decorated_view
    return decorator
def admin_required(f):
    """Shortcut decorator: the view requires the ADMIN permission."""
    return permission_required(Permission.ADMIN)(f)
app/users/models/users.py | chavez897/blood-bank-management | 0 | 12769778 | """
Application name: users.py
Author/Programmer: <NAME>
Date application created: April 1st, 2022
This model helps to define the structure of stored data.
The fields used are:
*id
*is_active
*email
*username
*name
*last_name
*is_verified
"""
from django.db import models
from django.utils.translation import gettext_lazy as _
from utils.models import CustomAbstractUser
class User(CustomAbstractUser):
    """Project user model: the email address is the login field and must
    be unique; ``username``, ``name`` and ``last_name`` stay required."""
    is_active = models.BooleanField(
        _("active"),
        default=True,
        # Bug fix: the two strings were previously followed by commas,
        # accidentally passing a *tuple* as help_text; Django expects a
        # single string (implicit concatenation used here instead).
        help_text=(
            "Indica si el registro debe ser tratado como activo. "
            "Desmarque esta opción en lugar de borrar el registro"
        ),
    )
    email = models.EmailField(
        "email address",
        unique=True,
        error_messages={"unique": "A user with that email already exists."},
    )
    # Authenticate with the email address rather than the username.
    USERNAME_FIELD = "email"
    REQUIRED_FIELDS = ["username", "name", "last_name"]
    is_verified = models.BooleanField(
        "verified",
        default=True,
        help_text="Set to true when the user have verified its email address.",
    )
    class Meta:
        verbose_name = "User"
        verbose_name_plural = "Users"
    def __str__(self):
        """Return username."""
        return self.username
nncli/gui.py | CraigInches/nncli | 0 | 12769779 | # -*- coding: utf-8 -*-
"""nncli_gui module"""
import hashlib
import subprocess
import threading
import urwid
from . import view_titles, view_note, view_help, view_log, user_input
from .utils import exec_cmd_on_note, get_pager
# pylint: disable=too-many-instance-attributes, unused-argument
class NncliGui:
"""NncliGui class. Responsible for the console GUI view logic."""
def __init__(self, config, logger, ndb, key=None):
self.ndb = ndb
self.logger = logger
self.config = config
self.last_view = []
self.status_bar = self.config.get_config('status_bar')
self.config.state.current_sort_mode = \
self.config.get_config('sort_mode')
self.log_lock = threading.Lock()
self.log_alarms = 0
self.logs = []
self.thread_sync = threading.Thread(
target=self.ndb.sync_worker,
args=[self.config.state.do_server_sync]
)
self.thread_sync.setDaemon(True)
self.view_titles = \
view_titles.ViewTitles(
self.config,
{
'ndb' : self.ndb,
'search_string' : None,
'log' : self.log
}
)
self.view_note = \
view_note.ViewNote(
self.config,
{
'ndb' : self.ndb,
'id' : key, # initial key to view or None
'log' : self.log
}
)
self.view_log = view_log.ViewLog(self.config, self.logger)
self.view_help = view_help.ViewHelp(self.config)
palette = \
[
(
'default',
self.config.get_color('default_fg'),
self.config.get_color('default_bg')
),
(
'status_bar',
self.config.get_color('status_bar_fg'),
self.config.get_color('status_bar_bg')
),
(
'log',
self.config.get_color('log_fg'),
self.config.get_color('log_bg')
),
(
'user_input_bar',
self.config.get_color('user_input_bar_fg'),
self.config.get_color('user_input_bar_bg')
),
(
'note_focus',
self.config.get_color('note_focus_fg'),
self.config.get_color('note_focus_bg')
),
(
'note_title_day',
self.config.get_color('note_title_day_fg'),
self.config.get_color('note_title_day_bg')
),
(
'note_title_week',
self.config.get_color('note_title_week_fg'),
self.config.get_color('note_title_week_bg')
),
(
'note_title_month',
self.config.get_color('note_title_month_fg'),
self.config.get_color('note_title_month_bg')
),
(
'note_title_year',
self.config.get_color('note_title_year_fg'),
self.config.get_color('note_title_year_bg')
),
(
'note_title_ancient',
self.config.get_color('note_title_ancient_fg'),
self.config.get_color('note_title_ancient_bg')
),
(
'note_date',
self.config.get_color('note_date_fg'),
self.config.get_color('note_date_bg')
),
(
'note_flags',
self.config.get_color('note_flags_fg'),
self.config.get_color('note_flags_bg')
),
(
'note_category',
self.config.get_color('note_category_fg'),
self.config.get_color('note_category_bg')
),
(
'note_content',
self.config.get_color('note_content_fg'),
self.config.get_color('note_content_bg')
),
(
'note_content_focus',
self.config.get_color('note_content_focus_fg'),
self.config.get_color('note_content_focus_bg')
),
(
'note_content_old',
self.config.get_color('note_content_old_fg'),
self.config.get_color('note_content_old_bg')
),
(
'note_content_old_focus',
self.config.get_color(
'note_content_old_focus_fg'
),
self.config.get_color(
'note_content_old_focus_bg'
)
),
(
'help_focus',
self.config.get_color('help_focus_fg'),
self.config.get_color('help_focus_bg')
),
(
'help_header',
self.config.get_color('help_header_fg'),
self.config.get_color('help_header_bg')
),
(
'help_config',
self.config.get_color('help_config_fg'),
self.config.get_color('help_config_bg')
),
(
'help_value',
self.config.get_color('help_value_fg'),
self.config.get_color('help_value_bg')
),
(
'help_descr',
self.config.get_color('help_descr_fg'),
self.config.get_color('help_descr_bg')
)
]
self.master_frame = urwid.Frame(
body=urwid.Filler(urwid.Text('')),
header=None,
footer=urwid.Pile([urwid.Pile([]), urwid.Pile([])]),
focus_part='body')
self.nncli_loop = urwid.MainLoop(self.master_frame,
palette,
handle_mouse=False)
self.nncli_loop.set_alarm_in(0, self._gui_init_view, \
bool(key))
    def run(self):
        """Start the urwid main loop; blocks until the GUI exits."""
        self.nncli_loop.run()
    def _gui_header_clear(self):
        """Remove the status-bar widget from the header row and redraw."""
        self.master_frame.contents['header'] = (None, None)
        self.nncli_loop.draw_screen()
    def _gui_header_set(self, widget):
        """Install *widget* as the header row (status bar) and redraw."""
        self.master_frame.contents['header'] = (widget, None)
        self.nncli_loop.draw_screen()
    def _gui_footer_log_clear(self):
        """Clear the log at the bottom of the GUI.

        The footer is a Pile of (log pile, input pile); rebuild it with an
        empty log while preserving the current input widget.
        """
        gui = self._gui_footer_input_get()
        self.master_frame.contents['footer'] = \
            (urwid.Pile([urwid.Pile([]), urwid.Pile([gui])]), None)
        self.nncli_loop.draw_screen()
    def _gui_footer_log_set(self, pile):
        """Replace the footer's log section with *pile* (a list of
        widgets), keeping the current input widget intact."""
        gui = self._gui_footer_input_get()
        self.master_frame.contents['footer'] = \
            (urwid.Pile([urwid.Pile(pile), urwid.Pile([gui])]), None)
        self.nncli_loop.draw_screen()
    def _gui_footer_log_get(self):
        """Return the log pile (first element) of the footer."""
        return self.master_frame.contents['footer'][0].contents[0][0]
    def _gui_footer_input_clear(self):
        """Remove the input widget from the footer, keeping the log."""
        pile = self._gui_footer_log_get()
        self.master_frame.contents['footer'] = \
            (urwid.Pile([urwid.Pile([pile]), urwid.Pile([])]), None)
        self.nncli_loop.draw_screen()
    def _gui_footer_input_set(self, gui):
        """Install *gui* as the footer's input widget, keeping the log."""
        pile = self._gui_footer_log_get()
        self.master_frame.contents['footer'] = \
            (urwid.Pile([urwid.Pile([pile]), urwid.Pile([gui])]), None)
        self.nncli_loop.draw_screen()
    def _gui_footer_input_get(self):
        """Return the input pile (second element) of the footer."""
        return self.master_frame.contents['footer'][0].contents[1][0]
    def _gui_footer_focus_input(self):
        """Move keyboard focus to the input widget within the footer."""
        self.master_frame.focus_position = 'footer'
        self.master_frame.contents['footer'][0].focus_position = 1
    def _gui_body_set(self, widget):
        """Replace the main body widget, refresh the status bar, redraw."""
        self.master_frame.contents['body'] = (widget, None)
        self._gui_update_status_bar()
        self.nncli_loop.draw_screen()
    def gui_body_get(self):
        """Return the widget currently shown as the GUI body."""
        return self.master_frame.contents['body'][0]
    def _gui_body_focus(self):
        """Move keyboard focus back to the body frame."""
        self.master_frame.focus_position = 'body'
    def gui_update_view(self):
        """Rebuild the titles list (and open note view) after a data
        change, preserving the currently focused note when possible."""
        if not self.config.state.do_gui:
            return
        # Remember the focused note's key; the list may be empty.
        try:
            cur_key = self.view_titles.note_list \
                [self.view_titles.focus_position].note['localkey']
        except IndexError:
            cur_key = None
        self.view_titles.update_note_list(
            self.view_titles.search_string,
            sort_mode=self.config.state.current_sort_mode
        )
        self.view_titles.focus_note(cur_key)
        # When a single note is open, refresh it too.
        if self.gui_body_get().__class__ == view_note.ViewNote:
            self.view_note.update_note_view()
        self._gui_update_status_bar()
    def _gui_update_status_bar(self):
        """Refresh or hide the status bar per the ``status_bar`` setting."""
        if self.status_bar != 'yes':
            self._gui_header_clear()
        else:
            self._gui_header_set(self.gui_body_get().get_status_bar())
    def _gui_switch_frame_body(self, new_view, save_current_view=True):
        """
        Switch the body frame of the GUI. Used to switch to a new
        view.

        ``new_view=None`` means "go back": pop the previous view from the
        history stack, or stop the GUI when the stack is empty.  When
        switching forward, the current view is pushed onto the stack
        unless ``save_current_view`` is False.
        """
        if new_view is None:
            if not self.last_view:
                self._gui_stop()
            else:
                self._gui_body_set(self.last_view.pop())
        else:
            # Only switch when the target view class actually differs.
            if self.gui_body_get().__class__ != new_view.__class__:
                if save_current_view:
                    self.last_view.append(self.gui_body_get())
                self._gui_body_set(new_view)
    def _delete_note_callback(self, key, delete):
        """Confirmation callback: mark note *key* deleted when *delete*
        is truthy, refresh the titles view and trigger a server sync."""
        if not delete:
            return
        self.ndb.set_note_deleted(key, True)
        if self.gui_body_get().__class__ == view_titles.ViewTitles:
            self.view_titles.update_note_title()
        self._gui_update_status_bar()
        # Push the deletion to the server in the background.
        self.ndb.sync_worker_go()
    def _gui_yes_no_input(self, args, yes_no):
        """Handle the answer of a yes/no footer prompt.

        ``args`` is ``(callback, callback_arg)``; the callback receives
        ``True`` only for an affirmative answer (YES/Yes/yes/Y/y).
        """
        self._gui_footer_input_clear()
        self._gui_body_focus()
        self.master_frame.keypress = self._gui_frame_keypress
        args[0](args[1],
                yes_no in ['YES', 'Yes', 'yes', 'Y', 'y']
                )
def _gui_search_input(self, args, search_string):
"""Create a search input dialog at the GUI footer"""
self._gui_footer_input_clear()
self._gui_body_focus()
self.master_frame.keypress = self._gui_frame_keypress
if search_string:
if self.gui_body_get() == self.view_note:
self.config.state.search_direction = args[1]
self.view_note.search_note_view_next(
search_string=search_string,
search_mode=args[0]
)
else:
self.view_titles.update_note_list(
search_string,
args[0],
sort_mode=self.config.state.current_sort_mode
)
self._gui_body_set(self.view_titles)
def _gui_category_input(self, args, category):
"""Create a category input at the GUI footer"""
self._gui_footer_input_clear()
self._gui_body_focus()
self.master_frame.keypress = self._gui_frame_keypress
if category is not None:
if self.gui_body_get().__class__ == view_titles.ViewTitles:
note = self.view_titles.note_list \
[self.view_titles.focus_position].note
else: # self.gui_body_get().__class__ == view_note.ViewNote:
note = self.view_note.note
self.ndb.set_note_category(note['localkey'], category)
if self.gui_body_get().__class__ == view_titles.ViewTitles:
self.view_titles.update_note_title()
else: # self.gui_body_get().__class__ == view_note.ViewNote:
self.view_note.update_note_view()
self._gui_update_status_bar()
self.ndb.sync_worker_go()
def _gui_pipe_input(self, args, cmd):
"""Create a pipe input dialog at the GUI footoer"""
self._gui_footer_input_clear()
self._gui_body_focus()
self.master_frame.keypress = self._gui_frame_keypress
if cmd is not None:
if self.gui_body_get().__class__ == view_titles.ViewTitles:
note = self.view_titles.note_list \
[self.view_titles.focus_position].note
else: # self.gui_body_get().__class__ == view_note.ViewNote:
note = self.view_note.old_note \
if self.view_note.old_note \
else self.view_note.note
try:
self._gui_clear()
pipe = subprocess.Popen(cmd, stdin=subprocess.PIPE, shell=True)
pipe.communicate(note['content'].encode('utf-8'))
pipe.stdin.close()
pipe.wait()
except OSError as ex:
self.log('Pipe error: %s' % ex)
finally:
self._gui_reset()
# pylint: disable=too-many-return-statements, too-many-branches
# pylint: disable=too-many-statements
    def _gui_frame_keypress(self, size, key):
        """Keypress handler for the GUI.

        Dispatches on the configured keybindings.  Returns None when the
        key was consumed here, returns the key unchanged when the current
        view cannot handle that action, and otherwise forwards the key to
        the body widget.
        """
        # convert space character into name
        if key == ' ':
            key = 'space'
        contents = self.gui_body_get()
        # --- global actions ---------------------------------------------
        if key == self.config.get_keybind('quit'):
            self._gui_switch_frame_body(None)
        elif key == self.config.get_keybind('help'):
            self._gui_switch_frame_body(self.view_help)
        elif key == self.config.get_keybind('sync'):
            # Resetting last_sync forces the next sync to be a full one.
            self.ndb.last_sync = 0
            self.ndb.sync_worker_go()
        elif key == self.config.get_keybind('view_log'):
            self.view_log.update_log()
            self._gui_switch_frame_body(self.view_log)
        # --- list navigation --------------------------------------------
        elif key == self.config.get_keybind('down'):
            if not contents.body.positions():
                return None
            last = len(contents.body.positions())
            if contents.focus_position == (last - 1):
                return None
            contents.focus_position += 1
            contents.render(size)
        elif key == self.config.get_keybind('up'):
            if not contents.body.positions():
                return None
            if contents.focus_position == 0:
                return None
            contents.focus_position -= 1
            contents.render(size)
        elif key == self.config.get_keybind('page_down'):
            if not contents.body.positions():
                return None
            last = len(contents.body.positions())
            # size[1] is the visible height in rows.
            next_focus = contents.focus_position + size[1]
            if next_focus >= last:
                next_focus = last - 1
            contents.change_focus(size, next_focus,
                                  offset_inset=0,
                                  coming_from='above')
        elif key == self.config.get_keybind('page_up'):
            if not contents.body.positions():
                return None
            if 'bottom' in contents.ends_visible(size):
                last = len(contents.body.positions())
                next_focus = last - size[1] - size[1]
            else:
                next_focus = contents.focus_position - size[1]
            if next_focus < 0:
                next_focus = 0
            contents.change_focus(size, next_focus,
                                  offset_inset=0,
                                  coming_from='below')
        elif key == self.config.get_keybind('half_page_down'):
            if not contents.body.positions():
                return None
            last = len(contents.body.positions())
            next_focus = contents.focus_position + (size[1] // 2)
            if next_focus >= last:
                next_focus = last - 1
            contents.change_focus(size, next_focus,
                                  offset_inset=0,
                                  coming_from='above')
        elif key == self.config.get_keybind('half_page_up'):
            if not contents.body.positions():
                return None
            if 'bottom' in contents.ends_visible(size):
                last = len(contents.body.positions())
                next_focus = last - size[1] - (size[1] // 2)
            else:
                next_focus = contents.focus_position - (size[1] // 2)
            if next_focus < 0:
                next_focus = 0
            contents.change_focus(size, next_focus,
                                  offset_inset=0,
                                  coming_from='below')
        elif key == self.config.get_keybind('bottom'):
            if not contents.body.positions():
                return None
            contents.change_focus(size, (len(contents.body.positions()) - 1),
                                  offset_inset=0,
                                  coming_from='above')
        elif key == self.config.get_keybind('top'):
            if not contents.body.positions():
                return None
            contents.change_focus(size, 0,
                                  offset_inset=0,
                                  coming_from='below')
        # --- next/previous note while a note view is open ---------------
        elif key == self.config.get_keybind('view_next_note'):
            if self.gui_body_get().__class__ != view_note.ViewNote:
                return key
            if not self.view_titles.body.positions():
                return None
            last = len(self.view_titles.body.positions())
            if self.view_titles.focus_position == (last - 1):
                return None
            self.view_titles.focus_position += 1
            contents.update_note_view(
                self.view_titles. \
                    note_list[self.view_titles. \
                        focus_position].note['localkey']
            )
            self._gui_switch_frame_body(self.view_note)
        elif key == self.config.get_keybind('view_prev_note'):
            if self.gui_body_get().__class__ != view_note.ViewNote:
                return key
            if not self.view_titles.body.positions():
                return None
            if self.view_titles.focus_position == 0:
                return None
            self.view_titles.focus_position -= 1
            contents.update_note_view(
                self.view_titles. \
                    note_list[self.view_titles. \
                        focus_position].note['localkey']
            )
            self._gui_switch_frame_body(self.view_note)
        elif key == self.config.get_keybind('status'):
            # Toggle the status bar off / back to its configured setting.
            if self.status_bar == 'yes':
                self.status_bar = 'no'
            else:
                self.status_bar = self.config.get_config('status_bar')
        # --- note operations --------------------------------------------
        elif key == self.config.get_keybind('create_note'):
            if self.gui_body_get().__class__ != view_titles.ViewTitles:
                return key
            # Blank the screen, launch the external editor, restore.
            self._gui_clear()
            content = exec_cmd_on_note(None, self.config, self, self.logger)
            self._gui_reset()
            if content:
                self.log('New note created')
                self.ndb.create_note(content)
                self.gui_update_view()
                self.ndb.sync_worker_go()
        elif key == self.config.get_keybind('edit_note') or \
             key == self.config.get_keybind('view_note_ext') or \
             key == self.config.get_keybind('view_note_json'):
            if self.gui_body_get().__class__ != view_titles.ViewTitles and \
               self.gui_body_get().__class__ != view_note.ViewNote:
                return key
            if self.gui_body_get().__class__ == view_titles.ViewTitles:
                if not contents.body.positions():
                    return None
                note = contents.note_list[contents.focus_position].note
            else: # self.gui_body_get().__class__ == view_note.ViewNote:
                if key == self.config.get_keybind('edit_note'):
                    note = contents.note
                else:
                    # External/JSON viewing prefers the saved copy.
                    note = contents.old_note if contents.old_note \
                            else contents.note
            self._gui_clear()
            if key == self.config.get_keybind('edit_note'):
                content = exec_cmd_on_note(note, self.config, self,
                                           self.logger)
            elif key == self.config.get_keybind('view_note_ext'):
                content = exec_cmd_on_note(
                    note,
                    self.config,
                    self,
                    self.logger,
                    cmd=get_pager(self.config, self.logger))
            else: # key == self.config.get_keybind('view_note_json')
                content = exec_cmd_on_note(
                    note,
                    self.config,
                    self,
                    self.logger,
                    cmd=get_pager(self.config, self.logger),
                    raw=True
                )
            self._gui_reset()
            if not content:
                return None
            # Only write back when the content actually changed.
            md5_old = hashlib.md5(note['content'].encode('utf-8')).digest()
            md5_new = hashlib.md5(content.encode('utf-8')).digest()
            if md5_old != md5_new:
                self.log('Note updated')
                self.ndb.set_note_content(note['localkey'], content)
                if self.gui_body_get().__class__ == view_titles.ViewTitles:
                    contents.update_note_title()
                else: # self.gui_body_get().__class__ == view_note.ViewNote:
                    contents.update_note_view()
                self.ndb.sync_worker_go()
            else:
                self.log('Note unchanged')
        elif key == self.config.get_keybind('view_note'):
            if self.gui_body_get().__class__ != view_titles.ViewTitles:
                return key
            if not contents.body.positions():
                return None
            self.view_note.update_note_view(
                contents.note_list[contents.focus_position]. \
                    note['localkey'])
            self._gui_switch_frame_body(self.view_note)
        elif key == self.config.get_keybind('pipe_note'):
            if self.gui_body_get().__class__ != view_titles.ViewTitles and \
               self.gui_body_get().__class__ != view_note.ViewNote:
                return key
            if self.gui_body_get().__class__ == view_titles.ViewTitles:
                if not contents.body.positions():
                    return None
                note = contents.note_list[contents.focus_position].note
            else: # self.gui_body_get().__class__ == view_note.ViewNote:
                note = contents.old_note if contents.old_note else contents.note
            # Prompt for the shell command in the footer.
            self._gui_footer_input_set(
                urwid.AttrMap(
                    user_input.UserInput(
                        self.config,
                        key,
                        '',
                        self._gui_pipe_input,
                        None
                    ),
                    'user_input_bar'
                )
            )
            self._gui_footer_focus_input()
            self.master_frame.keypress = \
                self._gui_footer_input_get().keypress
        elif key == self.config.get_keybind('note_delete'):
            if self.gui_body_get().__class__ != view_titles.ViewTitles and \
               self.gui_body_get().__class__ != view_note.ViewNote:
                return key
            if self.gui_body_get().__class__ == view_titles.ViewTitles:
                if not contents.body.positions():
                    return None
                note = contents.note_list[contents.focus_position].note
            else: # self.gui_body_get().__class__ == view_note.ViewNote:
                note = contents.note
            # Confirm deletion with a footer yes/no prompt.
            self._gui_footer_input_set(
                urwid.AttrMap(
                    user_input.UserInput(
                        self.config,
                        'Delete (y/n): ',
                        '',
                        self._gui_yes_no_input,
                        [
                            self._delete_note_callback,
                            note['localkey']
                        ]
                    ),
                    'user_input_bar'
                )
            )
            self._gui_footer_focus_input()
            self.master_frame.keypress = \
                self._gui_footer_input_get().keypress
        elif key == self.config.get_keybind('note_favorite'):
            if self.gui_body_get().__class__ != view_titles.ViewTitles and \
               self.gui_body_get().__class__ != view_note.ViewNote:
                return key
            if self.gui_body_get().__class__ == view_titles.ViewTitles:
                if not contents.body.positions():
                    return None
                note = contents.note_list[contents.focus_position].note
            else: # self.gui_body_get().__class__ == view_note.ViewNote:
                note = contents.note
            favorite = not note['favorite']
            self.ndb.set_note_favorite(note['localkey'], favorite)
            if self.gui_body_get().__class__ == view_titles.ViewTitles:
                contents.update_note_title()
            self.ndb.sync_worker_go()
        elif key == self.config.get_keybind('note_category'):
            if self.gui_body_get().__class__ != view_titles.ViewTitles and \
               self.gui_body_get().__class__ != view_note.ViewNote:
                return key
            if self.gui_body_get().__class__ == view_titles.ViewTitles:
                if not contents.body.positions():
                    return None
                note = contents.note_list[contents.focus_position].note
            else: # self.gui_body_get().__class__ == view_note.ViewNote:
                note = contents.note
            # Prompt for the category, pre-filled with the current one.
            self._gui_footer_input_set(
                urwid.AttrMap(
                    user_input.UserInput(
                        self.config,
                        'Category: ',
                        note['category'],
                        self._gui_category_input,
                        None
                    ),
                    'user_input_bar'
                )
            )
            self._gui_footer_focus_input()
            self.master_frame.keypress = \
                self._gui_footer_input_get().keypress
        # --- searching --------------------------------------------------
        elif key == self.config.get_keybind('search_gstyle') or \
             key == self.config.get_keybind('search_regex') or \
             key == self.config.get_keybind('search_prev_gstyle') or \
             key == self.config.get_keybind('search_prev_regex'):
            if self.gui_body_get().__class__ != view_titles.ViewTitles and \
               self.gui_body_get().__class__ != view_note.ViewNote:
                return key
            if self.gui_body_get().__class__ == view_note.ViewNote:
                if key == self.config.get_keybind('search_prev_gstyle') or \
                   key == self.config.get_keybind('search_prev_regex'):
                    self.view_note.search_direction = 'backward'
                else:
                    self.view_note.search_direction = 'forward'
            # options == [search_mode, direction]
            options = [
                'gstyle' if key == self.config.get_keybind('search_gstyle')
                or key == self.config.get_keybind('search_prev_gstyle')
                else 'regex',
                'backward' if key ==
                self.config.get_keybind('search_prev_gstyle')
                or key == self.config.get_keybind('search_prev_regex')
                else 'forward'
            ]
            # vi-style caption: '/' forward, '?' backward.
            caption = '{}{}'.format('(regex) '
                                    if options[0] == 'regex'
                                    else '',
                                    '/' if options[1] == 'forward'
                                    else '?')
            self._gui_footer_input_set(
                urwid.AttrMap(
                    user_input.UserInput(
                        self.config,
                        caption,
                        '',
                        self._gui_search_input,
                        options
                    ),
                    'user_input_bar'
                )
            )
            self._gui_footer_focus_input()
            self.master_frame.keypress = \
                self._gui_footer_input_get().keypress
        elif key == self.config.get_keybind('search_next'):
            if self.gui_body_get().__class__ != view_note.ViewNote:
                return key
            self.view_note.search_note_view_next()
        elif key == self.config.get_keybind('search_prev'):
            if self.gui_body_get().__class__ != view_note.ViewNote:
                return key
            self.view_note.search_note_view_prev()
        elif key == self.config.get_keybind('clear_search'):
            if self.gui_body_get().__class__ != view_titles.ViewTitles:
                return key
            self.view_titles.update_note_list(
                None,
                sort_mode=self.config.state.current_sort_mode
            )
            self._gui_body_set(self.view_titles)
        # --- sorting ----------------------------------------------------
        elif key == self.config.get_keybind('sort_date'):
            if self.gui_body_get().__class__ != view_titles.ViewTitles:
                return key
            self.config.state.current_sort_mode = 'date'
            self.view_titles.sort_note_list('date')
        elif key == self.config.get_keybind('sort_alpha'):
            if self.gui_body_get().__class__ != view_titles.ViewTitles:
                return key
            self.config.state.current_sort_mode = 'alpha'
            self.view_titles.sort_note_list('alpha')
        elif key == self.config.get_keybind('sort_categories'):
            if self.gui_body_get().__class__ != view_titles.ViewTitles:
                return key
            self.config.state.current_sort_mode = 'categories'
            self.view_titles.sort_note_list('categories')
        elif key == self.config.get_keybind('copy_note_text'):
            if self.gui_body_get().__class__ != view_note.ViewNote:
                return key
            self.view_note.copy_note_text()
        else:
            # Unhandled here: delegate to the body widget.
            return contents.keypress(size, key)
        self._gui_update_status_bar()
        return None
    def _gui_init_view(self, loop, show_note):
        """Initialize the GUI"""
        # 'loop' is the urwid alarm-callback argument; it is unused here.
        self.master_frame.keypress = self._gui_frame_keypress
        self._gui_body_set(self.view_titles)
        if show_note:
            # note that title view set first to prime the view stack
            self._gui_switch_frame_body(self.view_note)
        # Start the background sync thread once the UI is up.
        self.thread_sync.start()
def _gui_clear(self):
"""Clear the GUI"""
self.nncli_loop.widget = urwid.Filler(urwid.Text(''))
self.nncli_loop.draw_screen()
    def _gui_reset(self):
        """Reset the GUI"""
        # Restore the full frame after an external command blanked the screen.
        self.nncli_loop.widget = self.master_frame
        self.nncli_loop.draw_screen()
def _gui_stop(self):
"""Stop the GUI"""
# don't exit if there are any notes not yet saved to the disk
# NOTE: this was originally causing hangs on exit with urllib2
# should not be a problem now since using the requests library
# ref https://github.com/insanum/sncli/issues/18#issuecomment-105517773
if self.ndb.verify_all_saved():
# clear the screen and exit the urwid run loop
self._gui_clear()
raise urwid.ExitMainLoop()
self.log('WARNING: Not all notes saved'
'to disk (wait for sync worker)')
def log(self, msg):
"""Log as message, displaying to the user as appropriate"""
self.logger.log(msg)
self.log_lock.acquire()
self.log_alarms += 1
self.logs.append(msg)
if len(self.logs) > int(self.config.get_config('max_logs')):
self.log_alarms -= 1
self.logs.pop(0)
log_pile = []
for log in self.logs:
log_pile.append(urwid.AttrMap(urwid.Text(log), 'log'))
if self.config.state.verbose:
self._gui_footer_log_set(log_pile)
self.nncli_loop.set_alarm_in(
int(self.config.get_config('log_timeout')),
self._log_timeout, None)
self.log_lock.release()
def _log_timeout(self, loop, arg):
"""
Run periodically to check for new log entries to append to
the GUI footer
"""
self.log_lock.acquire()
self.log_alarms -= 1
if self.log_alarms == 0:
self._gui_footer_log_clear()
self.logs = []
else:
if self.logs:
self.logs.pop(0)
log_pile = []
for log in self.logs:
log_pile.append(urwid.AttrMap(urwid.Text(log), 'log'))
if self.config.state.verbose:
self._gui_footer_log_set(log_pile)
self.log_lock.release()
| 2.015625 | 2 |
test/AzflowTest.py | aslotnick/azflow | 8 | 12769780 | <reponame>aslotnick/azflow
from unittest import TestCase
from azflow.DAG import DAG
from azflow.Task import Task
from azflow.AzflowException import AzflowException
class AzflowTest(TestCase):
    """Unit tests for azflow DAG construction and cycle detection."""

    def test_dag_validate(self):
        """A diamond-shaped DAG validates; adding a cycle must fail."""
        dag = DAG(dag_id='test_dag')
        task_1 = Task(task_id='task_1', dag=dag)
        task_2 = Task(task_id='task_2', dag=dag)
        task_2.set_upstream(task_1)
        task_3 = Task(task_id='task_3', dag=dag)
        task_3.set_upstream(task_1)
        task_4 = Task(task_id='task_4', dag=dag)
        task_4.set_upstream(task_2)
        task_4.set_upstream(task_3)
        dag._prepare_flow()
        self.assertEqual(None, dag._validate())
        # Close the loop: task_1 now depends on task_4 -> cycle.
        task_1.set_upstream(task_4)
        dag._prepare_flow()
        with self.assertRaises(AzflowException):
            dag._validate()

    def test_task_cycle(self):
        """Creating a direct two-task cycle raises immediately."""
        dag = DAG(dag_id='test_dag')
        task_1 = Task(task_id='task_1', dag=dag)
        task_2 = Task(task_id='task_2', dag=dag)
        task_2.set_upstream(task_1)
        # A task may not depend on one of its own direct dependents.
        with self.assertRaises(AzflowException):
            task_1.set_upstream(task_2)
| 2.5625 | 3 |
src/test/python/unit/context/test_context.py | ettoreleandrotognoli/etto-robot | 0 | 12769781 | from aiounittest import AsyncTestCase
from urllib.parse import urlparse
from robot.api import Context
from robot.context.core import ContextImpl
class ResolveUrlContextImplTest(AsyncTestCase):
    """Tests for Context.resolve_url against a fixed base URL."""

    context: Context = None

    @classmethod
    def setUpClass(cls):
        base = urlparse('http://example.com/path1/page1?q=query#element-1')
        cls.context = ContextImpl(url=base)

    async def test_resolve_absolute_url(self):
        """A fully-qualified URL is returned unchanged."""
        absolute_url = 'https://http.cat/102'
        self.assertEqual(absolute_url,
                         self.context.resolve_url(absolute_url))

    async def test_resolve_scheme(self):
        """A scheme-relative URL inherits the base scheme."""
        url = '//http.cat/102'
        self.assertEqual('http:' + url, self.context.resolve_url(url))

    async def test_absolute_path(self):
        """An absolute path replaces the whole base path."""
        self.assertEqual(
            'http://example.com/path2/page2',
            self.context.resolve_url('/path2/page2')
        )

    async def test_relative_path(self):
        """A relative path resolves against the base directory."""
        self.assertEqual(
            'http://example.com/path1/page2',
            self.context.resolve_url('page2')
        )
| 2.671875 | 3 |
abc170/e_tle.py | nishio/atcoder | 1 | 12769782 | <filename>abc170/e_tle.py
from collections import defaultdict
# AtCoder ABC170 E: people belong to kindergartens; after each transfer
# query, print the minimum over kindergartens of the maximum member rate.
N, Q = [int(x) for x in input().split()]
rate = [None]  # 1-origin: rate[i] is person i's rating
k_to_ps = defaultdict(list)  # kindergarten -> list of person ids (1-origin)
p_to_k = [None]  # 1-origin: person -> current kindergarten
for i in range(N):
    A, B = [int(x) for x in input().split()]
    rate.append(A)
    k_to_ps[B].append(i + 1)
    p_to_k.append(B)
for i in range(Q):
    C, D = [int(x) for x in input().split()]
    frm = p_to_k[C]
    to = D
    k_to_ps[frm].remove(C)
    k_to_ps[to].append(C)
    # BUG FIX: record the transfer; without this, a later move of the
    # same person tried to remove them from a kindergarten they had
    # already left (ValueError / corrupted state).
    p_to_k[C] = to
    # "Weakest of the strongest": strongest member per non-empty
    # kindergarten, then the minimum of those.  (Stray debug prints of
    # intermediate state were removed - they corrupted the judged output.)
    buf = []
    for ps in k_to_ps.values():
        if ps:
            buf.append(max(rate[p] for p in ps))
    print(min(buf))
| 2.34375 | 2 |
datasets/spm_dataset.py | chinaliwenbo/ChineseBert | 298 | 12769783 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@file : spm_dataset.py
@author: zijun
@contact : <EMAIL>
@date : 2021/1/21 15:00
@version: 1.0
@desc : Dataset for sentence pair matching tasks
"""
from functools import partial
import torch
from torch.utils.data import DataLoader
from datasets.chinese_bert_dataset import ChineseBertDataset
from datasets.collate_functions import collate_to_max_length
class SPMDataset(ChineseBertDataset):
    """Sentence-pair matching dataset.

    Each input line is tab-separated: label, first sentence, second
    sentence, plus one trailing (ignored) field.  Items are returned as
    (input_ids, pinyin_ids, label) tensors, with the two sentences joined
    by a [SEP] (102) token and pinyin ids flattened 8-wide per token.
    """

    def get_lines(self):
        """Read all raw lines from the dataset file."""
        with open(self.data_path, 'r') as f:
            lines = f.readlines()
        return lines

    def __len__(self):
        """Number of examples (one per line)."""
        return len(self.lines)

    def __getitem__(self, idx):
        """Build the (input_ids, pinyin_ids, label) tensors for one line."""
        line = self.lines[idx]
        # Line layout: label \t sentence1 \t sentence2 \t (ignored).
        third, first, second, _ = line.split('\t')
        first = first.replace(" ", "")
        second = second.replace(" ", "")
        first_output = self.tokenizer.encode(first, add_special_tokens=False)
        first_pinyin_tokens = self.convert_sentence_to_pinyin_ids(first, first_output)
        second_output = self.tokenizer.encode(second, add_special_tokens=False)
        second_pinyin_tokens = self.convert_sentence_to_pinyin_ids(second, second_output)
        label = third
        # convert sentence to id
        bert_tokens = first_output.ids + [102] + second_output.ids
        pinyin_tokens = first_pinyin_tokens + [[0] * 8] + second_pinyin_tokens
        # Truncate to leave room for the [CLS]/[SEP] added below.
        if len(bert_tokens) > self.max_length - 2:
            bert_tokens = bert_tokens[:self.max_length - 2]
            pinyin_tokens = pinyin_tokens[:self.max_length - 2]
        # id nums should be same
        assert len(bert_tokens) <= self.max_length
        assert len(bert_tokens) == len(pinyin_tokens)
        # convert list to tensor
        input_ids = torch.LongTensor([101] + bert_tokens + [102])
        pinyin_ids = torch.LongTensor([[0] * 8] + pinyin_tokens + [[0] * 8]).view(-1)
        label = torch.LongTensor([int(label)])
        return input_ids, pinyin_ids, label
def unit_test():
    """Manual smoke test: print batch tensor shapes from a local data file."""
    data_path = "/data/nfsdata2/sunzijun/glyce/tasks/BQ/dev.tsv"
    chinese_bert_path = "/data/nfsdata2/sunzijun/glyce/best/ChineseBERT-base"
    dataset = SPMDataset(data_path=data_path, chinese_bert_path=chinese_bert_path)
    collate = partial(collate_to_max_length, fill_values=[0, 0, 0])
    dataloader = DataLoader(
        dataset=dataset,
        batch_size=10,
        num_workers=0,
        shuffle=False,
        collate_fn=collate,
    )
    for input_ids, pinyin_ids, label in dataloader:
        batch, length = input_ids.shape
        print(input_ids.shape)
        # Pinyin ids come back flattened; unflatten to inspect the shape.
        print(pinyin_ids.reshape(batch, length, -1).shape)
        print(label.view(-1).shape)
        print()
# Allow running this module directly as a quick manual smoke test.
if __name__ == '__main__':
    unit_test()
| 2.65625 | 3 |
sketches/Sampler.py | DanielTing/datasketches-experimentation | 1 | 12769784 | <filename>sketches/Sampler.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import random
import numpy as np
from random import uniform
from sortedcontainers import SortedList
from collections import namedtuple
Sample = namedtuple('Sample', ['priority', 'weight', 'item'])
from numba import jit
class Sampler:
    """Priority-sampling base class.

    Each offered item draws a priority ~ Uniform(0, 1/weight); items whose
    priority falls below the current threshold are kept in a sorted buffer.
    Subclasses hook postAdd() to enforce a size policy.
    """

    def __init__(self, seed=None):
        self.buffer = SortedList()
        self.threshold = float("inf")
        self.processed = 0
        self.seed = seed
        self.rng = random.Random(seed)

    def cdf(self, v, weight):
        """Pseudo-inclusion probability P(priority < v), capped at 1."""
        return min(1.0, weight * v)

    def priority(self, item, weight):
        """Draw a priority; heavier items get stochastically smaller ones."""
        return self.rng.uniform(0, 1. / weight)

    def pop(self):
        """Evict the largest-priority sample and lower the threshold."""
        sample = self.buffer.pop()
        self.threshold = sample.priority
        return sample.priority, sample.weight, sample.item

    def reduceSampleSize(self, k):
        """Pop until at most k samples remain."""
        while len(self.buffer) > k:
            self.pop()

    def postAdd(self, item, weight):
        """Hook for subclasses, called after a successful insert."""
        pass

    def add(self, item, weight):
        """Offer (item, weight) to the sample."""
        self.processed += 1
        rank = self.priority(item, weight)
        if rank < self.threshold:
            self.buffer.add(Sample(rank, weight, item))
            self.postAdd(item, weight)

    def getThreshold(self, item, w):
        """Threshold used for this item's inclusion probability."""
        return self.threshold

    def items(self):
        """
        Generator which returns item, pseudo-inclusion probability
        """
        for sample in self.buffer:
            prob = self.inc_probability(
                self.getThreshold(sample.item, sample.weight), sample.weight)
            yield sample.item, prob

    def inc_probability(self, x, w):
        """Inclusion probability of an item with weight w at threshold x."""
        return self.cdf(x, w)
##################################################################################################################
def filterSum(sampler, eval=lambda x: x, predicate=lambda x: True):
    """Horvitz-Thompson estimate of sum(eval(x)) over items passing predicate.

    Returns (estimate, variance_estimate) computed from the sampler's
    (item, inclusion probability) pairs.
    """
    total = 0
    variance = 0
    for item, prob in sampler.items():
        if not predicate(item):
            continue
        value = eval(item)
        total += value / prob
        variance += value * value * (1.0 - prob) / (prob * prob)
    return total, variance
class BottomKSampler(Sampler):
    """Classic bottom-k sampler: keep the k smallest-priority items."""

    def __init__(self, k):
        super().__init__()
        self.k = k

    def postAdd(self, item, weight):
        # Trim back down to k after every successful insert.
        self.reduceSampleSize(self.k)
#################################################################################################################
class SpaceBoundedSampler(Sampler):
    """Sampler bounded by a total item-size budget instead of a count.

    *len* is the size measure applied to each item (defaults to builtin
    len), so the budget can be bytes, elements, etc.
    """

    def __init__(self, budget, len=len):
        super().__init__()
        self.budget = budget
        self.size = 0  # current total size of buffered items
        self.len = len

    def pop(self):
        """Evict the largest-priority sample, updating the running size.

        Returns the evicted (priority, weight, item) like the base class;
        the previous override silently dropped the return value.
        """
        T, w, x = super().pop()
        self.size -= self.len(x)
        return T, w, x

    def compact(self, budget):
        """Evict until the total size fits within *budget*."""
        while self.size > budget:
            self.pop()

    def postAdd(self, item, weight):
        self.size += self.len(item)
        self.compact(self.budget)
##################################################################################################################
import xxhash
from enum import Enum
BIGVAL64 = (2**64-1)
class MultiStratifiedSampler(Sampler):
    """
    Sampler satisfying several sampling objectives simultaneously.

    An item is retained if its priority beats the threshold of ANY
    objective; the sample is only compacted once it grows beyond
    target_size * slack.
    """
    # candidate-state markers
    UNKNOWN = 0
    REMOVE_CANDIDATE = 1
    NOT_CANDIDATE = 2

    def __init__(self, num_objectives, target_size, slack=1.2, seed=None):
        self.target_size = target_size
        self.buffer = SortedList()
        self.thresholds = [float("inf") for i in range(num_objectives)]
        self.composite_threshold = float("inf")
        self.min_size_per_objective = target_size + 1
        self.slack = slack
        self.seed = seed
        # BUG FIX: was `self.Random(seed)`, which raises AttributeError;
        # the RNG lives in the stdlib `random` module.
        self.rng = random.Random(seed)

    # use hash based
    def item_rv(self, x):
        """Uniform(0,1) variate for item x (hash-based variant kept as a note)."""
        # h = xxhash.xxh64(str(x))
        # z = h.intdigest() / BIGVAL64
        z = self.rng.uniform(0, 1)
        return z

    def priority(self, U, x, weight):
        """Per-objective priorities: the shared U scaled by each weight."""
        return [U / w for w in weight]

    def getThreshold(self, item, w):
        return self.thresholds

    def cdf(self, v, weight):
        """Inclusion probability: max over objectives of min(1, w*x)."""
        p = 0.
        for x, w in zip(v, weight):
            F = min(1.0, w * x)
            p = max(p, F)
        return p

    def pop(self):
        # Single-item eviction is undefined for multi-objective sampling;
        # compaction happens wholesale in compactToSize().
        raise Exception

    @classmethod
    def lt(cls, priority, threshold):
        """True when the priority vector beats ANY per-objective threshold."""
        for r, t in zip(priority, threshold):
            if r < t:
                return True
        return False

    def getSizePerObjective(self):
        """For each objective, count buffered items it would retain."""
        size_per_objective = [0] * len(self.thresholds)
        for R, w, x in self.buffer:
            for i, (r, t) in enumerate(zip(R, self.thresholds)):
                if r < t:
                    size_per_objective[i] += 1
        return size_per_objective

    def compact(self):
        """Shrink the buffer once it exceeds target_size * slack."""
        while len(self.buffer) > self.target_size * self.slack:
            if self.min_size_per_objective > self.target_size:
                self.min_size_per_objective = max(self.getSizePerObjective())
            self.min_size_per_objective = int(self.min_size_per_objective / self.slack)
            self.compactToSize(self.min_size_per_objective)

    def getScaledThresholds(self, min_size_per_objective):
        """Per-objective thresholds keeping min_size_per_objective items each."""
        num_objectives = len(self.thresholds)
        scaled_thresholds = []
        for i in range(num_objectives):
            priorities = [R[i] for R, w, x in self.buffer]
            priorities.sort()
            scaled_thresholds.append(priorities[min_size_per_objective + 1])
        return scaled_thresholds

    def compactToSize(self, min_size_per_objective):
        """Tighten the thresholds and drop items no objective retains."""
        self.thresholds = self.getScaledThresholds(min_size_per_objective)
        new_buffer = SortedList()
        for s in self.buffer:
            if self.lt(s.priority, self.thresholds):
                new_buffer.add(s)
        self.buffer = new_buffer

    def add(self, item, weight):
        """Offer (item, per-objective weight vector) to the sample."""
        # BUG FIX: was `self.item_rv(x)`; `x` is undefined in this scope.
        U = self.item_rv(item)
        R = self.priority(U, item, weight)
        if self.lt(R, self.thresholds):
            self.buffer.add(Sample(R, weight, item))
            self.compact()
self.compact()
from math import sqrt
##################################################################################################################
#= namedtuple('TopKItem', ['priority', 'item', 'threshold', 'count'])
class TopKItem:
    """Bookkeeping entry for one item tracked by TopKSampler."""

    def __init__(self, priority, item, weight, threshold, count):
        self.priority = priority
        self.item = item
        self.weight = weight
        self.threshold = threshold  # threshold observed at insertion time
        self.count = count          # repeat occurrences seen after insertion

    def __lt__(self, other):
        return self.priority < other.priority

    def nhat(self, f=lambda x: x):
        """Estimated frequency of this item.

        1/min(1, threshold) stands in for the count implied by the
        insertion threshold; observed repeats are added on top.
        """
        implied = 1 / min(1, self.threshold)
        return implied + self.count

    def __str__(self):
        return f"{self.item} {self.weight} {self.count}"
class TopKSampler(Sampler):
    """Sampler that tracks heavy (top-k) items alongside a priority sample."""

    def __init__(self, topk, maxsize, seed=None):
        self.topk = topk        # number of heavy items to track
        self.maxsize = maxsize  # hard cap on stored entries
        self.buffer = [] #SortedList()
        self.heavy_set = set()
        self.item_dict = {}
        self.threshold = float("inf")
        self.processed = 0
        self.seed = seed
        self.rng = random.Random(seed)

    def size(self):
        """Number of distinct tracked items."""
        return len(self.item_dict)

    def nhat_infreq(self):
        """Estimated count below which an item is considered infrequent."""
        return 1. / self.threshold

    def is_infreq(self, item, mingap=0):
        """True if *item*'s estimate is not clearly above the noise floor."""
        y = self.item_dict[item]
        if y.count == 0 or y.nhat() - mingap < self.nhat_infreq():
            return True
        return False

    def getNumHeavy(self):
        """Count tracked candidates that still look heavy (with 10% slack)."""
        return sum([not self.is_infreq(x, 0.1*self.nhat_infreq()) for x in self.heavy_set])

    def items(self):
        """
        Generator which returns item, pseudo-inclusion probability
        """
        for topk_item in self.buffer:
            R = topk_item.priority
            w = topk_item.weight
            x = topk_item.item
            yield x, self.inc_probability(self.getThreshold(x, w), w)

    def getTotal(self):
        """Sum of estimated counts over all tracked items."""
        return sum([x.nhat() for x in self.buffer])

    def compact(self):
        """Evict largest-priority entries once size/heavy-count limits are hit."""
        if self.getNumHeavy() <= self.topk and self.size() <= self.maxsize:
            return
        # Sort ascending by priority so pop() removes the largest.
        self.buffer.sort()
        #print("compact", self.size(), self.getNumHeavy(), self.threshold, self.getTotal(), self.processed)
        while self.size() > self.maxsize or self.getNumHeavy() > self.topk:
            topk_item = self.buffer.pop()
            del self.item_dict[topk_item.item]
            if topk_item.item in self.heavy_set:
                self.heavy_set.remove(topk_item.item)
            self.threshold = min(self.threshold, topk_item.priority) # shouldn't need take min

    def add(self, item, weight):
        """Offer one occurrence of (item, weight) to the sketch."""
        self.processed += 1
        if item in self.item_dict:
            topk_item = self.item_dict[item]
            #print("initinc", topk_item.priority, topk_item.count, item, weight)
            # Scale the priority down so repeated items become "heavier".
            nhat = topk_item.nhat()
            topk_item.priority *= nhat / (nhat+1)
            topk_item.count += 1
            #print("inc", topk_item.priority, topk_item.count, item, weight)
            if not self.is_infreq(item):
                self.heavy_set.add(item)
            return
        R = self.priority(item, weight)
        #print("add", item, R)
        if R < self.threshold:
            entry = TopKItem(R, item, weight, self.threshold, 0)
            self.buffer.append(entry)
            self.item_dict[item] = entry
            self.compact()
##################################################################################################################
| 2.421875 | 2 |
leapp/messaging/testing.py | drehak/leapp | 0 | 12769785 | <reponame>drehak/leapp<filename>leapp/messaging/testing.py
from leapp.messaging import BaseMessaging
class TestMessaging(BaseMessaging):
"""
This class implements a messaging implementation made for unit tests - Data is only stored in memory and
not stored in a database.
"""
def __init__(self):
super(TestMessaging, self).__init__(stored=False)
def feed(self, *messages):
self._data.extend([message.dump() for message in messages])
def _process_message(self, message):
return message
def _perform_load(self, consumes):
pass
| 2.609375 | 3 |
src/medbot_xmpp.py | mogron/ep_dyntest | 6 | 12769786 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A chat bot based on SleekXMPP
Scope parameter is https://www.googleapis.com/auth/googletalk
Best is the generate the request with the OauthPlayground:
https://developers.google.com/oauthplayground/
"""
from __future__ import absolute_import, division, print_function
import sys
import logging
import random
from datetime import datetime, timedelta
from time import sleep
from sleekxmpp import ClientXMPP
from sleekxmpp.exceptions import IqError, IqTimeout
from oauth import OAuth
__author__ = '<NAME>'
__copyright__ = 'Blue Yonder'
__license__ = 'new BSD'
_logger = logging.getLogger(__name__)
if sys.version_info < (3, 0):
from sleekxmpp.util.misc_ops import setdefaultencoding
setdefaultencoding('utf8')
else:
raw_input = input
class ChatClient(ClientXMPP):
    """XMPP chat client authenticating with a Google OAuth access token."""

    def __init__(self, jid, oauth):
        # The OAuth access token serves as the XMPP password; connect()
        # and reconnect() below refresh credentials['access_token'] from
        # the same source.  (The original line contained the invalid
        # redaction placeholder "<PASSWORD>".)
        ClientXMPP.__init__(self, jid, password=oauth.access_token)
        self.oauth = oauth
        self.msg_callback = None
        self.add_event_handler("session_start", self.session_started, threaded=True)
        self.add_event_handler("message", self.message_received)
        # Plugins
        self.register_plugin('xep_0030')  # Service Discovery
        #self.register_plugin('xep_0199', {'keepalive': True, 'frequency': 60}) # XMPP Ping
        #self.register_plugin('xep_0235')
        #self.register_plugin('google')
        #self.register_plugin('google_auth')

    def add_msg_callback(self, func):
        """Register a callable invoked for every incoming message."""
        self.msg_callback = func

    def get_recipient_id(self, recipient):
        """Return the JID of the roster entry named *recipient*, or None."""
        for k in self.client_roster.keys():
            if self.client_roster[k]['name'] == recipient:
                recipient_id = k
                break
        else:
            recipient_id = None
        return recipient_id

    def connect(self, *args, **kwargs):
        """Refresh the OAuth access token, then connect."""
        _logger.info("Connecting...")
        self.credentials['access_token'] = self.oauth.access_token
        return super(ChatClient, self).connect(*args, **kwargs)

    def reconnect(self, *args, **kwargs):
        """Refresh the OAuth access token, then reconnect."""
        _logger.info("Reconnecting")
        self.credentials['access_token'] = self.oauth.access_token
        return super(ChatClient, self).reconnect(*args, **kwargs)

    def session_started(self, event):
        """On session start: announce presence and fetch the roster."""
        self.send_presence()
        try:
            self.get_roster()
        except IqError as err:
            logging.error('There was an error getting the roster')
            logging.error(err.iq['error']['condition'])
            self.disconnect()
        except IqTimeout:
            logging.error('Server is taking too long to respond')
            self.disconnect()

    def send_msg(self, recipient, msg):
        """Send *msg* as a chat message to the roster entry named *recipient*."""
        recipient_id = self.get_recipient_id(recipient)
        self.send_message(mto=recipient_id, mbody=msg, mtype='chat')

    def message_received(self, msg):
        """Dispatch an incoming message to the registered callback, if any."""
        _logger.info("Got message from {}".format(msg['from']))
        if self.msg_callback is None:
            _logger.warn("No callback for message received registered")
        else:
            self.msg_callback(msg)
class State(object):
    """Conversation state for the reminder dialogue.

    Plain integer class attributes (not an Enum) so existing comparisons
    against the raw values 0/1 keep working unchanged.
    """

    not_asked, asked = 0, 1
class MedBot(object):
    """Daily medication-reminder bot driven by a ChatClient.

    Once per day (18:40 local time) it asks *recipient* whether they have
    taken their insulin, then re-asks every ``retry_sleep`` seconds until a
    "yes" reply arrives or ``max_retries`` reminders have been sent.
    """

    # Message pools; one entry is picked at random for each send.
    alarm = ['Have you taken your long-acting insulin analogue?',
             'Hey buddy, got your insulin?',
             'Have you taken your daily dose of insulin?']
    reminder = ['how about now?',
                'and now?',
                '... maybe now?']
    praise = ['Great!', 'Good for you!', 'Well done']
    # NOTE(review): "I'am" is a typo in a user-facing string; fixing it
    # changes runtime output, so it is only flagged here.
    give_up = ["Okay, I'am giving up!", "It can't be helped!"]

    def __init__(self, chat_client, recipient, max_retries=5):
        self.chat_client = chat_client
        # Route all incoming messages through our reply handler.
        self.chat_client.add_msg_callback(self.handle_message)
        self.recipient = recipient
        self.positive_reply = False
        self.curr_state = State.not_asked
        self.max_retries = max_retries
        self.retries = 0
        self.retry_sleep = 1200  # seconds between reminders (20 min)

    def send_alarm(self):
        """Send the daily question, then block while retrying until a
        positive reply is received or retries are exhausted."""
        _logger.info("Alarm triggered")
        self.positive_reply = False
        self.curr_state = State.asked
        self.retries = 0
        self.chat_client.send_msg(self.recipient,
                                  random.choice(self.alarm))
        while not self.positive_reply:
            sleep(self.retry_sleep)
            if not self.ask_again():
                self.curr_state = State.not_asked
                break

    def ask_again(self):
        """Send one reminder (or a give-up message after max_retries).

        Returns True while retrying, False when giving up.  Returns None
        (falsy) when a positive reply already arrived, which also ends the
        send_alarm() loop.
        """
        _logger.info("Asking again?")
        if not self.positive_reply:
            if self.retries < self.max_retries:
                self.retries += 1
                msg = random.choice(self.reminder)
                answer = True
            else:
                msg = random.choice(self.give_up)
                answer = False
            self.chat_client.send_msg(self.recipient, msg)
            return answer

    def handle_message(self, msg):
        """Accept a reply only if it comes from the tracked recipient,
        starts with "yes", and a question is currently outstanding."""
        recipient_id = self.chat_client.get_recipient_id(self.recipient)
        from_recipient = msg['from'].full.startswith(recipient_id)
        is_positive = msg['body'].lower().startswith('yes')
        was_asked = self.curr_state == State.asked
        if from_recipient and is_positive and was_asked:
            _logger.info("Positive reply received")
            self.positive_reply = True
            self.curr_state = State.not_asked
            self.chat_client.send_msg(self.recipient,
                                      random.choice(self.praise))

    def _get_secs_to(self, timestamp):
        """Seconds from now until *timestamp* (negative if in the past)."""
        delta = timestamp - datetime.now()
        return delta.total_seconds()

    def _get_next_alarm(self):
        """Return today's 18:40 alarm time, or tomorrow's if that moment
        is less than ~15 seconds away or already past (negative timedeltas
        have days == -1, so the .days >= 0 test captures both)."""
        today = datetime.now()
        today_alarm = datetime(today.year, today.month, today.day, 18, 40, 0)
        if (today_alarm - today - timedelta(seconds=15)).days >= 0:
            return today_alarm
        else:
            return today_alarm + timedelta(days=1)

    def run(self):
        """Connect and loop forever, firing the alarm once per day."""
        if self.chat_client.connect():
            self.chat_client.process(block=False)
            while True:
                sleep(self._get_secs_to(self._get_next_alarm()))
                self.send_alarm()
        else:
            raise RuntimeError("Unable to connect!")
# Script entry point: read OAuth settings, build the chat client for the
# configured JID, and start the reminder bot for the contact named 'Buddy'.
# NOTE(review): '<EMAIL>' is a redacted address from the published source.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-8s %(message)s',
                        stream=sys.stdout)
    oauth = OAuth()
    oauth.read_cfg('oauth.cfg')
    jid = '<EMAIL>'
    chat_client = ChatClient(jid, oauth)
    medbot = MedBot(chat_client, 'Buddy')
    medbot.run()
| 2.46875 | 2 |
kvoter/home.py | geokala/k-voter | 0 | 12769787 | <filename>kvoter/home.py
from flask import render_template, request
from kvoter import app
from kvoter.db import Election, Candidate, User
from wtforms import Form, IntegerField, validators
class VoteForm(Form):
    """WTForms form carrying the id of the election being voted in."""

    # Required integer field; validation failure leaves the POST a no-op
    # in home_view below.
    election_id = IntegerField(
        'Election ID',
        [
            validators.Required(),
        ],
    )
@app.route("/")
def home_view():
    """Render the home page listing every election with its candidates.

    Builds, for each election, a dict of its type, location and the User
    objects of its candidates (resolved through a user-id lookup table to
    avoid one query per candidate).
    """
    form = VoteForm(request.form)
    candidates = Candidate.query.all()
    elections = Election.query.all()
    # Map user id -> User so candidate rows can be resolved in memory.
    users = {
        user.id: user
        for user in User.query.all()
    }
    elections = [
        {
            'type': election.election_type,
            'location': election.location,
            'candidates': [users[candidate.user_id]
                           for candidate in candidates
                           if candidate.election_id == election.id],
        }
        for election in elections
    ]
    # TODO(review): the POST branch is an unimplemented stub -- a validated
    # vote submission is currently discarded.
    if request.method == 'POST' and form.validate():
        pass
    return render_template("home.html", elections=elections)
| 2.578125 | 3 |
phase1/prediction.py | lyubom/ATMO_Data_Challenge | 0 | 12769788 | <reponame>lyubom/ATMO_Data_Challenge
# -*- coding: utf-8 -*-
from utils import load_data,get_data_day
import numpy as np
import pickle
import datetime
import calendar
import time
import os
import sys
import pickle
from keras.models import model_from_json
from keras import optimizers
#### Predict function for a given day
# Module-level setup: resolve paths relative to this file, then load the
# coordinate-mean offset and the trained random-forest model at import time.
dirname = os.path.dirname(__file__)
# filename = os.path.join(dirname, 'mean.npz')
# load model
# model_filename = os.path.join(dirname, "models/lstm_model")
model_filename = os.path.join(dirname, "models/randomforest2.sav")
mean_filename = os.path.join(dirname, "tmp/mean_coord.npz")
# RANDOM FOREST MODEL
# Mean (x, y) station coordinate, subtracted to centre the position feature.
mean = np.load(mean_filename)['a']
# SECURITY NOTE(review): pickle.load executes arbitrary code from the model
# file -- only load model files from a trusted source.
loaded_model = pickle.load(open(model_filename, 'rb'))
print("Loaded model from disk")
def predict(day,sites,chimeres_day,geops_day,meteo_day,concentrations_day,model=None):
    """
    day: day of the year (1-365)
    sites : Dataframe with columns "idPolair","nom_station","coord_x_l93","coord_y_l93","X_lamb2","Y_lamb2", "LON" ,"LAT",
    "Département","Zone_EPCI","typologie","NO2_influence", "NO2_2012", "NO2_2013","NO2_2014","NO2_2015", "NO2_2016","NO2_2017","O3_influence","O3_2012", "O3_2013", "O3_2014", "O3_2015","O3_2016" "PM10_2017","PM25_influence" "PM25_2012","PM25_2013","PM25_2014","O3_2017","PM10_influence" "PM10_2012","PM10_2013","PM10_2014","PM10_2015","PM10_2016","PM25_2015","PM25_2016","PM25_2017".
    chimeres_day Dict on Pollutants, for each pollutant a Dataframe with columns 'date', 'val', 'idPolair', 'param'. Stopped at D0+72H
    geops_day : Dict on sites, for each site a Dataframe with columns 'date', 'idPolair', 'geop_p_500hPa', 'geop_p_850hPa'. Stopped at D0+6H
    meteo_day : Dataframe with columns "date", "idPolair", "T2", "Q2", "U10", "V10" "PSFC", "PBLH", "LH", "HFX", "ALBEDO", "SNOWC", "HR2", "VV10", "DV10", "PRECIP". Stopped at D0+6H
    concentrations_day : Dict on Pollutants, for each pollutant, a dataframe with columns 'idPolair', 'Organisme', 'Station', 'Mesure', 'Date', 'Valeur'. Stopped at D0+6H
    model : pretrained model data (e.g. saved learned sklearn model) if you have one. Change its default value with a relative path if you want to load a file

    Returns dict[pollutant][idPolair][horizon] -> array of 24 hourly values,
    computed with the module-level ``loaded_model`` random forest.
    """
    # Prediction step: up to you !
    results = dict({})
    for pol in ["PM10","PM25","O3","NO2"]:
        results[pol] = dict({})
        concentrations_pol = concentrations_day[pol]
        for idPolair in sites.idPolair:
            concentrations_pol_site = concentrations_pol[concentrations_pol.idPolair==idPolair]
            results[pol][idPolair] = dict({})
            # Only predict when at least one past measurement exists.
            if np.sum(~np.isnan(concentrations_pol_site.Valeur))!=0:
                tmp = concentrations_pol_site.Valeur
                # last_values = np.tile(tmp[72:], 3)[0:17]
                # tmp = np.concatenate((tmp, last_values))
                # previous_data = tmp.reshape([1, 4, 24])
                previous_data = np.array(tmp)
                # Fill measurement gaps with the CHIMERE model values.
                if np.isnan(previous_data).any():
                    # NOTE(review): station 33374 is mapped onto CHIMERE id
                    # 15114 here and in convert_data_day -- presumably a
                    # known id mismatch in the dataset; confirm.
                    if idPolair != '33374':
                        chimeres_site = chimeres_day[pol].loc[chimeres_day[pol].idPolair == float(idPolair)]
                    else:
                        chimeres_site = chimeres_day[pol].loc[chimeres_day[pol].idPolair == 15114.]
                    inds = np.where(np.isnan(previous_data))
                    previous_data[inds] = chimeres_site['val'].iloc[inds]
                # IF DATA TYPE 2 IS USED
                # 4-digit ids are stored zero-padded in the sites table.
                if len(idPolair) == 4:
                    site = sites.loc[sites.idPolair == ('0'+idPolair)]
                else:
                    site = sites.loc[sites.idPolair == idPolair]
                # Feature vector: [pollutant code, centred x, centred y,
                # past concentrations...].
                tmp = np.array(site.loc[:, "coord_x_l93":"coord_y_l93"])[0] - mean
                tmp = np.concatenate(([convert_pol(pol)], tmp))
                previous_data = np.concatenate((tmp, previous_data))
                previous_data = np.expand_dims(previous_data, axis=0)
                result = loaded_model.predict(previous_data)
                # Split the 72-hour output into three 24-hour horizons.
                for i, horizon in enumerate(["D0","D1","D2"]):
                    results[pol][idPolair][horizon] = result[0, i*24:(i+1)*24]
                # First 7 hours of D0 are already observed -- copy them over.
                results[pol][idPolair]["D0"][0:7] = previous_data[0, 72:79]
            else:
                # No data at all: fall back to all-zero predictions.
                for horizon in ["D0","D1","D2"]:
                    results[pol][idPolair][horizon] = np.zeros(24)
    return results
# result format
# dict[pol][site][horizon][array of size 24 (hourly prediction)]
# results = dict({})
# for pol in ["PM10","PM25","O3","NO2"]:
# results[pol] = dict({})
# concentrations_pol = concentrations_day[pol]
# for idPolair in sites.idPolair:
# concentrations_pol_site = concentrations_pol[concentrations_pol.idPolair==idPolair]
# results[pol][idPolair] = dict({})
# for horizon in ["D0","D1","D2"]:
#
# ####### your prediction step
# if np.sum(~np.isnan(concentrations_pol_site.Valeur))!=0:
# results[pol][idPolair][horizon] = np.ones(24)*np.nanmean(concentrations_pol_site.Valeur) #dummy example where we just copy for each hour the mean concentration of all previous available days
# else:
# results[pol][idPolair][horizon] = np.zeros(24)
# ######
#
def convert_pol(pol):
    """Map a pollutant short name to its integer model feature code.

    Parameters
    ----------
    pol : str
        One of "O3", "NO2", "PM25", "PM10".

    Returns
    -------
    int or None
        The numeric code fed to the model as a feature, or None for an
        unknown name (same fall-through behaviour as the original
        if/elif chain).
    """
    # Single dict lookup instead of an if/elif ladder.
    return {"O3": 1, "NO2": 2, "PM25": 3, "PM10": 4}.get(pol)
def convert_data_day(day, pol, idPolair, sites, chimeres_day, geops_day, meteo_day, concentrations_day):
    """Build the per-hour feature matrix for one site/pollutant/day.

    Returns a (79, 18) array: 79 hourly rows (3 days + 7 hours) with columns
    [pollutant code, x, y, 14 meteo variables (T2..PRECIP), concentration].
    Missing concentration values are filled from the CHIMERE model output.

    NOTE(review): unlike predict(), the coordinates here are NOT centred by
    subtracting ``mean`` -- confirm which convention the trained model used.
    """
    measures = 3*24 + 7
    ids = 3
    rows = 3*24 + 7
    columns = 18
    data = np.empty(shape=[rows, columns])
    # data = np.empty(shape=[ids+measures])
    # 4-digit ids are stored zero-padded in the sites table.
    if len(idPolair) == 4:
        site = sites.loc[sites.idPolair == ('0'+idPolair)]
    else:
        site = sites.loc[sites.idPolair == idPolair]
    # Station 33374 maps onto CHIMERE/meteo id 15114 (see predict()).
    if idPolair != '33374':
        meteo_site = meteo_day.loc[meteo_day.idPolair == float(idPolair)]
    else:
        meteo_site = meteo_day.loc[meteo_day.idPolair == 15114.]
    pol_ = convert_pol(pol)
    # if idPolair in geops_day.keys():
    #     geops_site = geops_day[idPolair]
    # else:
    #     mean_geop_p_500hPa = []
    #     mean_geop_p_850hPa = []
    #     for idPolair_ in geops_day.keys():
    #         mean_geop_p_500hPa.append(np.nanmean(geops_day[idPolair_].geop_p_500hPa))
    #         mean_geop_p_850hPa.append(np.nanmean(geops_day[idPolair_].geop_p_850hPa))
    # Concentration table stores ids without the leading zero.
    if idPolair[0] == '0':
        concentrations_site = concentrations_day[pol].loc[concentrations_day[pol].idPolair == idPolair[1:]]
    else:
        concentrations_site = concentrations_day[pol].loc[concentrations_day[pol].idPolair == idPolair]
    if idPolair != '33374':
        chimeres_site = chimeres_day[pol].loc[chimeres_day[pol].idPolair == float(idPolair)]
    else:
        chimeres_site = chimeres_day[pol].loc[chimeres_day[pol].idPolair == 15114.]
    # Column layout: 0 = pollutant code, 1-2 = coordinates (broadcast to all
    # rows), 3-16 = meteo block, 17 = measured concentration.
    data[:, 0] = pol_*np.ones(shape=[rows])
    tmp = np.array(site.loc[:, "coord_x_l93":"coord_y_l93"])
    data[:, 1:3] = np.repeat(tmp, rows, axis=0)
    data[:, 3:17] = meteo_site.loc[:, 'T2':'PRECIP']
    # print(concentrations_day[pol])
    data[:, 17] = concentrations_site.Valeur
    # print(concentrations_site.Valeur)
    # print(concentrations_site.Valeur.iloc[6])
    # if np.isnan(data[3:]).any():
    #     for i in range(measures):
    #         chimeres_site_date = chimeres_site.iloc[i]
    #         data[i+3] = chimeres_site_date.val
    # Gap-fill missing measurements from the CHIMERE model values.
    if np.isnan(data[:, 17]).any():
        inds = np.where(np.isnan(data[:, 17]))
        data[:, 17][inds] = chimeres_site.val.iloc[inds]
    return data
# print(concentrations_site)
# for i in range(measures):
# date = np.unique(meteo_day['date'])[i]
# # data[i, 1] = site.coord_x_l93
# # data[i, 2] = site["coord_y_l93"]
# # data[i, 3] = site["X_lamb2"]
# # data[i, 4] = site["Y_lamb2"]
# # data[i, 5] = site["LON"]
# # data[i, 6] = site["LAT"]
#
# # date = meteo_day.date.iloc[i]
#
#
# # meteo_site_date = meteo_site.loc[meteo_site.date == date]
#
#
#
# # data [i, 7:21] = meteo_site_date.loc[:, 'T2':'PRECIP']
# #
# # if idPolair in geops_day.keys():
# #
# # geops_site_date = geops_site.loc[geops_site.date == date]
# #
# # # data[i, 21] = geops_site_date.geop_p_500hPa
# # # data[i, 22] = geops_site_date.geop_p_850hPa
# # data[i, 21:23] = geops_site_date.loc[:, "geop_p_500hPa":"geop_p_850hPa"]
# # else:
# #
# # data[i, 21] = np.mean(mean_geop_p_500hPa)
# # data[i, 22] = np.mean(mean_geop_p_850hPa)
#
#
#
# concentrations_site_date = concentrations_site.loc[concentrations_site.date == date]
#
#
# # chimeres_site = chimeres_day[pol].loc[chimeres_day[pol].idPolair == idPolair]
# # chimeres_site_date = chimeres_site.loc[chimeres_site.date == date]
# # print(concentrations_site_date.Valeur)
# # print('sep')
#
#
# # data[i + 3] = concentrations_site_date.Mesure
# if not np.isnan(concentrations_site_date.Valeur.iloc[0]):
# data[i + 3] = concentrations_site_date.Valeur
# else:
# chimeres_site_date = chimeres_site.loc[chimeres_site.date == date]
# data[i + 3] = chimeres_site_date.val
# # print(concentrations_site_date.Valeur)
# # print(chimeres_site_date.val)
# data[i, 7] = meteo_day[]
# sys.exit(1)
#### Main loop (no need to be changed)
def run_predict(year=2016,max_days=3,dirname="../Data/training",list_days=None):
    """
    year : year to be evaluated
    max_days: number of past days allowed to predict a given day (set to 10 on the platform)
    dirname: path to the dataset
    list_days: list of days to be evaluated (if None the full year is evaluated)

    Side effects: writes the per-day prediction dict and the total wall-clock
    time to 'submission/results.pk' and 'submission/time.pk'.
    """
    overall_start = time.time() # <== Mark starting time
    data = load_data(year=year,dirname=dirname) # load all data files
    sites = data["sites"] #get sites info
    day_results = dict({})
    if list_days is None:
        # Days 1-2 are skipped: max_days of history are needed before the
        # first prediction.
        if calendar.isleap(year): # check if year is leap
            list_days = range(3, 366)
        else:
            list_days = range(3, 365)
    for day in list_days:
        print(day)
        chimeres_day,geops_day,meteo_day,concentrations_day = get_data_day(day,data,max_days=max_days,year=year) # you will get an extraction of the year datasets, limited to the past max_days for each day
        day_results[day] = predict(day,sites,chimeres_day,geops_day,meteo_day,concentrations_day) # do the prediction
    overall_time_spent = time.time() - overall_start # end computation time
    pickle.dump(day_results, open('submission/results.pk', 'wb')) #save results
    pickle.dump(overall_time_spent, open('submission/time.pk', 'wb')) #save computation time
| 2.484375 | 2 |
vispy/util/tests/test_key.py | MatthieuDartiailh/vispy | 0 | 12769789 | from nose.tools import assert_raises, assert_true, assert_equal
from vispy.util.keys import Key, ENTER
def test_key():
    """Test basic key functionality"""
    def bad():
        # Comparing a Key against an unsupported type must raise ValueError.
        return (ENTER == dict())
    assert_raises(ValueError, bad)
    # Comparison against None is allowed and must be False, not an error.
    assert_true(not (ENTER == None)) # noqa
    # A Key compares equal to its name string...
    assert_equal('Return', ENTER)
    print(ENTER.name)
    print(ENTER) # __repr__
    # ...and a single-character Key compares equal to its ASCII code.
    assert_equal(Key('1'), 49) # ASCII code
| 2.59375 | 3 |
tests/developer/Li_PBC/relax_vol/vol_0.97/os_opt/run.py | pyflosic/pyeff | 3 | 12769790 | <filename>tests/developer/Li_PBC/relax_vol/vol_0.97/os_opt/run.py
# Single-point pyeff calculation for the Li PBC volume-scan test case
# (Python 2 script: note the statement-form prints).
from pyeff_calculator_pbc import *
import time
t0 = time.clock()
# single point
print 'Single Point Calculation'
p_cfg = 'pyeff.cfg'
calc = pyeff_pbc(p_cfg=p_cfg,scale=1.0)
calc.initialize()
calc.show_all()
print(calc.get_energy())
# NOTE(review): time.clock() is deprecated/removed in Python 3.8+;
# time.perf_counter() is the modern equivalent.
t = time.clock() - t0
print('Timing: %0.5f s' % t)
| 1.945313 | 2 |
test/shapes/test_lineJoin.py | XRDX/pyleap | 6 | 12769791 | from pyleap import *
c = Circle(200, 200, 100)
t = Triangle(120, 130, 400, 300, 200, 230)
l = Line(100, 500, 500, 420)
r = Rectangle(40, 50, 100, 100)
c.color = "#ff000090"
l.color = "#00ff0090"
r.color = "#0000ff90"
r.rotation = 45
r.line_width = 20
c.line_width = 20
l.line_width = 100
t.line_width = 15
@repeat
def draw(dt):
window.clear()
t.stroke()
c.stroke()
l.stroke()
r.stroke()
window.show_fps()
run() | 2.78125 | 3 |
experimental/ipynb/jsplugins/ipynbgalry/generate_scene.py | rossant/galry | 55 | 12769792 | <filename>experimental/ipynb/jsplugins/ipynbgalry/generate_scene.py
# Build a galry scene with two coloured sine curves and serialize it to JSON
# (Python 2 script: note the statement-form print).
from galry import *

s = SceneCreator()
x = np.linspace(-1., 1., 1000)
y = np.sin(20 * x) * .2 - .5
# Stack into two rows: the second curve is the first shifted up by 1.
x = np.vstack((x, x))
y = np.vstack((y, y + 1))
# One RGBA colour per curve (red, blue).
color = np.array([[1., 0., 0., 1.],
                  [0., 0., 1., 1.]], dtype=np.float32)
s.add_visual(PlotVisual, x, y, color=color)
scene_json = s.serialize()
print type(scene_json)
# write JS file
# f = open('scene.js', 'w')
# f.write("scene_json = '%s';" % scene_json)
# f.close()
| 2.21875 | 2 |
Dspot/Dspot/bookm/views.py | attaakkhan/Dspot-FYP-2016 | 0 | 12769793 | <reponame>attaakkhan/Dspot-FYP-2016
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.template import RequestContext
# Create your views here.
from django.http import HttpResponse
from django.template import Context
from django.template.loader import get_template
from django.http import HttpResponse, Http404
from django.contrib.auth.models import User
from django.middleware.csrf import *
from django.http import HttpResponseRedirect
from django.contrib.auth import logout
from django.shortcuts import render_to_response
from bookm.forms import *
from django.shortcuts import get_object_or_404
from friends.models import *
from django.contrib.auth.decorators import login_required
from itertools import chain
# @csrf_protect()
def f_request(r_user, user):
    """Send a friendship request from *r_user* to *user*.

    If *user* already sent a request to *r_user*, that pending request is
    accepted instead of creating a duplicate in the other direction.

    Raises RuntimeError when the two users are already friends.
    """
    if Friendship.objects.are_friends(r_user, user):
        # Fixed typo in the error message: "amd" -> "and".
        raise RuntimeError('%r and %r are already friends' % (r_user,
                           user))
    try:
        # If there's a friendship request from the other user accept it.
        get_object_or_404(FriendshipRequest, from_user=user,
                          to_user=r_user).accept()
    except Http404:
        # TODO(review): 'message' is a placeholder request message.
        request_message = 'message'
        # If we already have an active friendship request IntegrityError
        # will be raised and the transaction will be rolled back.
        FriendshipRequest.objects.create(from_user=r_user,
                to_user=user, message=request_message)
@login_required(login_url='/login/')
def friends_page(request, username):
    """Show *username*'s friends and their 10 most recent bookmarks."""
    user = get_object_or_404(User, username=username)
    friends = [friendship.to_friend for friendship in
               user.friend_set.all()]
    # Newest bookmarks first (descending primary key).
    friend_bookmarks = \
        Bookmark.objects.filter(user__in=friends).order_by('-id')
    return render(request, 'friends_page.html', {
        'username': username,
        'friends': friends,
        'bookmarks': friend_bookmarks[:10],
        'show_tags': True,
        'show_user': True,
    })
def profile(request, username):
    """Render *username*'s profile with friendship controls.

    Handles the friendship POST actions (send/cancel/accept/decline a
    request, unfriend) and computes which button the template should show:
    f_button is one of 'Me', 'Friend', 'Request Sent', 'Accept' or 'Send';
    a_button toggles the visibility of the Decline button.
    """
    user = get_object_or_404(User, username=username)
    f_button = ''
    a_button = 'Hidden'
    # (Removed a leftover debug print statement here.)
    if request.method == 'POST':
        if 'f_request1' in request.POST:
            if request.POST['f_request1'] == 'Friend':
                Friendship.objects.unfriend(request.user, user)
            elif request.POST['f_request1'] == 'Request Sent':
                get_object_or_404(FriendshipRequest,
                                  from_user=request.user,
                                  to_user=user).cancel()
            elif request.POST['f_request1'] == 'Send':
                f_request(request.user, user)
            elif request.POST['f_request1'] == 'Accept':
                get_object_or_404(FriendshipRequest, from_user=user,
                                  to_user=request.user).accept()
        elif 'f_request2' in request.POST:
            if request.POST['f_request2'] == 'Decline':
                get_object_or_404(FriendshipRequest, from_user=user,
                                  to_user=request.user).decline()
    # Decide which friendship button to display after any POST action.
    if user == request.user:
        f_button = 'Me'
    elif request.user.friendship.friends.filter(user=user):
        f_button = 'Friend'
    elif FriendshipRequest.objects.filter(to_user=user,
                                          from_user=request.user):
        f_button = 'Request Sent'
    elif FriendshipRequest.objects.filter(to_user=request.user,
                                          from_user=user):
        f_button = 'Accept'  # decline will appear on front end
        a_button = 'submit'
    else:
        f_button = 'Send'
    # Devices other users have shared with the current user.
    share_with_you = list(chain(
        request.user.switch_man_us.filter(user=request.user),
        request.user.switch_sen_us.filter(user=request.user),
        request.user.switch_tim_us.filter(user=request.user),
        request.user.alarm_clock_us.filter(user=request.user),
        request.user.wareabout_us.filter(user=request.user),
        request.user.door_bell_us.filter(user=request.user),
    ))
    return render(request, 'profile.html', {
        'uf_name': user.first_name,
        'ul_name': user.last_name,
        'u_email': user.email,
        'f_button': f_button,
        'share_with_you': share_with_you,
        'a_button': a_button,
    })
def search_page(request):
    """Search users by exact username and the current user's devices by name.

    On POST with a non-empty query, renders the matching user list and the
    matching devices across all six device relations; otherwise renders an
    empty search page.
    """
    if request.method == 'POST':
        if request.POST['search']:
            # Exact-match user lookup (not a substring search).
            f_list = User.objects.filter(username=request.POST['search'
                                         ])
            print f_list, request.POST['search']
            # Search each device relation for a device with this name.
            d_list = list(chain(
                request.user.switch_man_u.filter(d_name=request.POST['search'
                                                 ]),
                request.user.switch_sen_u.filter(d_name=request.POST['search'
                                                 ]),
                request.user.switch_tim_u.filter(d_name=request.POST['search'
                                                 ]),
                request.user.alarm_clock_u.filter(d_name=request.POST['search'
                                                  ]),
                request.user.wareabout_u.filter(d_name=request.POST['search'
                                                ]),
                request.user.door_bell_u.filter(d_name=request.POST['search'
                                                ]),
            ))
            return render(request, 'search.html', {'f_list': f_list,
                          'd_list': d_list})
    return render(request, 'search.html', {})
def logout_page(request):
    """Log the current user out and redirect to the landing page."""
    logout(request)
    return HttpResponseRedirect('/')
def main_page(request):
    """Render the dashboard for logged-in users, the landing page otherwise."""
    if request.user.is_authenticated():
        return render(request, 'base.html', {})
    else:
        return render(request, 'main_page.html', {})
def user_page(request, username):
    """Display the bookmark list of *username*.

    Raises Http404 when the user does not exist.  Catches only
    User.DoesNotExist -- the original bare ``except:`` also converted
    unrelated errors (e.g. database failures) into a misleading 404.
    """
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        raise Http404('Requested user not found.')
    bookmarks = user.bookmark_set.all()
    return render(request, 'user_page.html', {'username': username,
                  'bookmarks': bookmarks})
def friends(request):
    """Render the friend list of the currently logged-in user."""
    friends = [i.user for i in request.user.friendship.friends.all()]
    return render(request, 'friends.html', {'friends': friends})
def register_page(request):
    """Handle new-user registration.

    On a valid POST, creates the User and redirects to the home page;
    otherwise (GET or invalid form) re-renders the registration form.
    """
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        if form.is_valid():
            # NOTE(review): '<PASSWORD>' is a redacted placeholder from the
            # published source -- presumably form.cleaned_data['password'].
            user = \
                User.objects.create_user(username=form.cleaned_data['username'
                ], first_name=form.cleaned_data['firstname'],
                last_name=form.cleaned_data['lastname'],
                password=form.cleaned_data['<PASSWORD>'],
                email=form.cleaned_data['email'])
            return HttpResponseRedirect('/')
    else:
        form = RegistrationForm()
    return render(request, 'registration/register.html', {'form': form})
| 2.34375 | 2 |
render.py | Hypersycos/3D-Noughts-and-Crosses | 2 | 12769794 | <reponame>Hypersycos/3D-Noughts-and-Crosses<gh_stars>1-10
import math
import pygame
import pygame.gfxdraw
class Vector3d:
    """A mutable 3-component vector with arithmetic and in-place rotations.

    Arithmetic operators return a new ``Vector3d`` (the defining class,
    even for subclasses, via the ``__class__`` cell); the rotate* methods
    mutate the vector in place.
    """

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

    def __add__(self, other):
        return __class__(self.x + other.x, self.y + other.y, self.z + other.z)

    def __sub__(self, other):
        return __class__(self.x - other.x, self.y - other.y, self.z - other.z)

    def __mul__(self, scalar):
        return __class__(self.x * scalar, self.y * scalar, self.z * scalar)

    def __truediv__(self, scalar):
        return __class__(self.x / scalar, self.y / scalar, self.z / scalar)

    def __eq__(self, other):
        # Equal only to instances of the same (runtime) class with
        # identical components.
        if not isinstance(other, self.__class__):
            return False
        return self.x == other.x and self.y == other.y and self.z == other.z

    def __str__(self):
        # Each component rounded to 3 decimal places.
        parts = [str(round(c * 1000) / 1000) for c in (self.x, self.y, self.z)]
        return ", ".join(parts)

    def magnitude(self):
        """Euclidean length of the vector."""
        return math.sqrt(self.dotProduct(self))

    def dotProduct(self, vector):
        """Scalar (dot) product with *vector*."""
        return self.x * vector.x + self.y * vector.y + self.z * vector.z

    def angleBetween(self, other):
        """Angle (radians) between this vector and *other*."""
        cos_theta = self.dotProduct(other) / (self.magnitude() * other.magnitude())
        return math.acos(cos_theta)

    def rotateX(self, rotation):
        """Rotate in place about the x axis by *rotation* radians."""
        c, s = math.cos(rotation), math.sin(rotation)
        self.y, self.z = self.y * c - self.z * s, self.z * c + self.y * s

    def rotateY(self, rotation):
        """Rotate in place about the y axis by *rotation* radians."""
        c, s = math.cos(rotation), math.sin(rotation)
        self.x, self.z = self.x * c - self.z * s, self.z * c + self.x * s

    def rotateZ(self, rotation):
        """Rotate in place about the z axis by *rotation* radians."""
        c, s = math.cos(rotation), math.sin(rotation)
        self.x, self.y = self.x * c - self.y * s, self.y * c + self.x * s

    def modifyAxes(self, rotation, rtrn=False, prnt=False):
        """Apply an x-, then y-, then z-axis rotation taken from *rotation*.

        Returns self when *rtrn* is true.  (*prnt* is accepted but unused,
        kept for interface compatibility.)
        """
        self.rotateX(rotation.x)
        self.rotateY(rotation.y)
        self.rotateZ(rotation.z)
        return self if rtrn else None
class Rotation3d(Vector3d):
    """Euler-angle triple stored internally in radians.

    Constructor arguments are treated as degrees unless ``radians=True``.
    """

    def __init__(self, x, y, z, radians=False):
        self.update(x, y, z, radians)

    def update(self, x, y, z, radians=False):
        """Set all three angles, converting from degrees unless *radians*."""
        if radians:
            self.x, self.y, self.z = x, y, z
        else:
            self.x = math.radians(x)
            self.y = math.radians(y)
            self.z = math.radians(z)

    def normalise(self):
        """Shift each angle back into [0, 2*pi] by one full period.

        Note: a single-period correction, so angles more than one full turn
        outside the range are only partially wrapped (matches original
        behaviour).
        """
        full_turn = 2 * math.pi
        for axis in ('x', 'y', 'z'):
            angle = getattr(self, axis)
            if angle > full_turn:
                setattr(self, axis, angle - full_turn)
            elif angle < 0:
                setattr(self, axis, angle + full_turn)
class Position3d(Vector3d):
    """A point in 3-D space; adds Euclidean distance measurement."""

    def getDistance(self, otherPosition):
        """Return the straight-line distance to *otherPosition*."""
        offset = self - otherPosition
        return offset.magnitude()
class Camera:
    """Orbit camera: mouse movement rotates it around the world origin.

    Maintains its own rotated basis vectors (xaxis/yaxis/zaxis) so that
    Point.render can project world coordinates into camera space with
    three dot products.
    """

    # NOTE(review): the Position3d/Rotation3d default arguments are mutable
    # objects evaluated once at class-definition time, so they are shared
    # between Camera instances -- confirm callers always pass their own.
    def __init__(self, position = Position3d(0,0,0), orientation = Rotation3d(0,0,0), fov = 90, width = 10, height = 10):
        self.clock = pygame.time.Clock()
        self.position = position
        self.orientation = orientation
        self.target = Position3d(0, 0, 1)
        self.fov = fov  # horizontal field of view in degrees
        self.updateScreen(width, height)
        self.move_speed = 0.01
        self.sensitivity = 1   # mouse-delta to radians scale factor
        self.locked = False    # True = mouse released, False = captured
        self.radius = 1.8*fov/90
        # Camera-space basis vectors, re-derived on every move().
        self.xaxis = Position3d(1, 0, 0)
        self.yaxis = Position3d(0, 1, 0)
        self.zaxis = Position3d(0, 0, 1)

    def updateScreen(self, width, height):
        """Recompute aspect ratio, screen centre and projection factors."""
        self.ar = width/height
        self.centre = (width/2, height/2)
        self.vfov = math.tan(math.radians(self.fov)/2)
        self.hfov = self.ar*math.tan(math.radians(self.fov)/2)

    def toggleLock(self):
        """Toggle mouse capture: captured+hidden when unlocked, free when locked."""
        if self.locked:
            self.locked = False
            pygame.event.set_grab(True)
            pygame.mouse.set_visible(False)
            pygame.mouse.set_pos(self.centre)
        else:
            self.locked = True
            pygame.event.set_grab(False)
            pygame.mouse.set_visible(True)

    def move(self, diff, zoom):
        """Orbit by the mouse delta *diff* (x, y) at distance *zoom*.

        Pitch is clamped to +/- 90 degrees, then the position and basis
        vectors are re-derived from the new orientation.
        """
        yRot = diff[0] * self.sensitivity
        xRot = diff[1] * -self.sensitivity
        self.orientation += Rotation3d(xRot, yRot, 0)
        self.position = Position3d(0, 0, -zoom).modifyAxes(self.orientation, True)
        if self.orientation.x > math.pi/2:
            self.orientation.x = math.pi/2
        elif self.orientation.x < -math.pi/2:
            self.orientation.x = -math.pi/2
        self.xaxis = Position3d(1, 0, 0).modifyAxes(self.orientation, True)
        self.yaxis = Position3d(0, 1, 0).modifyAxes(self.orientation, True)
        self.zaxis = Position3d(0, 0, 1).modifyAxes(self.orientation, True)

    def render(self, screen, items, width, height):
        """Draw *items* back-to-front (painter's algorithm) onto *screen*."""
        self.updateScreen(width, height)
        self.clock.tick()
        # Sort by distance from the camera using the module-level origin.
        order = furthestFirst(items, origin-self.position)
        for item in order:
            item.render(screen, self.vfov, self.hfov, self.position, self.xaxis, self.yaxis, self.zaxis, self.orientation, width, height)
class Face:
    """A polygon defined by shared Point vertices, filled as a triangle fan.

    The face itself stores no screen coordinates; it reuses the
    lastRenderX/lastRenderY values its Points computed during their own
    render pass.
    """

    def __init__(self, points, colour=(0, 200, 0)):
        self.points = points
        #self.generateTriangles()
        self.position = Position3d(0,0,0)
        self.getPos()
        self.colour = colour

    def __str__(self):
        toReturn = ""
        for item in self.points:
            toReturn += "("+str(item)+"), "
        return toReturn[:-2]

    def getPos(self):
        """Set self.position to the centroid of the vertex positions."""
        for item in self.points:
            self.position += item.position
        self.position /= len(self.points)

    def generateTriangles(self):
        """Precompute a triangle-fan decomposition (currently unused)."""
        self.triangles = []
        for i in range(1,len(self.points)-1,1):
            self.triangles.append((self.points[0],self.points[i],self.points[i+1]))

    def render(self, screen):
        """Fill the face as a triangle fan from vertex 0.

        Culls the face when, for any one of the five off-screen directions
        (behind/left/right/above/below), every vertex lies off-screen in
        that direction.
        """
        for i in range(0,5):
            if len(list(filter(lambda x: not x.offScreen[i], self.points)))==0:
                return
        coords = [(point.lastRenderX,point.lastRenderY) for point in self.points]
        for i in range(1,len(self.points)-1):
            # Anti-aliased outline plus filled interior for each fan triangle.
            pygame.gfxdraw.aapolygon(screen, [coords[0], coords[i], coords[i+1]], self.colour)
            pygame.draw.polygon(screen, self.colour, [coords[0], coords[i], coords[i+1]], 0)
class Point:
    """A 3-D vertex that projects itself to screen coordinates.

    Keeps both its original position (origPos) and a working position that
    the owning Object3d overwrites when rotating.  After render(), the
    screen coordinates are cached in lastRenderX/lastRenderY and the
    offScreen flags record which directions it fell outside the viewport.
    """

    def __init__(self, x, y, z, colour=(0,200,0), radians=False):
        self.origPos = Position3d(x, y, z)
        self.position = Position3d(x,y,z)
        self.colour = colour
        self.colours = []
        self.lastRenderX = 0
        self.lastRenderY = 0
        # Flags: [behind camera, left of, right of, above, below] screen.
        self.offScreen = [False,False,False,False,False]
        self.radius = 1
        self.canvas = pygame.Surface((self.radius*2+1, self.radius*2+1))
        pygame.draw.circle(self.canvas, self.colour, (self.radius, self.radius), self.radius)

    def __str__(self):
        return str(self.position)

    def render(self, screen, vfov, hfov, camera_pos, xaxis, yaxis, zaxis, screenWidth, screenHeight):
        """Project the point into screen space and update the cached
        coordinates and offScreen flags.  Returns -1 when the point lies
        exactly on the camera plane (z == 0)."""
        relativePos = self.position + camera_pos
        # Transform into camera space via dot products with the camera basis.
        x, y, z = relativePos.dotProduct(xaxis), relativePos.dotProduct(yaxis), relativePos.dotProduct(zaxis)
        if z == 0:
            return -1
        dist = relativePos.magnitude()
        # Perspective divide, then map [-1, 1] clip range to pixel coords.
        self.lastRenderX = math.floor((x/(z*hfov)+1/2)*screenWidth)
        self.lastRenderY = math.floor((1-(y/(z*vfov)+1/2))*screenHeight)
        if z < 0:
            # Behind the camera: mirror the projection so faces that span
            # the camera plane keep usable coordinates.
            self.lastRenderX = -self.lastRenderX
            self.lastRenderY = -self.lastRenderY
##        if z < 0 or self.lastRenderX < 0 or self.lastRenderX > screenWidth or self.lastRenderY < 0 or self.lastRenderY > screenHeight:
##            self.offScreen = True
##        else:
##            self.offScreen = False
        if z < 0: #behind, left, right, above, below
            self.offScreen[0] = True
        else:
            self.offScreen[0] = False
        if self.lastRenderX < 0:
            self.offScreen[1] = True
            self.offScreen[2] = False
        elif self.lastRenderX > screenWidth:
            self.offScreen[1] = False
            self.offScreen[2] = True
        else:
            self.offScreen[1] = False
            self.offScreen[2] = False
        if self.lastRenderY < 0:
            self.offScreen[3] = True
            self.offScreen[4] = False
        elif self.lastRenderY > screenHeight:
            self.offScreen[3] = False
            self.offScreen[4] = True
        else:
            self.offScreen[3] = False
            self.offScreen[4] = False

    def rasterise(self):
        """Fill polygons between this point and its connections.

        NOTE(review): appears to be dead/legacy code -- it references
        ``self.connections``, ``random`` and ``screen``, none of which are
        defined in this class or module scope, so calling it would raise.
        """
        if len(self.connections) == 0:
            return
        if len(self.colours) == 0:
            for i in range(0, len(self.connections)):
                self.colours.append((random.randint(0,255), random.randint(0,255), random.randint(0,255)))
        for i in range(len(self.connections)-1):
            #colour = (random.randint(0,255), random.randint(0,255), random.randint(0,255))
            coords = [(item.lastRenderX,item.lastRenderY) for item in [self]+self.connections[i:i+2]]
            pygame.draw.polygon(screen, self.colours[i], coords, 0)
class Object3d:
    """Base class for renderable solids built from Points and Faces.

    Subclasses implement setup() to populate self.points and self.faces;
    rotation is applied by rebuilding each point from its original
    position and re-deriving the face centroids.
    """

    def __init__(self,position,size,rotation,colour=(0,200,0), static=True):
        self.position = position
        self.size = size
        self.rotation = rotation
        self.colour = colour
        self.points = []
        self.faces = []
        self.static = static
        self.setup()               # subclass hook: build points and faces
        self.rotate(rotation, True)

    def rotate(self, rotation, absolute=False):
        """Rotate the object; *absolute* replaces the current rotation,
        otherwise *rotation* is added to it.  Points are always rebuilt
        from origPos so repeated rotations do not accumulate error."""
        for item in self.points:
            item.position = item.origPos*1  # copy, not alias
        if absolute:
            self.rotation = rotation
        else:
            self.rotation += rotation
        for item in self.points:
            item.position.modifyAxes(self.rotation)
        for item in self.faces:
            item.getPos()

    def render(self, screen, vfov, hfov, camera_pos, xaxis, yaxis, zaxis, cameraRotation, width, height):
        """Project all points, then draw faces back-to-front."""
        relativePos = self.position - camera_pos
        posList = []
        order = furthestFirst(self.points, relativePos)
        for item in order:
            posList.append(item.render(screen, vfov, hfov, relativePos, xaxis, yaxis, zaxis, width, height))
        order = furthestFirst(self.faces, relativePos)
        for item in order:
            item.render(screen)

    def sortClockwise(self, points, fx, fy):
        """Merge-sort *points* into clockwise order using the 2-D
        projection given by the coordinate accessors *fx*/*fy*."""
        if len(points)<2:
            return points
        mid = len(points)//2
        a = self.sortClockwise(points[:mid], fx, fy)
        b = self.sortClockwise(points[mid:], fx, fy)
        return self.mergeLists(a, b, fx, fy)

    def compareClockwise(self, point1, point2, fx, fy):
        """Return True when point1 comes after point2 going clockwise.

        Uses the half-plane test first, then the sign of the 2-D cross
        product (det) for points in the same half-plane.
        """
        x1, x2, y1, y2 = fx(point1), fx(point2), fy(point1), fy(point2)
        if x1 >= 0 and x2 < 0:
            return True
        elif x1 == 0 and x2 == 0:
            return y1 > y2
        det = x1*y2 - x2*y1
        if det < 0:
            return True
        elif det > 0:
            return False
        else:
            return False

    def mergeLists(self, list1, list2, fx, fy):
        """Standard merge step for sortClockwise."""
        endList = []
        while len(list1)>0 and len(list2)>0: #while items in both lists
            if self.compareClockwise(list1[0],list2[0], fx, fy): #which has smallest item?
                endList.append(list2.pop(0))
            else: #remove 1st item of smaller and add to newlist
                endList.append(list1.pop(0))
        if len(list1)>0: #if list1 still has items
            endList.extend(list1)
        else: #add all items(already sorted)
            endList.extend(list2)
        return endList
class Cuboid(Object3d):
    """Axis-aligned cuboid centred on the origin before rotation."""

    def setup(self):
        """Create the 8 corner points and the 6 faces.

        Corners are generated as 4 (+x half) positions plus their exact
        negations; each face gathers the 4 corners sharing one extreme
        coordinate and orders them clockwise so the triangle fan fills
        correctly.
        """
        self.connections = []
        size = self.size
        values = ((size.x/2, size.y/2, size.z/2),
                  (-size.x/2, size.y/2, size.z/2),
                  (size.x/2, -size.y/2, size.z/2),
                  (-size.x/2, -size.y/2, size.z/2))
        for item in values:
            self.points.append(Point(item[0], item[1], item[2], self.colour))
            self.points.append(Point(-item[0], -item[1], -item[2], self.colour))
        values = [size.x/2, size.y/2, size.z/2]
        # Two faces perpendicular to the x axis (x = +sx/2 and x = -sx/2).
        for i in range(2):
            points = []
            for item in self.points:
                if item.position.x == values[0]:
                    points.append(item)
            #points.sort(key=lambda x: math.acos(x.position.y/math.sqrt(x.position.y**2+x.position.z**2)))
            points = self.sortClockwise(points, lambda x: x.position.y, lambda x: x.position.z)
            self.faces.append(Face(points, self.colour))
            values[0] = -values[0]
        del values[0]
        # Two faces perpendicular to the y axis.
        for i in range(2):
            points = []
            for item in self.points:
                if item.position.y == values[0]:
                    points.append(item)
            #points.sort(key=lambda x: math.acos(x.position.z/math.sqrt(x.position.x**2+x.position.z**2)))
            points = self.sortClockwise(points, lambda x: x.position.x, lambda x: x.position.z)
            #for item in points:
            #    print(item, math.sqrt(item.position.x**2+item.position.z**2) / item.position.z)
            self.faces.append(Face(points, self.colour))
            values[0] = -values[0]
        del values[0]
        # Two faces perpendicular to the z axis.
        for i in range(2):
            points = []
            for item in self.points:
                if item.position.z == values[0]:
                    points.append(item)
            #points.sort(key=lambda x: math.acos(x.position.y/math.sqrt(x.position.x**2+x.position.y**2)))
            points = self.sortClockwise(points, lambda x: x.position.x, lambda x: x.position.y)
            self.faces.append(Face(points, self.colour))
            values[0] = -values[0]
def furthestFirst(li, relativePos):
    """
    Return the items of li ordered furthest-to-nearest, where an item's
    distance is the magnitude of (item.position + relativePos).
    The input list is not modified.
    """
    def distance(item):
        return (item.position + relativePos).magnitude()
    return sorted(li, key=distance, reverse=True)
origin = Rotation3d(0,0,0)
| 3.078125 | 3 |
scripts/20210331_prepare_text_classify/filter_articles.py | SPOClab-ca/writing-features-AI-papers | 0 | 12769795 | import argparse
import os, sys, time
import json
import jsonlines
import nltk
from nltk import word_tokenize, sent_tokenize
import pandas as pd
from pathlib import Path
import pickle
from utils import timed_func
categories = [
"AAAI", "ACL", "COLING", "CVPR", "EMNLP", "ICML", "ICRA", "IJCAI", "NAACL", "NIPS", "ICASSP"
]
def get_venue_labels(args):
    """
    Return venue_name -> [v_shortname, label]
    where venue_name is e.g., "AAAI Spring Symposium 2013",
    v_shortname is e.g., "AAAI", and label is binary
    (0 - Workshop, 1 - Conference).
    Reads one CSV per category from args.venue_name_labels_path; the
    "*_v_arxiv.csv" variants are used when args.include_arxiv is set.
    """
    mapping = {}
    suffix = "_v_arxiv.csv" if args.include_arxiv else ".csv"
    for short in categories:
        table = pd.read_csv(Path(args.venue_name_labels_path, f"{short}{suffix}"))
        for venue, label in zip(table.venue, table.label):
            mapping[venue] = [short, label]
    return mapping
def prepare_text(metadata, pdfparse):
    """
    Extract the abstract and sentence-tokenized body text of a paper.

    Return:
        abstract: str (or None)
        bodytext: list of str (or None)
    Both are None when the abstract is missing / under MIN_WORDS words,
    or when no non-empty body sentences are found.
    """
    MIN_WORDS = 5
    abstract = metadata['abstract']
    if abstract is None or len(abstract.split()) < MIN_WORDS:
        return None, None
    sentences = []
    for section in pdfparse['body_text']:
        text = section['text']
        if text is not None:
            sentences.extend(sent_tokenize(text))
    sentences = [s for s in sentences if len(s) > 0]
    if len(sentences) == 0:
        return None, None
    return abstract, sentences
@timed_func
def filter_articles_main(venue_name_labels_map, args):
    """
    Scan the 100 S2ORC Computer Science chunks under args.input_dir and
    collect articles whose venue appears in venue_name_labels_map.

    results: dictionary keyed by category short name (e.g. 'AAAI'); each
    value is a list of articles of the form
    {'abstract': str, 'bodytext': list of str, 'venue': str,
     'label': int (0 workshop / 1 conference), 'year': int, 'title': str,
     'annual_citations': float}.
    The full collection is pickled to args.export.
    """
    results = {cat: [] for cat in categories}
    curr_year = 2021  # snapshot year used to normalize citation counts
    for chunk_id in range(100):
        start_time = time.time()
        with open(Path(args.input_dir, f"metadata_{chunk_id}.jsonl"), "r") as f_md:
            mds = [json.loads(line) for line in f_md.readlines()]
        with open(Path(args.input_dir, f"pdf_parses_{chunk_id}.pkl"), "rb") as f_pp:
            cat_pdf = pickle.load(f_pp)
        skipped = 0
        collected = 0
        for metadata in mds:
            pdfparse = cat_pdf[metadata['paper_id']]
            abstract, bodytext = prepare_text(metadata, pdfparse)
            if abstract is None or metadata['year'] is None:
                skipped += 1
                continue
            # Following the 20201206_venue_info/venue_info.py convention
            # (i.e., prioritize journal, then venue) for extracting venue
            # information.
            v = metadata.get('journal', None)
            if v is None:
                v = metadata.get('venue', None)
            if v is None:
                v = "None"
            if v in venue_name_labels_map:
                shortname, label = venue_name_labels_map[v]
                # Guard the age at 1 year: the original divided by
                # (curr_year - year), which raises ZeroDivisionError for
                # papers dated curr_year.
                age = max(1, curr_year - metadata['year'])
                citation_per_year = len(metadata['inbound_citations']) / age
                results[shortname].append({
                    "abstract": abstract,
                    "bodytext": bodytext,
                    "venue": v,
                    "label": label,
                    "year": metadata.get('year', None),
                    "title": metadata.get("title", None),
                    "annual_citations": citation_per_year
                })
                collected += 1
        print ("Chunk {} done in {:.2f} seconds. Skipped {} entries. Collected {} entries.".format(chunk_id, time.time() - start_time, skipped, collected))
    with open(args.export, "wb") as f:
        pickle.dump(results, f)
if __name__ == "__main__":
    # Command-line entry point: parse options, build the
    # venue-name -> (category, label) lookup, then scan the S2ORC chunks
    # and pickle the matching articles to --export.
    parser = argparse.ArgumentParser()
    # Directory holding the per-category venue label CSVs.
    parser.add_argument("--venue_name_labels_path", type=str, default="../../data/venue_name_labels")
    # When set, read the "*_v_arxiv.csv" label files instead of "*.csv".
    parser.add_argument("--include_arxiv", action="store_true", default=False)
    # Root of the S2ORC chunk files (metadata_*.jsonl / pdf_parses_*.pkl).
    parser.add_argument("--input_dir", type=str, default="../../data/S2ORC/20200705v1/by_category/Computer Science/")
    # Output pickle path for the collected articles.
    parser.add_argument("--export", type=str, default="../../data/text_classify_articles_with_arxiv.pkl")
    args = parser.parse_args()
    print(args)
    name_map = get_venue_labels(args)
    filter_articles_main(name_map, args)
| 2.546875 | 3 |
otp/boundingbox.py | NExTplusplus/object-trajectory-proposal | 2 | 12769796 | import numpy as np
class BBoxFilter(object):
    """
    Callable predicate over (x, y, w, h) boxes: accepts a box only when its
    area lies in [min_area, max_area] and its short/long side ratio is at
    least min_ratio.
    """
    def __init__(self, min_area, max_area, min_ratio):
        self.min_area = min_area
        self.max_area = max_area
        self.min_ratio = min_ratio

    def __call__(self, bbox):
        assert len(bbox) == 4
        w, h = bbox[2], bbox[3]
        area = w * h
        if not (self.min_area <= area <= self.max_area):
            return False
        return min(w, h) / max(w, h) >= self.min_ratio
def truncate_bbox(bbox, h, w):
    """
    Clip an (x, y, w, h) box so that both corners fall inside an h x w
    image (valid coordinates 0..w-1 / 0..h-1).  Returns the clipped
    (x, y, w, h); values are not rounded to integers.
    """
    left = np.clip(bbox[0], 0, w - 1)
    right = np.clip(bbox[0] + bbox[2], 0, w - 1)
    top = np.clip(bbox[1], 0, h - 1)
    bottom = np.clip(bbox[1] + bbox[3], 0, h - 1)
    return left, top, right - left, bottom - top
def round_bbox(bbox):
    """Floor each (x, y, w, h) component and return them as a tuple of int32."""
    floored = np.floor(np.asarray(bbox))
    return tuple(floored.astype(np.int32))
def compute_bbox(bimg):
    """
    Tight (x, y, w, h) bounding box of the non-zero pixels of a 2-D binary
    image.  Width/height are max-min index differences (inclusive extents).
    """
    row_hits = np.where(bimg.any(axis=1))[0]
    col_hits = np.where(bimg.any(axis=0))[0]
    rmin, rmax = row_hits[0], row_hits[-1]
    cmin, cmax = col_hits[0], col_hits[-1]
    return cmin, rmin, cmax - cmin, rmax - rmin
def compute_iou(bbox1, bbox2):
    """
    Intersection-over-union of two (x, y, w, h) boxes.
    Returns None if either box is None, and 0. when they do not overlap.
    """
    if bbox1 is None or bbox2 is None:
        return None
    left = max(bbox1[0], bbox2[0])
    top = max(bbox1[1], bbox2[1])
    right = min(bbox1[0] + bbox1[2], bbox2[0] + bbox2[2])
    bottom = min(bbox1[1] + bbox1[3], bbox2[1] + bbox2[3])
    if right <= left or bottom <= top:
        return 0.
    inter = float(right - left) * (bottom - top)
    union = bbox1[2] * bbox1[3] + bbox2[2] * bbox2[3] - inter
    return inter / union
def find_max_iou(bbox, bboxes):
    """
    Find the box in `bboxes` with maximal IoU against `bbox`.
    Boxes are (x, y, w, h).  Returns (index, iou); (-1, 0.) when `bboxes`
    is empty or nothing overlaps.
    """
    bbox = np.asarray(bbox)
    bboxes = np.asarray(bboxes)
    if bboxes.shape[0] == 0:
        return -1, 0.
    top_left = np.maximum(bbox[None, :2], bboxes[:, :2])
    bottom_right = np.minimum((bbox[:2] + bbox[2:])[None, :],
                              bboxes[:, :2] + bboxes[:, 2:])
    extent = bottom_right - top_left
    # Only boxes with strictly positive overlap in both dimensions count.
    overlapping = np.where(np.all(extent > 0, axis=1))[0]
    inter = np.prod(extent[overlapping, :], axis=1, dtype=np.float32)
    union = bbox[2] * bbox[3] + np.prod(bboxes[overlapping, 2:], axis=1) - inter
    ious = inter / union
    if ious.shape[0] == 0:
        return -1, 0.
    best = np.argmax(ious)
    return overlapping[best], ious[best]
def ciou(bboxes1, bboxes2):
    """
    Compute IoUs between two sets of bounding boxes.
    Input: np.array((n, 4), np.float32), np.array((m, 4), np.float32)
    Output: np.array((n, m), np.float32)
    In-place ufuncs are used to limit intermediate allocations for large
    n x m grids; results are identical to the straightforward formulation.
    """
    # Overlap width per pair, clamped at 0.
    overlap_w = np.minimum.outer(bboxes1[:, 0] + bboxes1[:, 2],
                                 bboxes2[:, 0] + bboxes2[:, 2])
    overlap_w -= np.maximum.outer(bboxes1[:, 0], bboxes2[:, 0])
    np.clip(overlap_w, 0, None, out=overlap_w)
    # Overlap height per pair, clamped at 0.
    overlap_h = np.minimum.outer(bboxes1[:, 1] + bboxes1[:, 3],
                                 bboxes2[:, 1] + bboxes2[:, 3])
    overlap_h -= np.maximum.outer(bboxes1[:, 1], bboxes2[:, 1])
    np.clip(overlap_h, 0, None, out=overlap_h)
    # Reuse the width buffer as the intersection / final IoU buffer.
    inter = overlap_w
    inter *= overlap_h
    union = np.add.outer(bboxes1[:, 2] * bboxes1[:, 3],
                         bboxes2[:, 2] * bboxes2[:, 3])
    union -= inter
    inter /= union
    return inter
# @jit('float32[:, :](float32[:, :], float32[:, :])')
# def ciou_v2(bboxes1, bboxes2):
# """
# Compute IoUs between two sets of bounding boxes
# Input: np.array((n, 4), np.float32), np.array((m, 4), np.float32)
# Output: np.array((n, m), np.float32)
# """
# n = bboxes1.shape[0]
# m = bboxes2.shape[0]
# iou = np.zeros((n, m), dtype = np.float32)
# for i in range(n):
# for j in range(m):
# minp = np.maximum(bboxes1[i, :2], bboxes2[j, :2])
# maxp = np.minimum(bboxes1[i, :2] + bboxes1[i, 2:],
# bboxes2[j, :2] + bboxes2[j, 2:])
# delta = maxp - minp
# if delta[0] > 0 and delta[1] > 0:
# intersect = np.prod(delta)
# iou[i, j] = intersect / (np.prod(bboxes1[i, 2:]) + \
# np.prod(bboxes2[j, 2:]) - intersect)
# return iou
def _intersect(bboxes1, bboxes2):
"""
bboxes: t x n x 4
"""
assert bboxes1.shape[0] == bboxes2.shape[0]
t = bboxes1.shape[0]
inters = np.zeros((bboxes1.shape[1], bboxes2.shape[1]), dtype = np.float32)
_min = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype = np.float32)
_max = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype = np.float32)
w = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype = np.float32)
h = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype = np.float32)
for i in range(t):
np.maximum.outer(bboxes1[i, :, 0], bboxes2[i, :, 0], out = _min)
np.minimum.outer(bboxes1[i, :, 0] + bboxes1[i, :, 2],
bboxes2[i, :, 0] + bboxes2[i, :, 2], out = _max)
np.subtract(_max, _min, out = w)
w.clip(min = 0, out = w)
np.maximum.outer(bboxes1[i, :, 1], bboxes2[i, :, 1], out = _min)
np.minimum.outer(bboxes1[i, :, 1] + bboxes1[i, :, 3],
bboxes2[i, :, 1] + bboxes2[i, :, 3], out = _max)
np.subtract(_max, _min, out = h)
h.clip(min = 0, out = h)
np.multiply(w, h, out = w)
inters += w
return inters
def _union(bboxes1, bboxes2):
if id(bboxes1) == id(bboxes2):
w = bboxes1[:, :, 2]
h = bboxes1[:, :, 3]
area = np.sum(w * h, axis = 0)
unions = np.add.outer(area, area)
else:
w = bboxes1[:, :, 2]
h = bboxes1[:, :, 3]
area1 = np.sum(w * h, axis = 0)
w = bboxes2[:, :, 2]
h = bboxes2[:, :, 3]
area2 = np.sum(w * h, axis = 0)
unions = np.add.outer(area1, area2)
return unions
def viou(bboxes1, bboxes2):
    """
    Track-level ("volume") IoU between two sets of box tracks of shape
    t x n x 4: total per-frame intersection divided by total union.
    Returns an (n1, n2) matrix.
    """
    inter = _intersect(bboxes1, bboxes2)
    denom = _union(bboxes1, bboxes2) - inter
    return inter / denom
build/lib/discovery_imaging_utils/imaging_utils.py | erikglee/discovery_imaging_utils | 0 | 12769797 | #!/usr/bin/env python
import sys
from nibabel import load as nib_load
import nibabel as nib
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.api as sm
from scipy import signal
import os
from numpy import genfromtxt
from sklearn.decomposition import PCA
def load_gifti_func(path_to_file):
    """
    Load functional data from a GIFTI file via nibabel.
    Returns an array shaped <num_verts x num_timepoints>.
    """
    img = nib_load(path_to_file)
    per_timepoint = [darray.data for darray in img.darrays]
    return np.vstack(per_timepoint).transpose()
def load_cifti_func(path_to_file):
    """Load CIFTI functional data, returned as <num_elements x num_timepoints>."""
    img = nib_load(path_to_file)
    return np.asarray(img.dataobj).transpose()
def calc_fishers_icc(tp1, tp2):
    """
    Intraclass correlation coefficient following Fisher's formulation
    (as described on Wikipedia).  tp1 and tp2 have shape (n, 1) or (n,),
    one value per sample.
    """
    stacked = np.vstack((tp1, tp2))
    grand_mean = np.mean(stacked)
    # Pooled variance over both timepoints' squared deviations.
    pooled_var = np.mean(np.power(stacked - grand_mean, 2))
    n = tp1.shape[0]
    return np.sum(np.multiply(tp1 - grand_mean, tp2 - grand_mean)) / (n * pooled_var)
def pre_post_carpet_plot(noisy_time_series, cleaned_time_series):
    """
    Side-by-side grey-scale carpet plots of BOLD data before and after
    denoising.  Inputs have shape <num_parcels, num_timepoints> (ideally
    parcellated data, not whole-hemisphere data).  Each region's trace is
    demeaned and divided by its standard deviation before display.
    Returns the left subplot's axis object.
    """
    noisy = np.copy(noisy_time_series)
    clean = np.copy(cleaned_time_series)
    noisy_mu = np.mean(noisy, axis=1)
    noisy_sd = np.std(noisy, axis=1)
    clean_mu = np.mean(clean, axis=1)
    clean_sd = np.std(clean, axis=1)
    z_noisy = np.zeros(noisy.shape)
    z_clean = np.zeros(clean.shape)
    # Loop bound follows the cleaned data's row count, matching the
    # original implementation (inputs are expected to share a shape).
    for row in range(clean.shape[0]):
        z_noisy[row, :] = (noisy[row, :] - noisy_mu[row]) / noisy_sd[row]
        z_clean[row, :] = (clean[row, :] - clean_mu[row]) / clean_sd[row]
    plot_obj = plt.subplot(1, 2, 1)
    plt.imshow(z_noisy, aspect='auto', cmap='binary')
    plt.title('Noisy BOLD Data')
    plt.xlabel('Timepoint #')
    plt.ylabel('Region # (Arbritrary)')
    plt.colorbar()
    plt.subplot(1, 2, 2)
    plt.imshow(z_clean, aspect='auto', cmap='binary')
    plt.title('Clean BOLD Data')
    plt.xlabel('Timepoint #')
    plt.colorbar()
    fig = plt.gcf()
    fig.set_size_inches(15, 5)
    return plot_obj
def parcellate_func_combine_hemis(lh_func, rh_func, lh_parcel_path, rh_parcel_path):
    """
    Average vertex-wise functional data (<num_verts, num_timepoints>) within
    each parcel of a FreeSurfer annotation, for both hemispheres, and stack
    the results with left-hemisphere parcels first.

    Returns (parcellated_data, parcel_labels):
      parcellated_data : <num_parcels, num_timepoints> array
      parcel_labels    : list of parcel name strings (lh then rh)

    NOTE: THIS ASSUMES THE FIRST PARCEL WILL BE MEDIAL WALL, AND DISREGARDS
    ANY VERTICES WITHIN THAT PARCEL.  IF THIS IS NOT THE CASE FOR YOUR
    PARCELLATION, DO NOT USE THIS FUNCTION.
    """
    def _parcellate(func, annot_path):
        # read_annot returns (labels, ctab, names)
        labels, _, names = nib.freesurfer.io.read_annot(annot_path)
        n_parcels = len(names) - 1  # parcel 0 (medial wall) is dropped
        data = np.zeros((n_parcels, func.shape[1]))
        for parcel_num in range(1, n_parcels + 1):
            verts = np.where(labels == parcel_num)[0]
            data[parcel_num - 1, :] = np.mean(func[verts, :], axis=0)
        return data, names[1:]

    lh_data, lh_names = _parcellate(lh_func, lh_parcel_path)
    rh_data, rh_names = _parcellate(rh_func, rh_parcel_path)
    parcellated_data = np.vstack((lh_data, rh_data))
    # Annotation names come back as bytes; expose them as str.
    parcel_labels = [name.decode("utf-8") for name in lh_names + rh_names]
    return parcellated_data, parcel_labels
def net_mat_summary_stats(matrix_data, include_diagonals, parcel_labels):
    """
    Collapse a <num_parcels x num_parcels> matrix to a 7 x 7 network-level
    matrix by averaging all entries belonging to each pair of Yeo-7
    networks (e.g. within-DMN, DMN-to-Control, ...).

    include_diagonals : set True to count matrix diagonal entries; for a
        conventional FC matrix leave it False (diagonal is 1 or Inf).
    parcel_labels : Schaefer/Yeo 7-network style parcel name strings, used
        to assign each parcel to a network by substring match.
    Only works with the Schaefer/Yeo 7-network naming convention.
    """
    network_names = ['Vis', 'SomMot', 'DorsAttn', 'SalVentAttn', 'Limbic', 'Cont', 'Default']
    # Assign each parcel a network id 0-6 (order of network_names).
    network_ids = np.zeros((len(parcel_labels), 1))
    for parcel_ind, label in enumerate(parcel_labels):
        for net_ind, net_name in enumerate(network_names):
            if net_name in label:
                network_ids[parcel_ind] = net_ind
    network_stats = np.zeros((7, 7))
    for i in range(7):
        members_i = np.where(network_ids == i)[0]
        for j in range(7):
            members_j = np.where(network_ids == j)[0]
            total = 0
            count = 0
            for a in members_i:
                for b in members_j:
                    # Diagonal cells only contribute when requested.
                    if a != b or include_diagonals == True:
                        total += matrix_data[a, b]
                        count += 1
            network_stats[i, j] = total / count
    return network_stats
def net_summary_stats(parcel_data, parcel_labels):
    """
    Collapse a parcel-level statistic (1-D, one value per parcel) to the
    Yeo-7 network level by averaging across the parcels assigned to each
    network.  1-D counterpart of net_mat_summary_stats.

    parcel_labels : Schaefer/Yeo 7-network style parcel name strings, used
        to assign each parcel to a network by substring match.
    Only works with the Schaefer/Yeo 7-network naming convention.
    """
    network_names = ['Vis', 'SomMot', 'DorsAttn', 'SalVentAttn', 'Limbic', 'Cont', 'Default']
    # Assign each parcel a network id 0-6 (order of network_names).
    network_ids = np.zeros((len(parcel_labels), 1))
    for parcel_ind, label in enumerate(parcel_labels):
        for net_ind, net_name in enumerate(network_names):
            if net_name in label:
                network_ids[parcel_ind] = net_ind
    network_stats = np.zeros((7))
    for net_ind in range(7):
        members = np.where(network_ids == net_ind)[0]
        network_stats[net_ind] = sum(parcel_data[m] for m in members) / len(members)
    return network_stats
def plot_network_timeseries(parcel_data, parcel_labels):
    """
    Plot every parcel's time course, one subplot per Yeo-7 network, with
    each network drawn in its conventional colour.  parcel_labels are
    Schaefer/Yeo 7-network style names used for network assignment.
    Returns the matplotlib figure.
    """
    network_names = ['Vis', 'SomMot', 'DorsAttn', 'SalVentAttn', 'Limbic', 'Cont', 'Default']
    network_colors = [[121/255, 3/255, 136/255, 1], [67/255, 129/255, 182/255, 1], [0/255, 150/255, 0/255, 1],
                      [198/255, 41/255, 254/255, 1], [219/255, 249/255, 160/255, 1],
                      [232/255, 149/255, 0/255, 1], [207/255, 60/255, 74/255, 1]]
    # Assign each parcel a network id 0-6 (order of network_names).
    network_ids = np.zeros((len(parcel_labels), 1))
    for parcel_ind, label in enumerate(parcel_labels):
        for net_ind, net_name in enumerate(network_names):
            if net_name in label:
                network_ids[parcel_ind] = net_ind
    fig, axes = plt.subplots(7, 1)
    for net_ind in range(7):
        members = np.where(network_ids == net_ind)[0]
        plt.sca(axes[net_ind])
        for member in members:
            plt.plot(parcel_data[member], color=network_colors[net_ind])
        plt.ylabel('Signal Intensity')
        plt.title('Time-Course For All ' + network_names[net_ind] + ' Parcels')
        if net_ind != 6:
            plt.xticks([])
    plt.xlabel('Volume # (excluding high-motion volumes)')
    fig.set_size_inches(15, 20)
    return fig
def calc_norm_std(parcel_data, confound_path):
    """
    Normalized standard deviation of cleaned fMRI time signals -- a proxy
    for BOLD variability/amplitude that remains valid for scrubbed data
    where an FFT-based ALFF is no longer well defined.

    parcel_data    : <num_regions, num_timepoints> array
    confound_path  : path to the run's confound .tsv; its global_signal
                     column supplies the median pre-processing intensity.
    Returns std(region time series) / median(global signal) per region.
    """
    confounds = pd.read_csv(confound_path, sep='\t')
    median_intensity = np.median(confounds.global_signal.values)
    return np.std(parcel_data, axis=1) / median_intensity
def network_bar_chart(network_vals, ylabel):
    """
    Bar chart of one value per Yeo-7 network, each bar in its network's
    conventional colour.  Returns the matplotlib bar container.
    """
    network_names = ['Vis', 'SomMot', 'DorsAttn', 'SalVentAttn', 'Limbic', 'Cont', 'Default']
    network_colors = [[121/255, 3/255, 136/255, 1], [67/255, 129/255, 182/255, 1], [0/255, 150/255, 0/255, 1],
                      [198/255, 41/255, 254/255, 1], [219/255, 249/255, 160/255, 1],
                      [232/255, 149/255, 0/255, 1], [207/255, 60/255, 74/255, 1]]
    positions = list(range(1, 8))
    fig = plt.bar(positions, network_vals, color=network_colors, tick_label=network_names)
    plt.ylabel(ylabel)
    plt.xticks(rotation=45)
    return fig
def _parse_stats_table(lines, header, expected_names, name_col, value_cols):
    """
    Parse the whitespace-delimited table that follows `header` in a
    FreeSurfer stats file.

    lines          : raw file lines
    header         : the '# ColHeaders ...' line marking the table start
    expected_names : structure names expected, in row order; NameError is
                     raised if a row's name (column name_col) differs
    value_cols     : column indices whose values to collect as floats

    Returns one list of floats per entry in value_cols.
    """
    columns = [[] for _ in value_cols]
    start_ind = None
    for i, line in enumerate(lines):
        if start_ind is not None:
            row_num = i - start_ind
            if row_num >= len(expected_names):
                break  # table fully consumed; ignore trailing lines
            fields = line.split()
            if fields[name_col] != expected_names[row_num]:
                # NOTE: the original cortical parsers reported
                # split_line[4] here even though the name lives in
                # column 0 -- fixed to report the actual name column.
                raise NameError('Error: anatomy names do not line up with expectation. Expected ' +
                                expected_names[row_num] + ' but found ' + fields[name_col])
            for out_col, field_ind in zip(columns, value_cols):
                out_col.append(float(fields[field_ind]))
        if header in line:
            start_ind = i + 1
    return columns


def fs_anat_to_array(path_to_fs_subject, folder_for_output_files):
    """
    Collect aseg.stats, lh.aparc.stats and rh.aparc.stats from the
    FreeSurfer subject at path_to_fs_subject and export, as .npy files
    under folder_for_output_files (created if absent):
      - subcortical structure volumes (aseg_vols.npy)
      - cortical surface areas / volumes / thicknesses per hemisphere
      - bilateral summary metrics (discovery_anat_metrics.npy)
    plus text files listing the region names for each array.
    """
    subcort_names = ['Left-Lateral-Ventricle', 'Left-Inf-Lat-Vent', 'Left-Cerebellum-White-Matter',
                     'Left-Cerebellum-Cortex', 'Left-Thalamus-Proper', 'Left-Caudate', 'Left-Putamen',
                     'Left-Pallidum', '3rd-Ventricle', '4th-Ventricle', 'Brain-Stem', 'Left-Hippocampus',
                     'Left-Amygdala', 'CSF', 'Left-Accumbens-area', 'Left-VentralDC', 'Left-vessel',
                     'Left-choroid-plexus', 'Right-Lateral-Ventricle', 'Right-Inf-Lat-Vent',
                     'Right-Cerebellum-White-Matter', 'Right-Cerebellum-Cortex', 'Right-Thalamus-Proper',
                     'Right-Caudate', 'Right-Putamen', 'Right-Pallidum', 'Right-Hippocampus',
                     'Right-Amygdala', 'Right-Accumbens-area', 'Right-VentralDC', 'Right-vessel',
                     'Right-choroid-plexus', '5th-Ventricle', 'WM-hypointensities', 'Left-WM-hypointensities',
                     'Right-WM-hypointensities', 'non-WM-hypointensities', 'Left-non-WM-hypointensities',
                     'Right-non-WM-hypointensities', 'Optic-Chiasm', 'CC_Posterior', 'CC_Mid_Posterior',
                     'CC_Central', 'CC_Mid_Anterior', 'CC_Anterior']
    cort_names = ['bankssts', 'caudalanteriorcingulate', 'caudalmiddlefrontal', 'cuneus', 'entorhinal',
                  'fusiform', 'inferiorparietal', 'inferiortemporal', 'isthmuscingulate', 'lateraloccipital',
                  'lateralorbitofrontal', 'lingual', 'medialorbitofrontal', 'middletemporal', 'parahippocampal',
                  'paracentral', 'parsopercularis', 'parsorbitalis', 'parstriangularis', 'pericalcarine',
                  'postcentral', 'posteriorcingulate', 'precentral', 'precuneus', 'rostralanteriorcingulate',
                  'rostralmiddlefrontal', 'superiorfrontal', 'superiorparietal', 'superiortemporal', 'supramarginal',
                  'frontalpole', 'temporalpole', 'transversetemporal', 'insula']

    stats_dir = os.path.join(path_to_fs_subject, 'stats')
    aseg_header = ('# ColHeaders Index SegId NVoxels Volume_mm3 StructName '
                   'normMean normStdDev normMin normMax normRange')
    with open(os.path.join(stats_dir, 'aseg.stats'), 'r') as f:
        [aseg_vol] = _parse_stats_table(f.readlines(), aseg_header,
                                        subcort_names, name_col=4, value_cols=[3])

    aparc_header = ('# ColHeaders StructName NumVert SurfArea GrayVol ThickAvg '
                    'ThickStd MeanCurv GausCurv FoldInd CurvInd')
    with open(os.path.join(stats_dir, 'lh.aparc.stats'), 'r') as f:
        lh_surface_area, lh_volume, lh_thickness = _parse_stats_table(
            f.readlines(), aparc_header, cort_names, name_col=0, value_cols=[2, 3, 4])
    with open(os.path.join(stats_dir, 'rh.aparc.stats'), 'r') as f:
        rh_surface_area, rh_volume, rh_thickness = _parse_stats_table(
            f.readlines(), aparc_header, cort_names, name_col=0, value_cols=[2, 3, 4])

    if not os.path.exists(folder_for_output_files):
        os.mkdir(folder_for_output_files)

    # Save the metrics as numpy files.
    np.save(os.path.join(folder_for_output_files, 'aseg_vols.npy'), np.asarray(aseg_vol))
    np.save(os.path.join(folder_for_output_files, 'lh_aseg_surface_areas.npy'), np.asarray(lh_surface_area))
    np.save(os.path.join(folder_for_output_files, 'lh_aseg_volumes.npy'), np.asarray(lh_volume))
    np.save(os.path.join(folder_for_output_files, 'lh_aseg_thicknesses.npy'), np.asarray(lh_thickness))
    np.save(os.path.join(folder_for_output_files, 'rh_aseg_surface_areas.npy'), np.asarray(rh_surface_area))
    np.save(os.path.join(folder_for_output_files, 'rh_aseg_volumes.npy'), np.asarray(rh_volume))
    np.save(os.path.join(folder_for_output_files, 'rh_aseg_thicknesses.npy'), np.asarray(rh_thickness))

    # Bilateral summary metrics; the indices below are positions within
    # subcort_names.
    total_lateral_vent = aseg_vol[0] + aseg_vol[18]   # L/R lateral ventricle
    total_hipp_vol = aseg_vol[11] + aseg_vol[26]      # L/R hippocampus
    total_thal_vol = aseg_vol[4] + aseg_vol[22]       # L/R thalamus proper
    total_amyg_vol = aseg_vol[12] + aseg_vol[27]      # L/R amygdala

    # Whole-brain mean thickness, weighted by each parcel's surface area.
    numerator = np.sum(np.multiply(lh_surface_area, lh_thickness)) + \
                np.sum(np.multiply(rh_surface_area, rh_thickness))
    denominator = np.sum(lh_surface_area) + np.sum(rh_surface_area)
    whole_brain_ave_thick = numerator / denominator

    discovery_metric_array = [total_hipp_vol, total_amyg_vol, total_thal_vol,
                              total_lateral_vent, whole_brain_ave_thick]
    np.save(os.path.join(folder_for_output_files, 'discovery_anat_metrics.npy'),
            np.asarray(discovery_metric_array))
    discovery_anat_ids = ['bilateral_hipp_volume', 'bilateral_amyg_vol', 'bilateral_thal_vol',
                          'bilateral_lateral_vent_vol', 'whole_brain_ave_thick']

    # Then save files with the region names (one name per line).
    with open(os.path.join(folder_for_output_files, 'subcortical_region_names.txt'), 'w') as f:
        for item in subcort_names:
            f.write("%s\n" % item)
    with open(os.path.join(folder_for_output_files, 'cortical_region_names.txt'), 'w') as f:
        for item in cort_names:
            f.write("%s\n" % item)
    with open(os.path.join(folder_for_output_files, 'discovery_region_names.txt'), 'w') as f:
        for item in discovery_anat_ids:
            f.write("%s\n" % item)
    return
def calculate_XT_X_Neg1_XT(X):
    """
    Pre-compute pinv(X^T X) X^T, the matrix that maps observations Y onto
    least-squares beta weights (B = pinv(X^T X) X^T Y).  X has shape
    (n x d): n observations by d predictors.  The pseudo-inverse is used,
    so rank-deficient designs are handled.
    """
    design_T = X.transpose()
    gram_pinv = np.linalg.pinv(np.matmul(design_T, X))
    return np.matmul(gram_pinv, design_T)
def partial_clean_fast(Y, XT_X_Neg1_XT, bad_regressors):
    """
    Regress nuisance signals out of time series Y (shape (n,) or (n, 1)).

    XT_X_Neg1_XT is ((X^T)X)^-1 (X^T) for a design matrix X whose FIRST
    columns are the nuisance ("bad") regressors (noise ICs, constant,
    linear trend, ...) followed by the signal ("good") regressors -- the
    bad-first ordering is what lets the leading betas be sliced off below.
    The fitted nuisance component is reconstructed from those betas and
    subtracted; the residual is returned.
    """
    betas = np.matmul(XT_X_Neg1_XT, Y)
    nuisance_fit = np.matmul(bad_regressors, betas[:bad_regressors.shape[1]])
    return Y - nuisance_fit
from scipy.signal import butter, filtfilt
def construct_filter(btype, cutoff, TR, order):
    """
    Build Butterworth filter coefficients (b, a) for data sampled every TR
    seconds.

    btype  : 'lowpass', 'highpass' or 'bandpass'
    cutoff : list of cutoff frequencies in Hz -- one value for low/high
             pass, two for bandpass
    order  : filter order; effectively doubled when applied with filtfilt
             (forward-backward filtering, which removes phase distortion).
             Recommended order is 6.

    Raises NameError on an unknown btype or wrong cutoff count.  Use
    apply_filter to run the returned coefficients over a signal.
    """
    nyq = 0.5 * (1/TR)
    if btype == 'lowpass' or btype == 'highpass':
        if len(cutoff) != 1:
            raise NameError('Error: ' + btype + ' type filter should have one cutoff values')
        b, a = butter(order, cutoff[0]/nyq, btype=btype)
    elif btype == 'bandpass':
        if len(cutoff) != 2:
            raise NameError('Error: bandpass type filter should have two cutoff values')
        band = [min(cutoff)/nyq, max(cutoff)/nyq]
        b, a = butter(order, band, btype='bandpass')
    else:
        raise NameError('Error: filter type should by low, high, or band')
    return b, a
########################################################################################
########################################################################################
########################################################################################
def apply_filter(b, a, signal):
    """
    Zero-phase filter `signal` with Butterworth coefficients (b, a) from
    construct_filter, using forward-backward filtering (filtfilt).
    """
    return filtfilt(b, a, signal)
########################################################################################
########################################################################################
########################################################################################
def output_stats_figures_pa_ap_compare(cleaned_ap, cleaned_pa):
    """
    Produce QC figures comparing AP- and PA-encoded runs: each run's
    connectivity (correlation) matrix, the absolute difference matrix,
    a histogram of those differences, and a scatter of the two runs'
    upper-triangle connectivity values (title reports their correlation).
    Inputs have shape <num_regions, num_timepoints>.
    """
    cleaned_ap_netmat = np.corrcoef(cleaned_ap)
    cleaned_pa_netmat = np.corrcoef(cleaned_pa)

    plt.figure()
    plt.imshow(cleaned_ap_netmat)
    plt.colorbar()
    plt.title('AP Conn Matrix')

    plt.figure()
    # (removed a stray no-op `cleaned_ap.shape` debug statement here)
    plt.imshow(cleaned_pa_netmat)
    plt.colorbar()
    plt.title('PA Conn Matrix')

    plt.figure()
    corr_dif = cleaned_ap_netmat - cleaned_pa_netmat
    plt.imshow(np.abs(corr_dif), vmin=0, vmax=0.1)
    plt.title('abs(AP - PA)')
    plt.colorbar()

    plt.figure()
    plt.hist(np.abs(np.reshape(corr_dif, corr_dif.shape[0]**2)), bins = 20)
    plt.title('abs(AP - PA) mean = ' + str(np.mean(np.abs(corr_dif))))

    # Compare only the unique off-diagonal connectivity values.
    ap_arr = cleaned_ap_netmat[np.triu_indices(cleaned_ap_netmat.shape[0], k = 1)]
    pa_arr = cleaned_pa_netmat[np.triu_indices(cleaned_pa_netmat.shape[0], k = 1)]
    plt.figure()
    plt.scatter(ap_arr, pa_arr)
    plt.title('AP-PA corr: ' + str(np.corrcoef(ap_arr, pa_arr)[0,1]))
def find_mean_fd(path_to_func):
    """
    Return the mean framewise displacement (FD) for a run.

    path_to_func must point to an fMRIPrep functional output whose
    filename suffix is exactly 31 characters; stripping it yields the
    prefix of the matching *desc-confounds_regressors.tsv file.
    NOTE(review): this slicing assumes fsaverage-space naming -- confirm
    before using with other output spaces.

    The first FD entry (no preceding volume) is excluded from the mean.
    """
    confound_path = path_to_func[:-31] + 'desc-confounds_regressors.tsv'
    confound_df = pd.read_csv(confound_path, sep='\t')
    # (removed an unused `partial_confounds` local from the original)
    fd_arr = confound_df.framewise_displacement.values
    return np.mean(fd_arr[1:])
def convert_to_upper_arr(np_square_matrix):
    """Flatten the strictly upper triangle of a square matrix.

    :param np_square_matrix: square numpy array.
    :return: 1-D array of the entries above the main diagonal
        (diagonal excluded), in row-major order.
    """
    size = np_square_matrix.shape[0]
    upper_inds = np.triu_indices(size, k=1)
    return np_square_matrix[upper_inds]
def demedian_parcellate_func_combine_hemis(lh_func, rh_func, lh_parcel_path, rh_parcel_path):
    """Median-normalize, parcellate, and concatenate both hemispheres.

    Each hemisphere's functional data (<num_verts, num_timepoints>) is
    divided vertex-wise by its median signal intensity (vertices whose
    median is below 0.001 are treated as NaN), then averaged within each
    parcel of the annotation at ``?h_parcel_path``.  Results from the two
    hemispheres are stacked (left first).

    NOTE: the first parcel in each annotation is assumed to be the medial
    wall and is discarded.  Do not use this function with parcellations
    where that does not hold.

    :return: tuple of (<num_parcels, num_timepoints> time series,
        list of parcel label strings, <num_parcels> mean-of-medians array).
    """
    def _parcellate_hemi(func_data, annot_path):
        # read_annot returns (vertex_labels, ctab, parcel_names).
        annot = nib.freesurfer.io.read_annot(annot_path)
        vertex_labels = annot[0]
        parcel_names = annot[2]
        n_parcels = len(parcel_names) - 1  # drop medial wall (index 0)
        vert_medians = np.nanmedian(func_data, axis=1)
        vert_medians[np.where(vert_medians < 0.001)] = np.nan
        scaled = func_data / vert_medians[:, None]
        timeseries = np.zeros((n_parcels, func_data.shape[1]))
        medians = np.zeros(n_parcels)
        for parcel in range(1, n_parcels + 1):
            verts = np.where(vertex_labels == parcel)[0]
            timeseries[parcel - 1, :] = np.nanmean(scaled[verts, :], axis=0)
            medians[parcel - 1] = np.nanmean(vert_medians[verts])
        return timeseries, medians, parcel_names[1:]

    lh_ts, lh_meds, lh_names = _parcellate_hemi(lh_func, lh_parcel_path)
    rh_ts, rh_meds, rh_names = _parcellate_hemi(rh_func, rh_parcel_path)
    parcellated_data = np.vstack((lh_ts, rh_ts))
    parcel_medians = np.hstack((lh_meds, rh_meds))
    # Convert parcel labels from bytes to normal strings.
    parcel_labels = [name.decode("utf-8") for name in lh_names + rh_names]
    return parcellated_data, parcel_labels, parcel_medians
| 2.640625 | 3 |
software/fmcw.py | matthuszagh/fmcw | 14 | 12769798 | <reponame>matthuszagh/fmcw
#!/usr/bin/env python
from __future__ import annotations
from time import clock_gettime, CLOCK_MONOTONIC
import sys
from enum import IntEnum, auto
from typing import Union, Optional, Callable, List, Tuple
from pathlib import Path
from shutil import rmtree
from multiprocessing import Process, Pipe
from multiprocessing.connection import Connection
from queue import Queue
import numpy as np
from pyqtgraph.Qt import QtGui
import pyqtgraph as pg
from scipy import signal
from device import Device
BITMODE_SYNCFF = 0x40
CHUNKSIZE = 0x10000
SIO_RTS_CTS_HS = 0x1 << 8
START_FLAG = 0xFF
STOP_FLAG = 0x8F
RAW_LEN = 20480
FS = 40e6
TSWEEP = 1e-3
BANDWIDTH = 300e6
PASS_DB = 0.5
STOP_DB = -40
NUMTAPS = 120
BANDS = [0, 0.5e6, 1.0e6, 20e6]
BAND_GAIN = [1, 0]
DECIMATE = 20
DECIMATED_LEN = RAW_LEN // DECIMATE
BYTE_BITS = 8
ADC_BITS = 12
FIR_BITS = 13
WINDOW_BITS = 13
FFT_BITS = FIR_BITS + 1 + int(np.ceil(np.log2(DECIMATED_LEN)))
# TODO should be configuration option
HIST_RANGE = 3000
# dB min and max if no other value is set
DB_MIN = -180
DB_MAX = 0
DIST_INIT = 235
def dist_to_freq(dist: float, bw: float, ts: float) -> int:
    """Convert a target distance to the corresponding FMCW beat frequency.

    Uses f = 2*d*BW / (c * Tsweep), truncated to an integer.  (The return
    annotation previously said ``float`` although the value is an int.)

    :param dist: target distance (m).
    :param bw: sweep bandwidth (Hz).
    :param ts: sweep duration (s).
    :return: beat frequency in Hz, as an int.
    """
    return int(2 * dist * bw / (299792458 * ts))
FREQ_INIT = dist_to_freq(DIST_INIT, BANDWIDTH, TSWEEP)
def freq_to_dist(freq: float, bw: float, ts: float) -> int:
    """Convert an FMCW beat frequency to the corresponding distance.

    Inverse of ``dist_to_freq``: d = c * f * Tsweep / (2 * BW), truncated
    to an integer.  (The return annotation previously said ``float``
    although the value is an int.)

    :param freq: beat frequency (Hz).
    :param bw: sweep bandwidth (Hz).
    :param ts: sweep duration (s).
    :return: distance in meters, as an int.
    """
    return int(299792458 * freq * ts / (2 * bw))
def _reverse_bits(val: int, nbits: int) -> int:
"""
"""
i = 0
newval = 0
while i < nbits:
mask = 0x1 << i
newval |= ((mask & val) >> i) << (nbits - 1 - i)
i += 1
return newval
HORIZONTAL_LINES = "----------\n"
class Data(IntEnum):
    """FPGA processing-pipeline stages, ordered from least to most processed.

    The integer ordering is meaningful: comparisons such as
    ``data <= Data.FIR`` are used elsewhere to distinguish pre- from
    post-decimation stages.
    """

    RAW = 0  # raw ADC samples
    FIR = 1  # FIR-filtered samples (before decimation)
    DECIMATE = 2  # FIR-filtered and decimated samples
    WINDOW = 3  # windowed samples
    FFT = 4  # FFT output (complex: real and imaginary parts)
def data_to_fpga_output(data: Data) -> str:
    """Map a pipeline stage to the name the FPGA uses for that output.

    The FPGA labels the decimated output "FIR".  Any stage not in the
    table below (including Data.FFT and, as in the original, Data.FIR)
    is reported as "FFT".
    """
    names = {
        Data.RAW: "RAW",
        Data.DECIMATE: "FIR",
        Data.WINDOW: "WINDOW",
    }
    return names.get(data, "FFT")
def data_from_str(strval: str) -> Data:
    """Parse a case-insensitive pipeline-stage name into a Data value.

    Accepts the full names and the short forms r / fi / d / w / ff.

    :raises RuntimeError: if ``strval`` is not a recognized name.
    """
    lookup = {
        "raw": Data.RAW,
        "r": Data.RAW,
        "fir": Data.FIR,
        "fi": Data.FIR,
        "decimate": Data.DECIMATE,
        "d": Data.DECIMATE,
        "window": Data.WINDOW,
        "w": Data.WINDOW,
        "fft": Data.FFT,
        "ff": Data.FFT,
    }
    key = strval.lower()
    if key in lookup:
        return lookup[key]
    raise RuntimeError("Invalid Data string.")
def data_sweep_len(data: Data) -> int:
    """Number of samples in one sweep at the given pipeline stage.

    Stages up to and including FIR run at the full ADC rate; later
    stages are decimated.
    """
    return RAW_LEN if data <= Data.FIR else DECIMATED_LEN
def spectrum_len(data: Data) -> int:
    """Length of the one-sided (real-input) spectrum at the given stage."""
    sweep_samples = data_sweep_len(data)
    return sweep_samples // 2 + 1
def nyquist_freq(data: Data) -> float:
    """Nyquist frequency (Hz) of the data at the given pipeline stage.

    Pre-decimation stages (RAW, FIR) run at the full sampling rate; all
    later stages are decimated by DECIMATE.
    """
    if data in (Data.RAW, Data.FIR):
        return FS // 2
    return FS // 2 // DECIMATE
# TODO: this should be in sync with the FPGA configuration. Currently,
# bit widths need to set twice independently.
def data_nbits(data: Data) -> int:
    """Bit width of one sample at the given pipeline stage.

    :raises ValueError: for stages with no defined width (including
        Data.FIR, as in the original).
    """
    widths = {
        Data.RAW: ADC_BITS,
        Data.DECIMATE: FIR_BITS,
        Data.WINDOW: WINDOW_BITS,
        Data.FFT: FFT_BITS,
    }
    if data in widths:
        return widths[data]
    raise ValueError("Invalid Data value.")
def pow2ceil(val: int) -> int:
    """Round ``val`` up to the nearest power of 2 (minimum 1).

    Values <= 1 (including 0 and negatives) yield 1, matching the
    original iterative implementation.
    """
    if val <= 1:
        return 1
    return 2 * pow2ceil((val + 1) // 2)
def data_nbytes(nbits: int) -> int:
    """Bytes used on the wire for a sample of ``nbits`` bits.

    The bit count is rounded up to whole bytes and then to the next power
    of 2: e.g. 12 bits -> 2 bytes, 24 bits -> 4 bytes.

    Bug fix: the remainder test was ``nbits % nbytes`` instead of
    ``nbits % 8``, so non-byte-aligned widths (12, 13 bits) reported one
    byte too few, and nbits < 8 raised ZeroDivisionError.
    """
    nbytes = nbits // BYTE_BITS
    if nbits % BYTE_BITS != 0:
        nbytes += 1
    return pow2ceil(nbytes)
def data_nflags(data: Data) -> int:
    """Number of start/stop flag bytes for a payload at the given stage.

    Each sample is MSB-padded with zeros to byte alignment, with at least
    one padded 0 bit, so the padded sample's MSB is guaranteed zero and a
    start/stop flag can always be distinguished from data.  The flag is
    therefore sized as the sample-plus-padding byte count, rounded up to
    a power of 2.  FFT samples are doubled (real + imaginary parts).
    """
    sample_bits = data_nbits(data)
    if data == Data.FFT:
        sample_bits *= 2
    padded_bytes = sample_bits // 8 + 1
    return pow2ceil(padded_bytes)
def write(txt: str, newline: bool = True):
    """Write ``txt`` to stdout and flush immediately.

    :param txt: text to emit.
    :param newline: append a trailing newline when True (default).
    """
    stream = sys.stdout
    stream.write(txt + "\n" if newline else txt)
    stream.flush()
def number_from_array(arr: List[np.uint8], nbits: int) -> int:
    """Interpret a big-endian byte sequence as a signed integer.

    Decodes ``arr`` as an unsigned big-endian value and reinterprets the
    low ``nbits`` bits as two's complement.

    :param arr: byte sequence (bytes, bytearray, or list of byte values).
    :param nbits: bit width of the signed value, including the sign bit.
    :return: the signed integer value.
    """
    unsigned = int.from_bytes(arr, byteorder="big")
    sign_bit = 1 << (nbits - 1)
    # XOR flips the sign bit; subtracting it back yields the two's
    # complement value (equivalent to -(u & m) + (u & ~m)).
    return (unsigned ^ sign_bit) - sign_bit
def path_is_subdir(path: Path) -> bool:
    """True iff ``path`` lies strictly below the current working directory.

    Note: like the original, this is a plain string-prefix check on the
    POSIX form of the paths (no separator-boundary check).
    """
    cwd = Path.cwd().as_posix()
    target = path.as_posix()
    return target.startswith(cwd) and target != cwd
def subdivide_range(rg: int, divider: int) -> List[Tuple[int, int]]:
    """Partition [0, rg) into consecutive half-open (start, stop) ranges.

    Every range spans ``divider`` elements except possibly the last,
    which absorbs the remainder.  The tuples are suitable for direct use
    as slice bounds.

    Bug fix: the final range previously ended at ``rg - 1``; since the
    callers use these as exclusive slice bounds, the last element of the
    range was silently dropped.

    :param rg: total range length (number of elements).
    :param divider: nominal elements per subrange.
    :return: list of (start, stop) tuples covering [0, rg).
    """
    nbins = rg // divider
    if rg % divider != 0:
        nbins += 1
    ranges = []
    start = 0
    for _ in range(nbins - 1):
        ranges.append((start, start + divider))
        start += divider
    ranges.append((start, rg))
    return ranges
def db_arr(indata, maxval, db_min, db_max):
    """Convert amplitude values to clipped full-scale decibels.

    Each element is expressed as dB relative to ``maxval``
    (20*log10(x / maxval)) and clipped to [db_min, db_max].

    :param indata: input array of amplitude values.
    :param maxval: amplitude corresponding to full scale (0 dB).
    :param db_min: lower clip bound in dB.
    :param db_max: upper clip bound in dB.
    :return: array of clipped dB values.
    """
    ratio = indata / maxval
    decibels = 20 * np.log10(ratio)
    return np.clip(decibels, db_min, db_max)
def dbin(fs: float, tsweep: float, nsample: int, bandwidth: float) -> float:
    """Distance (m) spanned by one DFT frequency bin.

    :param fs: sampling frequency (Hz).
    :param tsweep: sweep duration (s).
    :param nsample: number of samples per sweep (DFT length).
    :param bandwidth: sweep bandwidth (Hz).
    :return: meters per frequency bin.
    """
    freq_resolution = fs / nsample
    return 299792458 * tsweep * freq_resolution / (2 * bandwidth)
def plot_rate(nsweep: int, sec: float) -> str:
    """Render a human-readable sweep plotting rate.

    :param nsweep: number of complete sweeps plotted.
    :param sec: total plot duration in seconds.
    :return: e.g. "Plot rate : 42 sweeps/s".
    """
    rate = int(round(nsweep / sec))
    return f"Plot rate : {rate} sweeps/s"
def usb_bandwidth(nbytes: int, sec: float) -> str:
    """Render a human-readable USB throughput string.

    Scales down through B, kB and MB (decimal, factor 1000); anything
    larger is reported in GB/s.

    Bug fix: the old loop divided once more before falling through to the
    GB/s return, so rates of 1000 GB/s and above were under-reported by a
    factor of 1000.

    :param nbytes: total bytes (including headers and padding)
        transmitted over the USB channel.
    :param sec: total duration in seconds.
    :return: e.g. "USB Bandwidth : 2.5 kB/s\\n".
    """
    bwidth = nbytes / sec
    for unit in ["B", "kB", "MB"]:
        if bwidth < 10 ** 3:
            return "USB Bandwidth : {} {}/s\n".format(round(bwidth, 3), unit)
        bwidth /= 10 ** 3
    return "USB Bandwidth : {} GB/s\n".format(round(bwidth, 3))
def avg_value(avg: float) -> str:
    """Render the average-value status line, rounded to two decimals."""
    return f"Average Value : {avg:.2f}"
def sweep_total_bytes(fpga_output: Data) -> int:
    """Total bytes transmitted per sweep: payload plus start/stop flags.

    FFT samples carry both a real and an imaginary part, doubling the
    per-sample bit count before byte-padding.
    """
    bits_per_sample = data_nbits(fpga_output)
    if fpga_output == Data.FFT:
        bits_per_sample *= 2  # real + imaginary parts
    payload = data_nbytes(bits_per_sample) * data_sweep_len(fpga_output)
    return payload + 2 * data_nflags(fpga_output)
class PlotType(IntEnum):
    """Supported display modes for the Plot front end."""

    TIME = auto()  # time-domain trace of one sweep
    SPECTRUM = auto()  # single-sweep spectrum
    HIST = auto()  # scrolling range-time histogram (waterfall)
def plot_type_from_str(strval: str) -> PlotType:
    """Parse a case-insensitive plot-type name into a PlotType value.

    Accepts the full names and the short forms t / s / h.

    :raises RuntimeError: if ``strval`` is not a recognized name.
    """
    lookup = {
        "time": PlotType.TIME,
        "t": PlotType.TIME,
        "spectrum": PlotType.SPECTRUM,
        "s": PlotType.SPECTRUM,
        "hist": PlotType.HIST,
        "h": PlotType.HIST,
    }
    key = strval.lower()
    if key in lookup:
        return lookup[key]
    raise RuntimeError("Invalid Data string.")
class Plot:
    """Qt/pyqtgraph front end for displaying sweep data.

    Supports three display modes (see PlotType): a time-domain trace, a
    single-sweep spectrum, and a scrolling range-time histogram.  Set
    ``output`` (the Data stage being displayed) and ``ptype``, then call
    ``initialize_plot`` once and ``add_sweep`` per sweep.  When
    ``plot_path`` is anything other than the current working directory,
    each completed plot is also saved there as a numbered PNG.

    Bug fixes relative to the original:
    * the ``output`` setter was decorated ``@ptype.setter``, which
      redefined the ``output`` property to read back ``_ptype``;
    * ``_add_time_sweep`` issued plotting calls on the QMainWindow
      instead of the PlotWidget;
    * ``_save_plotsp`` implicitly returned None instead of False.
    """

    def __init__(self):
        self._ptype = None
        self._output = None
        self._app = QtGui.QApplication([])
        self._data = None
        self.db_min = DB_MIN
        self.db_max = DB_MAX
        self.plot_path = None
        self.tstart = None
        self.tplot_start = None
        self.tcurrent = None
        # min and max data bins
        self.min_bin = None
        self.max_bin = None
        # minimum axis value for spectrum and hist plots
        self.min_axis_val = None
        # maximum axis value for spectrum and hist plots
        self.max_axis_val = None
        # records saved plot number
        self._fname = 0

    @property
    def ptype(self) -> PlotType:
        """Current plot type (see PlotType)."""
        return self._ptype

    @ptype.setter
    def ptype(self, newval: PlotType):
        self._ptype = newval

    @property
    def output(self) -> Data:
        """Data stage being displayed."""
        return self._output

    @output.setter
    def output(self, newval: Data):
        # Bug fix: previously decorated @ptype.setter, which replaced this
        # property's getter with ptype's and made ``output`` read _ptype.
        self._output = newval

    def add_sweep(self, sweep: np.array) -> None:
        """Plot the next sweep of data.

        :raises ValueError: if ``ptype`` has not been set.
        """
        if self._ptype is None:
            raise ValueError("Must call set_type before adding sweeps.")
        if self._ptype == PlotType.TIME:
            self._add_time_sweep(sweep)
        elif self._ptype == PlotType.SPECTRUM:
            self._add_spectrum_sweep(sweep)
        else:
            self._add_hist_sweep(sweep)

    def initialize_plot(self) -> None:
        """Allocate the data buffer and create the window for ``ptype``.

        Must be called after ``output``, ``ptype`` and (for spectrum/hist)
        the bin/axis bounds are configured.  Resets the saved-plot counter
        and records the start time.

        :raises ValueError: for an unrecognized plot type.
        """
        self._fname = 0
        if self._ptype == PlotType.TIME:
            self._data = np.zeros(data_sweep_len(self._output))
            self._initialize_time_plot()
        elif self._ptype == PlotType.SPECTRUM:
            self._data = np.zeros(self.max_bin - self.min_bin)
            self._initialize_spectrum_plot()
        elif self._ptype == PlotType.HIST:
            self._data = np.zeros((HIST_RANGE, self.max_bin - self.min_bin))
            self._initialize_hist_plot()
        else:
            raise ValueError("Invalid plot type.")
        self.tstart = clock_gettime(CLOCK_MONOTONIC)

    def _initialize_time_plot(self) -> None:
        """Create the window and widget for the time-domain trace."""
        self._win = QtGui.QMainWindow()
        self._plt = pg.PlotWidget()
        self._plt.setWindowTitle("Time Plot (" + self._output.name + ")")
        self._plt.setXRange(0, self._data.shape[0])
        self._win.setCentralWidget(self._plt)
        self._win.show()

    def _initialize_spectrum_plot(self) -> None:
        """Create the window and widget for the single-sweep spectrum."""
        self._win = QtGui.QMainWindow()
        self._plt = pg.PlotWidget()
        self._plt.setWindowTitle("Spectrum Plot (" + self._output.name + ")")
        self._plt.getAxis("bottom").setTicks(self._freq_dist_ticks())
        self._plt.setYRange(self.db_min, self.db_max)
        self._win.setCentralWidget(self._plt)
        self._win.show()

    def _initialize_hist_plot(self) -> None:
        """Create the window and ImageView for the range-time histogram."""
        self._xval = 0
        self._tvals = []
        self._win = QtGui.QMainWindow()
        self._imv = pg.ImageView(view=pg.PlotItem())
        self._img_view = self._imv.getView()
        self._img_view.invertY(False)
        self._img_view.setAspectLocked(lock=False)
        self._img_view.getAxis("left").setTicks(self._freq_dist_ticks())
        self._imv.setLevels(self.db_min, self.db_max)
        self._win.setCentralWidget(self._imv)
        self._win.show()
        self._win.setWindowTitle(
            "Range-Time Histogram (" + self._output.name + ")"
        )
        self._imv.setPredefinedGradient("flame")
        hist_widget = self._imv.getHistogramWidget()
        hist_widget.region.setBounds([self.db_min, self.db_max])
        # TODO available in v0.11 (I think this is the correct method)
        # hist_widget.region.setSpan([self.db_min, self.db_max])

    def _close_plot(self) -> None:
        """Close the plot window."""
        self._win.close()

    def _add_time_sweep(self, sweep: np.array) -> None:
        """Display one time-domain sweep, then reset the data buffer."""
        # Copying in subranges makes the update much faster.
        ranges = subdivide_range(self._data.shape[0], len(sweep) // 200)
        for rg in ranges:
            self._data[rg[0] : rg[1]] = sweep[rg[0] : rg[1]]
        # Bug fix: these calls previously targeted self._win (a
        # QMainWindow, which has no disableAutoRange/plot/autoRange).
        self._plt.disableAutoRange()
        self._plt.plot(
            np.linspace(0, self._data.shape[0] - 1, self._data.shape[0]),
            self._data,
            clear=True,
        )
        self._plt.autoRange()
        self._app.processEvents()
        if self._save_plotsp():
            self._save_plot()
        self._data = np.zeros(self._data.shape)

    def _add_spectrum_sweep(self, sweep: np.array) -> None:
        """Display one spectrum sweep."""
        self._plt.plot(
            np.linspace(0, self._data.shape[0] - 1, self._data.shape[0]),
            sweep,
            clear=True,
        )
        self._app.processEvents()
        if self._save_plotsp():
            self._save_plot()

    def _add_hist_sweep(self, sweep: np.array) -> None:
        """Append one sweep to the histogram; save and reset when full."""
        self._data[self._xval] = sweep
        self._tvals.append(clock_gettime(CLOCK_MONOTONIC) - self.tstart)
        self._img_view.getAxis("bottom").setTicks(self._time_ticks())
        xrg = self._data.shape[0]
        self._imv.setImage(
            self._data,
            xvals=[i for i in range(xrg)],
            autoRange=False,
            autoHistogramRange=False,
        )
        # Speeds up bandwidth somewhat by reducing the burden of
        # updating the plot.
        if self._xval % 5 == 0:
            self._app.processEvents()
        self._xval += 1
        if self._xval == xrg:
            if self._save_plotsp():
                self._save_plot()
            self._data = np.zeros(np.shape(self._data))
            self._xval = 0
            self._tvals = []

    def _time_ticks(self) -> List[List[Tuple[int, float]]]:
        """X-axis (elapsed-seconds) ticks for the hist plot."""
        ret = []
        tval_len = len(self._tvals)
        i = 0
        while i < tval_len:
            ret.append((i, "{:.0f}".format(self._tvals[i])))
            i += HIST_RANGE // 10
        return [ret]

    def _freq_dist_ticks(self) -> List[List[Tuple[int, float]]]:
        """Frequency/distance-axis ticks mapping bins to axis values.

        Linearly maps [min_bin, max_bin] onto [min_axis_val, max_axis_val]
        and places roughly 20 round-valued ticks.
        """
        ret = []
        slope = (self.max_axis_val - self.min_axis_val) / (
            self.max_bin - self.min_bin
        )
        approx_num_ticks = 20
        rg = self.max_axis_val - self.min_axis_val
        # Round the tick increment to a power of 10.
        inc = int(10 ** np.round(np.log10((rg / approx_num_ticks))))
        if self.min_axis_val <= inc:
            first_act_val = inc
        else:
            inc_val = inc
            while inc_val < self.min_axis_val:
                inc_val += inc
            first_act_val = inc_val
        act_vals = np.arange(first_act_val, self.max_axis_val, inc)
        bin_vals = [int(np.round(act_val / slope)) for act_val in act_vals]
        bin_vals = np.subtract(bin_vals, self.min_bin)
        for i, j in zip(bin_vals, act_vals):
            ret.append((i, "{:.3g}".format(j)))
        return [ret]

    def _save_plot(self) -> None:
        """Render the window to a numbered PNG in ``plot_path``."""
        pixmap = QtGui.QPixmap(self._win.size())
        self._win.render(pixmap)
        plot_dir = self.plot_path.as_posix()
        if not plot_dir[-1] == "/":
            plot_dir += "/"
        pixmap.save(plot_dir + str(self._fname) + ".png")
        self._fname += 1

    def _save_plotsp(self) -> bool:
        """True if plots should be saved (plot_path differs from cwd)."""
        # Bug fix: previously fell through and returned None when
        # plot_path equalled the cwd.
        return not self.plot_path == Path.cwd()
class Parameter:
    """A single user-configurable setting.

    Bundles a display name, a menu number, and accessor callables:
    ``getter(strval=...)`` returns the current value (as a string when
    ``strval`` is True), ``setter(str)`` parses and stores a new value,
    and ``possible()`` describes the accepted inputs.  The initial value
    ``init`` is applied through ``setter`` at construction time.
    """

    def __init__(
        self,
        name: str,
        number: int,
        getter: Callable[[bool], Parameter],
        setter: Callable[[str], None],
        possible: Callable[[], str],
        init: str,
    ):
        self.name = name
        self.number = number
        self.getter = getter
        self.setter = setter
        self.possible = possible
        # Route the initial value through the normal setter path so any
        # parsing and side effects happen exactly as for user input.
        self.setter(init)

    def display(self, name_width: int) -> str:
        """Render ``name : value`` with the name padded to ``name_width``."""
        current = self.getter(strval=True)
        return "{:{width}} : {value}\n".format(
            self.name, width=name_width, value=current
        )

    def display_number_menu(self) -> str:
        """Render the ``<number>. <name>`` menu line."""
        return f"{self.number}. {self.name}"
class Menu:
    """A named preset bundling parameter values under one menu number.

    ``parameter_configuration`` is a list of (Parameter, value-string)
    pairs; ``set_parameters`` pushes each value through its parameter's
    setter.  ``message_func`` supplies the text shown when the preset is
    selected.
    """

    def __init__(
        self,
        name: str,
        number: int,
        parameter_configuration: List[Tuple[Parameter, str]],
        message_func: Callable[[], str],
    ):
        self.name = name
        self.number = number
        self.parameter_configuration = parameter_configuration
        self.message_func = message_func

    def display_number_menu(self) -> str:
        """Render the ``<number>. <name>`` menu line."""
        return f"{self.number}. {self.name}"

    def set_parameters(self) -> None:
        """Apply every configured value through its parameter's setter."""
        for param, value in self.parameter_configuration:
            param.setter(value)
class Configuration:
"""
"""
def __init__(self, plot: Plot, proc: Proc):
"""
"""
self.plot = plot
self.proc = proc
self._param_ctr = 0
self._menu_ctr = 0
# parameter variables
self._fpga_output = None
self._display_output = None
self.log_file = None
self.time = None
self.ptype = None
self.db_min = None
self.db_max = None
self.plot_dir = None
self.sub_last = None
self.channel = None
self.adf_fstart = None
self.adf_bandwidth = None
self.adf_tsweep = None
self.adf_tdelay = None
self.min_freq = None
self.max_freq = None
self.min_dist = None
self.max_dist = None
self.spectrum_axis = None
self.report_avg = None
self.params = [
Parameter(
name="FPGA output",
number=self._get_inc_param_ctr(),
getter=self._get_fpga_output,
setter=self._set_fpga_output,
possible=self._fpga_output_possible,
init="RAW",
),
Parameter(
name="display output",
number=self._get_inc_param_ctr(),
getter=self._get_display_output,
setter=self._set_display_output,
possible=self._display_output_possible,
init="FFT",
),
Parameter(
name="log file",
number=self._get_inc_param_ctr(),
getter=self._get_log_file,
setter=self._set_log_file,
possible=self._log_file_possible,
init="",
),
Parameter(
name="capture time (s)",
number=self._get_inc_param_ctr(),
getter=self._get_time,
setter=self._set_time,
possible=self._time_possible,
init="35",
),
Parameter(
name="plot type",
number=self._get_inc_param_ctr(),
getter=self._get_plot_type,
setter=self._set_plot_type,
possible=self._plot_type_possible,
init="hist",
),
Parameter(
name="dB min",
number=self._get_inc_param_ctr(),
getter=self._get_db_min,
setter=self._set_db_min,
possible=self._db_min_possible,
init="-120",
),
Parameter(
name="dB max",
number=self._get_inc_param_ctr(),
getter=self._get_db_max,
setter=self._set_db_max,
possible=self._db_max_possible,
init="-20",
),
Parameter(
name="plot save dir",
number=self._get_inc_param_ctr(),
getter=self._get_plot_dir,
setter=self._set_plot_dir,
possible=self._plot_dir_possible,
init="plots",
),
Parameter(
name="subtract last",
number=self._get_inc_param_ctr(),
getter=self._get_sub_last,
setter=self._set_sub_last,
possible=self._sub_last_possible,
init="true",
),
Parameter(
name="receiver channel",
number=self._get_inc_param_ctr(),
getter=self._get_channel,
setter=self._set_channel,
possible=self._channel_possible,
init="B",
),
Parameter(
name="ADF start frequency (Hz)",
number=self._get_inc_param_ctr(),
getter=self._get_adf_fstart,
setter=self._set_adf_fstart,
possible=self._adf_fstart_possible,
init="5.6e9",
),
Parameter(
name="ADF bandwidth (Hz)",
number=self._get_inc_param_ctr(),
getter=self._get_adf_bandwidth,
setter=self._set_adf_bandwidth,
possible=self._adf_bandwidth_possible,
init="300e6",
),
Parameter(
name="ADF sweep time (s)",
number=self._get_inc_param_ctr(),
getter=self._get_adf_tsweep,
setter=self._set_adf_tsweep,
possible=self._adf_tsweep_possible,
init="1e-3",
),
Parameter(
name="ADF delay time (s)",
number=self._get_inc_param_ctr(),
getter=self._get_adf_tdelay,
setter=self._set_adf_tdelay,
possible=self._adf_tdelay_possible,
init="2e-3",
),
Parameter(
name="min plotting frequency (Hz)",
number=self._get_inc_param_ctr(),
getter=self._get_min_freq,
setter=self._set_min_freq,
possible=self._min_freq_possible,
init="0",
),
Parameter(
name="max plotting frequency (Hz)",
number=self._get_inc_param_ctr(),
getter=self._get_max_freq,
setter=self._set_max_freq,
possible=self._max_freq_possible,
init=str(FREQ_INIT),
),
Parameter(
name="min plotting distance (m)",
number=self._get_inc_param_ctr(),
getter=self._get_min_dist,
setter=self._set_min_dist,
possible=self._min_dist_possible,
init="0",
),
Parameter(
name="max plotting distance (m)",
number=self._get_inc_param_ctr(),
getter=self._get_max_dist,
setter=self._set_max_dist,
possible=self._max_dist_possible,
init=str(DIST_INIT),
),
Parameter(
name="dist/freq axis",
number=self._get_inc_param_ctr(),
getter=self._get_spectrum_axis,
setter=self._set_spectrum_axis,
possible=self._spectrum_axis_possible,
init="dist",
),
Parameter(
name="report average",
number=self._get_inc_param_ctr(),
getter=self._get_report_avg,
setter=self._set_report_avg,
possible=self._report_avg_possible,
init="false",
),
]
self._param_name_width = self._max_param_name_width()
param_by_name = lambda x: [
param for param in self.params if param.name == x
][0]
self.menus = [
Menu(
name="Range Plot (235m)",
number=self._get_inc_menu_ctr(),
parameter_configuration=[
(param_by_name("FPGA output"), "RAW"),
(param_by_name("display output"), "FFT"),
(param_by_name("log file"), ""),
(param_by_name("capture time (s)"), "35"),
(param_by_name("plot type"), "hist"),
(param_by_name("dB min"), "-120"),
(param_by_name("dB max"), "-20"),
(param_by_name("plot save dir"), "plots"),
(param_by_name("subtract last"), "true"),
(param_by_name("receiver channel"), "B"),
(param_by_name("ADF start frequency (Hz)"), "5.6e9"),
(param_by_name("ADF bandwidth (Hz)"), "300e6"),
(param_by_name("ADF sweep time (s)"), "1e-3"),
(param_by_name("ADF delay time (s)"), "2e-3"),
(param_by_name("min plotting distance (m)"), "0"),
(param_by_name("max plotting distance (m)"), "235"),
(param_by_name("dist/freq axis"), "dist"),
(param_by_name("report average"), "false"),
],
message_func=self._range_plot_235_message,
),
Menu(
name="Noise Floor",
number=self._get_inc_menu_ctr(),
parameter_configuration=[
(param_by_name("FPGA output"), "RAW"),
(param_by_name("display output"), "RAW"),
(param_by_name("log file"), ""),
(param_by_name("capture time (s)"), "10"),
(param_by_name("plot type"), "spectrum"),
(param_by_name("dB min"), "-120"),
(param_by_name("dB max"), "-20"),
(param_by_name("plot save dir"), ""),
(param_by_name("subtract last"), "false"),
(param_by_name("receiver channel"), "B"),
(param_by_name("ADF start frequency (Hz)"), "5.6e9"),
(param_by_name("ADF bandwidth (Hz)"), "300e6"),
(param_by_name("ADF sweep time (s)"), "1e-3"),
(param_by_name("ADF delay time (s)"), "2e-3"),
(param_by_name("min plotting frequency (Hz)"), "50e3"),
(param_by_name("max plotting frequency (Hz)"), "1e6"),
(param_by_name("dist/freq axis"), "freq"),
(param_by_name("report average"), "true"),
],
message_func=self._noise_floor_message,
),
]
def display(self) -> str:
"""
"""
display_str = "Configuration:\n" + HORIZONTAL_LINES
for param in self.params:
display_str += "{:{width}} : ".format(
param.name, width=self._param_name_width
)
display_str += param.getter(strval=True)
display_str += "\n"
return display_str
def display_number_menu(self) -> str:
"""
"""
display_str = (
"Set options (enter the corresponding number):\n"
+ HORIZONTAL_LINES
)
for param in self.params:
display_str += param.display_number_menu()
display_str += "\n"
return display_str
def display_menu(self) -> str:
"""
"""
display_str = (
"Menu options (enter the corresponding number):\n"
+ HORIZONTAL_LINES
)
for menu in self.menus:
display_str += menu.display_number_menu()
display_str += "\n"
return display_str
def param_for_number(self, number: int) -> Parameter:
"""
"""
for param in self.params:
if number == param.number:
return param
raise RuntimeError("Invalid Parameter number.")
def menu_for_number(self, number: int) -> Menu:
"""
"""
for menu in self.menus:
if number == menu.number:
return menu
raise RuntimeError("Invalid Menu number.")
def logp(self) -> bool:
"""
True if data should be logged. False otherwise. This simply
checks that log file is a non-empty path.
"""
# The empty string resolves to the current working directory.
return not self.log_file.is_dir()
def _get_inc_param_ctr(self):
"""
"""
ret = self._param_ctr
self._param_ctr += 1
return ret
def _get_inc_menu_ctr(self):
"""
"""
ret = self._menu_ctr
self._menu_ctr += 1
return ret
def _set_fpga_output(self, newval: str):
"""
"""
newdata = data_from_str(newval)
if newdata == Data.FIR:
self._fpga_output = Data.DECIMATE
else:
self._fpga_output = newdata
self.proc.indata = self._fpga_output
def _get_fpga_output(self, strval: bool = False):
"""
"""
if strval:
return self._fpga_output.name
return self._fpga_output
def _fpga_output_possible(self) -> str:
"""
"""
return "{RAW, DECIMATE, WINDOW, FFT} (case insensitive)"
def _check_fpga_output(self) -> bool:
"""
"""
if self._fpga_output > self._display_output:
write(
"Display data cannot be from a processing stage that "
"preceeds FPGA output data."
)
return False
return True
def _set_display_output(self, newval: str):
"""
"""
newdata = data_from_str(newval)
self._display_output = newdata
self.proc.output = self._display_output
self.plot.output = self._display_output
def _get_display_output(self, strval: bool = False):
"""
"""
if strval:
return self._display_output.name
return self._display_output
def _display_output_possible(self) -> str:
"""
"""
return "{RAW, FIR, DECIMATE, WINDOW, FFT} (case insensitive)"
def _check_display_output(self) -> bool:
"""
"""
return True
def _set_log_file(self, newval: str):
"""
"""
self.log_file = Path(newval).resolve()
def _get_log_file(self, strval: bool = False):
"""
"""
if strval:
return self.log_file.as_posix()
return self.log_file
def _log_file_possible(self) -> str:
"""
"""
return "Any valid file path."
def _check_log_file(self):
"""
"""
return True
def _set_time(self, newval: str):
"""
"""
multiplier = 1
if newval[-1] == "s":
newval = newval[:-1]
elif newval[-1] == "m":
newval = newval[:-1]
multiplier *= 60
elif newval[-1] == "h":
newval = newval[:-1]
multiplier *= 60 ** 2
elif newval[-1] == "d":
newval = newval[:-1]
write("Time set to days. Are you sure this is correct?")
multiplier *= 60 ** 2 * 24
timeval = int(newval)
self.time = int(timeval * multiplier)
def _get_time(self, strval: bool = False):
"""
"""
if strval:
return str(self.time)
return self.time
def _time_possible(self) -> str:
"""
"""
return (
"Integer representing time. s, m, h, d can be appended \n"
"for seconds, minutes, hours or days if desired "
"(defaults to seconds if omitted)."
)
def _check_time(self) -> bool:
"""
"""
return True
def _get_plot_type(self, strval: bool = False):
"""
"""
if strval:
return self.ptype.name
return self.ptype
def _set_plot_type(self, newval: str):
"""
"""
ptype = plot_type_from_str(newval)
self.ptype = ptype
self.plot.ptype = self.ptype
if self.ptype == PlotType.SPECTRUM or self.ptype == PlotType.HIST:
self.proc.spectrum = True
else:
self.proc.spectrum = False
def _plot_type_possible(self) -> str:
"""
"""
return "{TIME (except FFT output), SPECTRUM, HIST} (case insensitive)"
def _check_plot_type(self) -> bool:
"""
"""
if self._fpga_output == Data.FFT and self.ptype == PlotType.TIME:
return False
return True
def _get_db_min(self, strval: bool = False):
"""
"""
if strval:
if self.db_min is None:
return "None"
return str(self.db_min)
return self.db_min
def _set_db_min(self, newval: Optional[str]):
"""
"""
if newval is None:
self.db_min = DB_MIN
else:
self.db_min = float(newval)
self.plot.db_min = self.db_min
self.proc.db_min = self.db_min
def _db_min_possible(self) -> str:
"""
"""
return "Any float or None, in which case no minimum clipping will be performed."
def _check_db_min(self) -> bool:
"""
"""
return True
def _get_db_max(self, strval: bool = False):
"""
"""
if strval:
if self.db_max is None:
return "None"
return str(self.db_max)
return self.db_max
def _set_db_max(self, newval: Optional[str]):
"""
"""
if newval is None:
self.db_max = DB_MAX
self.db_max = float(newval)
self.plot.db_max = self.db_max
self.proc.db_max = self.db_max
def _db_max_possible(self) -> str:
"""
"""
return "Any float or None, in which case no maximum clipping will be performed."
def _check_db_max(self) -> bool:
"""
"""
if self.db_max is None or self.db_min is None:
return True
if self.db_max > self.db_min:
return True
return False
def _get_plot_dir(self, strval: bool = False):
"""
"""
if strval:
return self.plot_dir.as_posix()
return self.plot_dir
def _set_plot_dir(self, newval: str):
"""
"""
self.plot_dir = Path(newval).resolve()
self.plot.plot_path = self.plot_dir
def _plot_dir_possible(self) -> str:
"""
"""
return (
"Any valid subdirectory path. If the directory already "
"exists, it will be emptied before new plots are added. "
"If it doesn't exist, it will be created."
)
def _check_plot_dir(self) -> bool:
"""
"""
if self.plot_dir == Path.cwd():
return True
if not path_is_subdir(self.plot_dir):
return False
if self.plot_dir.exists():
rmtree(self.plot_dir)
self.plot_dir.mkdir(parents=True)
return True
def _get_sub_last(self, strval: bool = False):
"""
"""
if strval:
if self.sub_last:
return "True"
return "False"
return self.sub_last
def _set_sub_last(self, newval: str):
"""
"""
newval_lower = newval.lower()
if newval_lower == "true" or newval_lower == "t":
self.sub_last = True
elif newval_lower == "false" or newval_lower == "f":
self.sub_last = False
else:
print(
"Invalid value for subtract last. Setting it to False. "
"Please reconfigure it with a permissible entry."
)
self.sub_last = False
self.proc.sub_last = self.sub_last
def _sub_last_possible(self) -> str:
"""
"""
return "True or false (case-insensitive)"
def _check_sub_last(self) -> bool:
"""
"""
return True
def _get_channel(self, strval: bool = False):
"""
"""
if strval:
if self.channel:
return self.channel
return "None"
return self.channel
def _set_channel(self, newval: str):
"""
"""
if newval.lower() == "a":
self.channel = "A"
elif newval.lower() == "b":
self.channel = "B"
else:
print(
"Invalid channel. Setting it to channel B. Please "
"reconfigure it with a permissible entry."
)
self.channel = "B"
def _channel_possible(self) -> str:
"""
"""
return "A or B (case-insensitive)"
def _check_channel(self) -> bool:
"""
"""
return True
def _get_adf_fstart(self, strval: bool = False):
"""
"""
if strval:
if self.adf_fstart:
return str(self.adf_fstart)
else:
return "None"
return self.adf_fstart
def _set_adf_fstart(self, newval: str):
"""
"""
self.adf_fstart = float(newval)
def _adf_fstart_possible(self) -> str:
"""
"""
return "5.3-5.6GHz"
def _check_adf_fstart(self) -> bool:
"""
"""
return True
def _get_adf_bandwidth(self, strval: bool = False):
"""
"""
if strval:
if self.adf_bandwidth:
return str(self.adf_bandwidth)
else:
return "None"
return self.adf_bandwidth
def _set_adf_bandwidth(self, newval: str):
"""
"""
self.adf_bandwidth = float(newval)
if not self.adf_tsweep is None:
if not self.min_freq is None:
self.min_dist = freq_to_dist(
self.min_freq, self.adf_bandwidth, self.adf_tsweep
)
if not self.max_freq is None:
self.max_dist = freq_to_dist(
self.max_freq, self.adf_bandwidth, self.adf_tsweep
)
    def _adf_bandwidth_possible(self) -> str:
        """Describe the supported ADF bandwidth range."""
        return "300-600MHz"
    def _check_adf_bandwidth(self) -> bool:
        """No validation performed; any float accepted by the setter passes."""
        return True
def _get_adf_tsweep(self, strval: bool = False):
"""
"""
if strval:
if self.adf_tsweep:
return str(self.adf_tsweep)
else:
return "None"
return self.adf_tsweep
def _set_adf_tsweep(self, newval: str):
"""
"""
self.adf_tsweep = float(newval)
if not self.adf_bandwidth is None:
if not self.min_freq is None:
self.min_dist = freq_to_dist(
self.min_freq, self.adf_bandwidth, self.adf_tsweep
)
if not self.max_freq is None:
self.max_dist = freq_to_dist(
self.max_freq, self.adf_bandwidth, self.adf_tsweep
)
    def _adf_tsweep_possible(self) -> str:
        """Describe permissible sweep-time values.

        TODO should correctly report supported values.
        """
        return "1e-3"
    def _check_adf_tsweep(self) -> bool:
        """No validation performed; any float accepted by the setter passes."""
        return True
def _get_adf_tdelay(self, strval: bool = False):
"""
"""
if strval:
if self.adf_tdelay:
return str(self.adf_tdelay)
else:
return "None"
return self.adf_tdelay
    def _set_adf_tdelay(self, newval: str):
        """Set the ADF inter-sweep delay from its string form."""
        self.adf_tdelay = float(newval)
    def _adf_tdelay_possible(self) -> str:
        """Describe permissible sweep-delay values.

        TODO should correctly report supported values.
        """
        return "2e-3"
    def _check_adf_tdelay(self) -> bool:
        """No validation performed; any float accepted by the setter passes."""
        return True
def _get_min_freq(self, strval: bool = False):
"""
"""
if strval:
return str(self.min_freq)
return self.min_freq
    def _set_min_freq(self, newval: str):
        """Set the minimum display frequency and recompute the matching distance."""
        self.min_freq = int(float(newval))
        self.min_dist = freq_to_dist(
            self.min_freq, self.adf_bandwidth, self.adf_tsweep
        )
    def _min_freq_possible(self) -> str:
        """Describe permissible values for the minimum frequency."""
        return "Any non-negative integer less than the max frequency."
def _check_min_freq(self) -> bool:
"""
"""
if self.min_freq < self.max_freq and self.min_freq >= 0:
return True
return False
def _get_max_freq(self, strval: bool = False):
"""
"""
if strval:
return str(self.max_freq)
return self.max_freq
    def _set_max_freq(self, newval: str):
        """Set the maximum display frequency and recompute the matching distance."""
        self.max_freq = int(float(newval))
        self.max_dist = freq_to_dist(
            self.max_freq, self.adf_bandwidth, self.adf_tsweep
        )
    def _max_freq_possible(self) -> str:
        """Describe permissible values for the maximum frequency."""
        return (
            "Any non-negative integer greater than the min frequency "
            "and no greater than the max frequency supported by the "
            "display output."
        )
def _check_max_freq(self) -> bool:
"""
"""
display_output_max_f = 1e6
if self._display_output == Data.RAW:
display_output_max_f = 20e6
if (
self.max_freq > self.min_freq
and self.max_freq <= display_output_max_f
):
return True
return False
def _get_min_dist(self, strval: bool = False):
"""
"""
if strval:
return str(self.min_dist)
return self.min_dist
def _set_min_dist(self, newval: str):
"""
"""
self.min_dist = int(newval)
self.min_freq = dist_to_freq(
self.min_dist, self.adf_bandwidth, self.adf_tsweep
)
    def _min_dist_possible(self) -> str:
        """Describe permissible values for the minimum distance."""
        return "Any non-negative integer less than the max distance."
def _check_min_dist(self) -> bool:
"""
"""
if self.min_dist < self.max_dist and self.min_dist >= 0:
return True
return False
def _get_max_dist(self, strval: bool = False):
"""
"""
if strval:
return str(self.max_dist)
return self.max_dist
def _set_max_dist(self, newval: str):
"""
"""
self.max_dist = int(newval)
self.max_freq = dist_to_freq(
self.max_dist, self.adf_bandwidth, self.adf_tsweep
)
    def _max_dist_possible(self) -> str:
        """Describe permissible values for the maximum distance."""
        return (
            "Any non-negative integer greater than the min distance "
            "and no greater than the max distance supported by the "
            "display output and ADF configuration."
        )
def _check_max_dist(self) -> bool:
"""
"""
display_output_max_f = 1e6
if self._display_output == Data.RAW:
display_output_max_f = 20e6
dist_max = freq_to_dist(
display_output_max_f, self.adf_bandwidth, self.adf_tsweep
)
if self.max_dist > self.min_dist and self.max_dist <= dist_max:
return True
return False
    def _get_spectrum_axis(self, strval: bool = False):
        """Return the spectrum x-axis mode ("freq" or "dist").

        NOTE(review): strval is accepted for interface symmetry with the
        other getters but unused — the stored value is already a string.
        """
        return self.spectrum_axis
def _set_spectrum_axis(self, newval: str):
"""
"""
newval_lower = newval.lower()
if newval_lower == "freq" or newval_lower == "f":
self.spectrum_axis = "freq"
elif newval_lower == "dist" or newval_lower == "d":
self.spectrum_axis = "dist"
else:
print(
"Invalid spectrum axis specified. Setting it to dist. "
"Please reconfigure it with a permissible entry."
)
self.spectrum_axis = "dist"
    def _spectrum_axis_possible(self) -> str:
        """Describe permissible values for the spectrum axis setting."""
        return "freq or dist (case-insensitive)"
    def _check_spectrum_axis(self) -> bool:
        """Always valid: the setter coerces any input to 'freq' or 'dist'."""
        return True
def _get_report_avg(self, strval: bool = False):
"""
"""
if strval:
if self.report_avg:
return "True"
return "False"
return self.report_avg
def _set_report_avg(self, newval: str):
"""
"""
newval_lower = newval.lower()
if newval_lower == "true" or newval_lower == "t":
self.report_avg = True
elif newval_lower == "false" or newval_lower == "f":
self.report_avg = False
else:
print(
"Invalid value for report average. Setting it to False. "
"Please reconfigure it with a permissible entry."
)
self.report_avg = False
    def _report_avg_possible(self) -> str:
        """Describe permissible values for the report-average setting."""
        return "True or false (case-insensitive)"
    def _check_report_avg(self) -> bool:
        """Always valid: the setter coerces any input to a bool."""
        return True
def _check_parameters(self) -> bool:
"""
"""
valid = True
valid &= self._check_fpga_output()
valid &= self._check_display_output()
valid &= self._check_log_file()
valid &= self._check_time()
valid &= self._check_plot_type()
valid &= self._check_db_min()
valid &= self._check_db_max()
valid &= self._check_plot_dir()
valid &= self._check_sub_last()
valid &= self._check_channel()
valid &= self._check_adf_fstart()
valid &= self._check_adf_bandwidth()
valid &= self._check_adf_tsweep()
valid &= self._check_adf_tdelay()
valid &= self._check_min_freq()
valid &= self._check_max_freq()
valid &= self._check_min_dist()
valid &= self._check_max_dist()
valid &= self._check_spectrum_axis()
valid &= self._check_report_avg()
return valid
def _noise_floor_message(self) -> str:
"""
"""
return (
"Ensure transmission port and receiver channel {} are "
"terminated with 50ohm loads."
).format(self._get_channel(True))
    def _range_plot_235_message(self) -> str:
        """No extra instructions are needed for the range-plot preset."""
        return ""
def _max_param_name_width(self) -> int:
"""
"""
width = 0
for param in self.params:
param_width = len(param.name)
if param_width > width:
width = param_width
return width
class Proc:
    """
    Data processing.

    Converts raw sweeps coming from the FPGA into the representation
    requested for display: optional previous-sweep subtraction, then
    FIR filtering, decimation, windowing and FFT as needed, ending
    with a conversion to dB for spectrum outputs.
    """
    def __init__(self):
        """Initialize processing state; attributes are configured by the shell."""
        self._output = None
        self.indata = None
        self.spectrum = None
        self._sub_last = None
        self.last_seq = None
        self.db_min = None
        self.db_max = None
    @property
    def output(self) -> Data:
        """Final data representation produced by process_sequence."""
        return self._output
    @output.setter
    def output(self, newval: Data):
        """Set the output stage and precompute FIR taps / FFT window as needed."""
        self._output = newval
        if self._output.value > Data.RAW:
            self._init_fir()
        if self._output.value > Data.DECIMATE:
            self._init_window()
    @property
    def sub_last(self) -> bool:
        """Whether the previous sweep is subtracted from each new sweep."""
        return self._sub_last
    @sub_last.setter
    def sub_last(self, newval: bool):
        """Enable/disable previous-sweep subtraction."""
        self._sub_last = newval
    def set_last_seq(self):
        """Reset the stored previous sweep to zeros (only when subtraction is on)."""
        if self._sub_last:
            self.last_seq = np.zeros(data_sweep_len(self.indata))
    def _init_fir(self):
        """Design the FIR filter taps with the Parks-McClellan (Remez) algorithm."""
        # Weight pass/stop bands by the allowed ripple/attenuation.
        w = [1 / (1 - 10 ** (-PASS_DB / 20)), 1 / (10 ** (STOP_DB / 20))]
        self.taps = signal.remez(
            numtaps=NUMTAPS,
            bands=BANDS,
            desired=BAND_GAIN,
            weight=w,
            fs=FS,
            type="bandpass",
        )
    def _init_window(self):
        """Precompute the Kaiser window (beta=6) applied before the FFT."""
        self.window_coeffs = np.kaiser(data_sweep_len(Data.WINDOW), 6)
    def process_sequence(self, seq: np.array) -> np.array:
        """Run one sweep through the chain from the FPGA stage to the output stage."""
        if self.sub_last:
            new_seq = np.subtract(seq, self.last_seq)
            self.last_seq = np.copy(seq)
            seq = new_seq
        if self.indata == Data.FFT:
            seq = np.abs(seq)
        i = self.indata.value
        seq = seq.astype(np.double)
        # Ordered so proc_func[stage] advances the data by exactly one stage.
        proc_func = [
            self.perform_fir,
            self.perform_decimate,
            self.perform_window,
            self.perform_fft,
        ]
        while i < self.output.value:
            seq = proc_func[i](seq)
            i += 1
        # normally, we should normalize the FFT FPGA output by
        # dividing by N and then divide our maxval by N. However,
        # these effects cancel and collectively have no net
        # effect. Therefore, we omit both steps.
        if self.indata == Data.FFT:
            seq = seq[0 : spectrum_len(self.indata)]
        nbits = data_nbits(self.indata)
        maxval = 2 ** (nbits - 1)
        # sub_last has a much greater effect on the FFT output than
        # time-series outputs.
        if self.indata == Data.FFT and self.sub_last:
            maxval /= 2 << 5
        if self.output == Data.FFT:
            seq = db_arr(seq, maxval, self.db_min, self.db_max)
        elif self.spectrum:
            seq = self.perform_fft(seq)
            seq = db_arr(seq, maxval, self.db_min, self.db_max)
        return seq
    def perform_fir(self, seq: np.array) -> np.array:
        """Apply the FIR filter, truncating the convolution tail to the input length."""
        fir = np.convolve(seq, self.taps)
        return fir[: len(seq)]
    def perform_decimate(self, seq: np.array) -> np.array:
        """Keep every DECIMATE-th sample of the sweep."""
        dec_arr = [seq[i] for i in range(len(seq)) if i % DECIMATE == 0]
        return dec_arr
    def perform_window(self, seq: np.array) -> np.array:
        """Apply the precomputed Kaiser window element-wise."""
        window = np.multiply(seq, self.window_coeffs)
        return window
    def perform_fft(self, seq: np.array) -> np.array:
        """Real FFT, scaled so each bin reflects sinusoid amplitude."""
        fft = np.fft.rfft(seq)
        # scaling this way makes the output FFT represent the sinusoid
        # amplitude at each frequency bin.
        fft /= len(fft) - 1
        return np.abs(fft)
class Shell:
    """
    Interactive command shell: configure the radar, then acquire,
    process and display sweep data.
    """
    def __init__(self):
        """Wire up plotting/processing/configuration and enter the prompt loop."""
        self.plot = Plot()
        self.proc = Proc()
        self.configuration = Configuration(self.plot, self.proc)
        self.help()
        self.prompt()
    def prompt(self):
        """Display the top-level prompt and dispatch one command."""
        write("fmcw > ", newline=False)
        self.read_input()
    def set_prompt(self):
        """Prompt for a parameter number and a new value, then apply it."""
        write("set > ", newline=False)
        uinput = self._readline()
        int_input = int(uinput)
        param = self.configuration.param_for_number(int_input)
        param_str = (
            ("Parameter : {}\n".format(param.name))
            + ("Current Value : {}\n".format(param.getter(strval=True)))
            + ("Possible Values : {}\n\n".format(param.possible()))
            + "**Note that when setting selection options (e.g. plot type),\n"
            + "it is only necessary to type the first characters that fully\n"
            + "differentiate the selection from all other choices.\n"
        )
        write(param_str)
        write("new value > ", newline=False)
        uinput = self._readline()
        param.setter(uinput)
        write("New value set.\n")
    def menu_prompt(self):
        """Prompt for a menu number and apply that menu's preset configuration."""
        write("menu > ", newline=False)
        uinput = self._readline()
        int_input = int(uinput)
        menu = self.configuration.menu_for_number(int_input)
        menu.set_parameters()
        menu.message_func()
        write("Menu selected.\n")
    def read_input(self):
        """Read one command and execute it; re-prompts for everything but exit."""
        uinput = self._readline()
        if uinput == "exit" or uinput == "e":
            return
        elif uinput == "help" or uinput == "h":
            self.help()
        elif uinput == "conf" or uinput == "c":
            write(self.configuration.display(), newline=True)
        elif uinput == "set" or uinput == "s":
            write(self.configuration.display_number_menu(), newline=True)
            self.set_prompt()
        elif uinput == "menu" or uinput == "m":
            write(self.configuration.display_menu(), newline=True)
            self.menu_prompt()
        elif uinput == "run" or uinput == "r":
            if not self.configuration._check_parameters():
                raise RuntimeError("Invalid configuration. Exiting.")
            # Plot axis bounds follow the chosen spectrum axis.
            if self.configuration.spectrum_axis == "freq":
                self.plot.min_axis_val = self.configuration.min_freq
                self.plot.max_axis_val = self.configuration.max_freq
            else:
                self.plot.min_axis_val = self.configuration.min_dist
                self.plot.max_axis_val = self.configuration.max_dist
            # Convert the frequency bounds into FFT bin indices for clipping.
            min_bin = int(
                np.round(
                    spectrum_len(self.configuration._display_output)
                    / nyquist_freq(self.configuration._display_output)
                    * self.configuration.min_freq
                )
            )
            max_bin = int(
                np.round(
                    spectrum_len(self.configuration._display_output)
                    / nyquist_freq(self.configuration._display_output)
                    * self.configuration.max_freq
                )
            )
            self.plot.min_bin = min_bin
            self.plot.max_bin = max_bin
            self.plot.initialize_plot()
            self.proc.set_last_seq()
            self.run()
        else:
            write("Unrecognized input. Try again.")
            self.help()
        # NOTE(review): re-prompting recursively; very long sessions deepen
        # the call stack by one frame per command.
        self.prompt()
    def help(self):
        """Print the list of available shell commands."""
        help_str = (
            "Available commands:\n"
            + HORIZONTAL_LINES
            + "conf : Display current configuration.\n"
            + "exit : Exit.\n"
            + "help : This display.\n"
            + (
                "run : Instantiate the current configuration, \n"
                " begin data acquisition, and display output.\n"
            )
            + (
                "set : Change the value of a configuration \n"
                " variable.\n"
            )
            + (
                "menu : Automatically set configuration variables \n"
                " based on one of several common tasks.\n"
            )
            # + "stop : Terminate the current data acquisition early.\n"
        )
        write(help_str)
    def run(self) -> None:
        """Acquire sweeps from the device until the configured time elapses."""
        nseq = 0
        current_time = clock_gettime(CLOCK_MONOTONIC)
        start_time = current_time
        end_time = start_time + self.configuration.time
        sweep_len = data_sweep_len(self.configuration._fpga_output)
        sample_bits = data_nbits(self.configuration._fpga_output)
        if self.configuration.logp():
            log_file = self.configuration.log_file.as_posix()
        else:
            log_file = None
        if self.configuration.report_avg:
            avg = []
        with Device() as radar:
            # Program the ADF synthesizer and FPGA output before acquiring.
            radar.adf.fstart = self.configuration.adf_fstart
            radar.adf.tsweep = self.configuration.adf_tsweep
            radar.adf.tdelay = self.configuration.adf_tdelay
            radar.adf.bandwidth = self.configuration.adf_bandwidth
            radar.set_chan(self.configuration.channel)
            radar.set_output(
                data_to_fpga_output(self.configuration._fpga_output)
            )
            radar.set_adf_regs()
            radar.start_acquisition(
                log_file,
                sample_bits,
                sweep_len,
                self.configuration._fpga_output == Data.FFT,
            )
            while current_time < end_time:
                sweep = radar.read_sweep(sweep_len)
                if sweep is not None:
                    proc_sweep = self.proc.process_sequence(sweep)
                    clipped_sweep = proc_sweep[
                        self.plot.min_bin : self.plot.max_bin
                    ]
                    self.plot.add_sweep(clipped_sweep)
                    if self.configuration.report_avg:
                        avg.append(np.average(clipped_sweep))
                    nseq += 1
                current_time = clock_gettime(CLOCK_MONOTONIC)
        # Report summary statistics after acquisition.
        if self.configuration.report_avg:
            write(avg_value(np.average(avg)))
        write(plot_rate(nseq, current_time - start_time))
        tbytes = sweep_total_bytes(self.configuration._fpga_output)
        write(
            usb_bandwidth(nseq * tbytes, current_time - start_time),
            newline=True,
        )
    def _readline(self) -> str:
        """Read one line from stdin without the trailing newline."""
        return sys.stdin.readline()[:-1]
if __name__ == "__main__":
    # Launch the interactive FMCW radar shell.
    shell = Shell()
| 2.03125 | 2 |
class Config(object):
    """Flask / SQLAlchemy application configuration."""

    # Database connection and engine behaviour.
    SQLALCHEMY_DATABASE_URI = 'sqlite:///app.db'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_ECHO = False
    # Flask session signing key (placeholder value).
    SECRET_KEY = '1234567890'
    # Bootswatch theme used by Flask-Admin.
    FLASK_ADMIN_SWATCH = 'united'
if __name__ == '__main__':
    # Dump every attribute of Config (including dunders) for inspection.
    for key, value in Config.__dict__.items():
        print(key, value)
import unittest
import aiostripe
from aiostripe.test.helper import StripeResourceTest, DUMMY_COUPON
class CouponTest(StripeResourceTest):
    """Exercises coupon CRUD operations against the mocked API requestor."""

    async def test_create_coupon(self):
        """Creating a coupon POSTs the full payload to /v1/coupons."""
        await aiostripe.Coupon.create(**DUMMY_COUPON)
        self.requestor_mock.request.assert_called_with(
            'post', '/v1/coupons', DUMMY_COUPON, None)

    async def test_update_coupon(self):
        """Saving a modified coupon POSTs only the changed metadata."""
        coupon = aiostripe.Coupon.construct_from({
            'id': 'cu_update',
            'metadata': {},
        }, 'api_key')
        coupon.metadata['key'] = 'value'
        await coupon.save()
        self.requestor_mock.request.assert_called_with(
            'post', '/v1/coupons/cu_update',
            {
                'metadata': {
                    'key': 'value',
                }
            }, None)

    async def test_delete_coupon(self):
        """Deleting a coupon issues DELETE on its resource URL."""
        coupon = aiostripe.Coupon(id='cu_delete')
        await coupon.delete()
        self.requestor_mock.request.assert_called_with(
            'delete', '/v1/coupons/cu_delete', {}, None)

    async def test_detach_coupon(self):
        """Removing a customer's discount issues DELETE on the discount URL."""
        customer = aiostripe.Customer(id='cus_delete_discount')
        await customer.delete_discount()
        self.requestor_mock.request.assert_called_with(
            'delete', '/v1/customers/cus_delete_discount/discount')
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 2.46875 | 2 |
Python 3 - Curso completo/exercicio016.py | PedroMunizdeMatos/Estudos-e-Projetos | 0 | 12769801 | # faça u programa que leia um ângulo qualquer e mostre na etla o valor do seno, cosseno e tangente desse ângulo.
# Read an angle in degrees and report its sine, cosine and tangent.
import math

angulo = int(input('Digite o ângulo: '))
# Convert once; the trig functions all take radians.
radianos = math.radians(angulo)
seno = math.sin(radianos)
cosseno = math.cos(radianos)
tangente = math.tan(radianos)
print('Sabendo que o ângulo vale {}º, o seno vale {:.2f}, o cosseno {:.2f} e a tangente {:.2f}.'.format(angulo, seno, cosseno, tangente))
| 4.03125 | 4 |
import nltk
from nltk.corpus import words
from pprint import pprint
# Sentinel key marking a complete word inside the trie.
end = '_end_'

def is_valid(trie, word):
    """Return True iff *word* is a complete word stored in *trie*."""
    node = trie
    for ch in word:
        if ch not in node:
            return False
        node = node[ch]
    return end in node
def is_prefix(trie, word):
pos = trie
for letter in word:
if letter not in pos:
return False
pos = pos[letter]
# If there's more than just '_end_', then it's a word prefix
return len(pos) > 1
# Relative offsets of the eight surrounding cells.
neighbors = [(-1,-1),(-1,0),(0,-1),(1,0),(0,1),(1,1),(1,-1),(-1,1)]

def get_neighbors(row, col):
    """Return the in-bounds neighbor coordinates of (row, col).

    Uses the module-level board dimensions ROW_LENGTH / COL_LENGTH.
    """
    return [
        (row + dr, col + dc)
        for dr, dc in neighbors
        if 0 <= row + dr < ROW_LENGTH and 0 <= col + dc < COL_LENGTH
    ]
# Words found on the board, accumulated across all dfs() calls.
valid_words = set()
def dfs(board, row, col, trie, visited_path, curr):
    """Depth-first search from (row, col) extending the word built so far.

    visited_path lists cells already used on this path (no reuse);
    curr is the lower-cased word prefix accumulated so far.
    """
    letter = board[row][col]
    visited_path.append((row, col))
    curr += letter.lower()
    # Boggle words must be at least three letters long.
    if len(curr) >= 3 and is_valid(trie, curr):
        valid_words.add(curr)
    # Prune: stop when no stored word extends the current prefix.
    if not is_prefix(trie, curr):
        return
    curr_neighbors = get_neighbors(row, col)
    for n in curr_neighbors:
        if n not in visited_path:
            # Copy the path so sibling branches don't share visited state.
            dfs(board, n[0], n[1], trie, visited_path.copy(), curr)
print("Reading board...")
# Read board
board = []
with open("board.txt") as f:
    lines = f.readlines()
    for line in lines:
        row = []
        last = ""
        for letter in line:
            letter = letter.upper()
            # The 'Q' tile represents 'QU' in Boggle.
            if letter == 'Q':
                row.append('QU')
            elif letter == 'U' and last == 'Q':
                continue
            elif letter == '\n':
                continue
            else:
                row.append(letter)
            last = letter
        board.append(row)
# Board dimensions used by get_neighbors().
ROW_LENGTH = len(board)
COL_LENGTH = len(board[0])
pprint(board)
print("Reading dictionary...")
# Get English words according to NLTK
word_list = words.words()
# Generate prefix trie
trie = dict()
for word in word_list:
    pos = trie
    last = ""
    for letter in word:
        # I hate the Qu tile
        if letter == 'u' and last == 'q':
            continue
        if letter == 'q':
            letter = 'qu'
        pos = pos.setdefault(letter, {})
        last = letter
    pos[end] = end
print("Searching board...")
# Perform depth first search on the Boggle board
for row in range(ROW_LENGTH):
    for col in range(COL_LENGTH):
        dfs(board, row, col, trie, [], "")
print("Valid words:")
print(valid_words)
coding_interviews/leetcode/easy/generate_the_string/generate_the_string.py | LeandroTk/Algorithms | 205 | 12769803 | <gh_stars>100-1000
# https://leetcode.com/problems/generate-a-string-with-characters-that-have-odd-counts
def generate_the_string(n):
    """Return a length-n string in which each character occurs an odd number of times."""
    if n % 2 == 1:
        # Odd length: n copies of 'a' (n is odd).
        return 'a' * n
    # Even length: n-1 copies of 'a' (odd) plus a single 'b' (odd).
    return 'a' * (n - 1) + 'b'
python/argparse/git.py | macgregor/demos | 0 | 12769804 | <reponame>macgregor/demos<filename>python/argparse/git.py
import argparse
def git_add(args):
    """Handle the `git add` subcommand: report the parsed arguments.

    Modernization: use print() call syntax, which works under both
    Python 2 and Python 3 (the original print statements are a Python 3
    syntax error).
    """
    print('adding file to git...')
    print('parsed_args.filename = \'%s\'' % args.filename)
    print('parsed_args.debug = \'%s\'' % args.debug)
def git_commit(args):
    """Handle the `git commit` subcommand: report the parsed arguments.

    Modernization: use print() call syntax, which works under both
    Python 2 and Python 3 (the original print statements are a Python 3
    syntax error).
    """
    print('commiting to git...')
    print('parsed_args.message = \'%s\'' % args.message)
    print('parsed_args.debug = \'%s\'' % args.debug)
# Build the top-level parser and wire up the 'add' and 'commit' subcommands.
git_parser = argparse.ArgumentParser(prog='git', description='python demo for argpase - subparsers usage')
#configure the root parser object to process subcommands
git_subparsers = git_parser.add_subparsers()
#create a parent parser to inherit arguments from
parent = argparse.ArgumentParser(add_help=False)
parent.add_argument('-d', '--debug', action='store_true', default=False, help='enable debug logging')
#create a subparser to handle arguments when 'git add' is called
#the title of the subparser with the argument used as the command when using the cli
#pass in the parent parser with the debug flag using the 'parents' keyword, note it takes
#a list
git_file_add_parser = git_subparsers.add_parser('add', description='Add file contents to the index', parents=[parent])
git_file_add_parser.add_argument('filename', help='filename to add to index')
#this is a trick to have a function called by the parser which is matched
#so if git add ... is called the git_add() method will be called with the parsed args
#and it git commit ... is called the git_commit method will be called
git_file_add_parser.set_defaults(func=git_add)
#create a subparser to handle arguments when 'git commit' is called
git_commit_parser = git_subparsers.add_parser('commit', description='Record changes to the repository', parents=[parent])
git_commit_parser.add_argument('-m', '--message', help='Use the given <msg> as the commit message')
#another example of this can be found at https://docs.python.org/2.7/library/argparse.html#argparse.ArgumentParser.add_subparsers
git_commit_parser.set_defaults(func=git_commit)
args = git_parser.parse_args()
#call the function matched by subparsers
args.func(args)
| 3.546875 | 4 |
Books/TensorFlowDeepLearningCookbook/Chapter1_TensorFlow_An_Introduction/subchapter_03_Hello_world_in_TensorFlow.py | Tim232/Python-Things | 2 | 12769805 | <filename>Books/TensorFlowDeepLearningCookbook/Chapter1_TensorFlow_An_Introduction/subchapter_03_Hello_world_in_TensorFlow.py
import tensorflow as tf # import tensorflow
message = tf.constant('Welcome to the exciting world of Deep Neural Networks!') # wrap the constant string in a tf.constant tensor
with tf.Session() as sess: # open a Session in a with-block to execute graph elements
    print(sess.run(message).decode()) # run the session; without decode() the result prints as raw bytes
'''
▣ warning message and information message 없애는 방법
level 1: information message
level 2: warning message
level 3: error message
'''
import os
# Translation of the note above: set TF_CPP_MIN_LOG_LEVEL to suppress TF log
# messages up to the given level (1: info, 2: warning, 3: error). The working
# syntax is a mapping assignment, e.g.:
#   os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # ignore all messages up to level 2
network/__init__.py | K-Mahfoudh/Sign-Language-Translator | 26 | 12769806 | <gh_stars>10-100
from .network import Network
Services/Aslide/aslide.py | jinimp/QAS | 0 | 12769807 | <gh_stars>0
import os
from openslide import OpenSlide
from openslide import lowlevel as openslide_lowlevel
from Services.Aslide.kfb import kfb_lowlevel
from Services.Aslide.kfb.kfb_slide import KfbSlide
from Services.Aslide.tmap.tmap_slide import TmapSlide
class Aslide(object):
    """Facade that opens a whole-slide image with the first reader that works.

    Reader order: OpenSlide (any format), then KfbSlide for .kfb files,
    then TmapSlide for .tmap files.
    """
    def __init__(self, filepath):
        """Open *filepath*; raise if no reader can handle it.

        Bug fix: the fallback handlers used bare ``except:`` clauses, which
        also swallow SystemExit/KeyboardInterrupt; narrowed to Exception.
        """
        self.filepath = filepath
        self.format = os.path.splitext(os.path.basename(filepath))[-1]
        read_success = False
        # 1. openslide
        try:
            self._osr = OpenSlide(filepath)
            read_success = True
        except Exception:
            pass
        # 2. kfb
        if not read_success and self.format in ['.kfb', '.KFB']:
            try:
                self._osr = KfbSlide(filepath)
                read_success = True
            except Exception:
                pass
        # 3. tmap
        if not read_success and self.format in ['.tmap', '.TMAP']:
            try:
                self._osr = TmapSlide(filepath)
                if self._osr:
                    read_success = True
            except Exception:
                pass
        if not read_success:
            raise Exception("UnsupportedFormat or Missing File => %s" % filepath)
    @property
    def mpp(self):
        """Scan magnification (20 or 40) derived from reader metadata."""
        if hasattr(self._osr, 'get_scan_scale'):
            return self._osr.get_scan_scale
        else:
            if hasattr(self._osr, 'properties'):
                if 'openslide.mpp-x' in self._osr.properties:
                    # Map microns-per-pixel to the closer of 20x / 40x.
                    mpp = float(self._osr.properties['openslide.mpp-x'])
                    return 20 if abs(mpp - 0.5) < abs(mpp - 0.25) else 40
        raise Exception("%s Has no attribute %s" % (self._osr.__class__.__name__, "get_scan_scale"))
    @property
    def level_count(self):
        """Number of pyramid levels in the slide."""
        return self._osr.level_count
    @property
    def dimensions(self):
        """(width, height) of level 0."""
        return self._osr.dimensions
    @property
    def level_dimensions(self):
        """(width, height) tuples for every pyramid level."""
        return self._osr.level_dimensions
    @property
    def level_downsamples(self):
        """Downsample factor of each pyramid level."""
        return self._osr.level_downsamples
    @property
    def properties(self):
        """Reader-specific metadata mapping."""
        return self._osr.properties
    @property
    def label_image(self):
        """Label (barcode) image; TMAP exposes it via a method call."""
        if self.format in ['.tmap', '.TMAP']:
            return self._osr.associated_images('label')
        else:
            return self._osr.associated_images.get('label', None)
    def get_best_level_for_downsample(self, downsample):
        """Return the best pyramid level for the given downsample factor."""
        return self._osr.get_best_level_for_downsample(downsample)
    def get_thumbnail(self, size):
        """Return a PIL.Image thumbnail no larger than *size* (width, height)."""
        return self._osr.get_thumbnail(size)
    def read_region(self, location, level, size):
        """Return a PIL.Image region.

        :param location: (x, y) top-left pixel in the level-0 reference frame
        :param level: pyramid level number
        :param size: (width, height) of the region
        """
        return self._osr.read_region(location, level, size)
    def read_fixed_region(self, location, level, size):
        """Return a PIL.Image region via the reader's fixed-region API.

        Same parameters as read_region; only some readers provide this.
        """
        return self._osr.read_fixed_region(location, level, size)
    def close(self):
        """Release the underlying reader's resources."""
        self._osr.close()
if __name__ == '__main__':
    # Smoke-test the facade against a local KFB slide.
    filepath = '/home/stimage/Development/DATA/TEST_DATA/test001.kfb'
    slide = Aslide(filepath)
    # NOTE(review): Aslide defines no detect_format method — this call looks
    # like it targets OpenSlide's classmethod; confirm before relying on it.
    print("Format : ", slide.detect_format(filepath))
    print("level_count : ", slide.level_count)
    print("level_dimensions : ", slide.level_dimensions)
    print("level_downsamples : ", slide.level_downsamples)
    print("properties : ", slide.properties)
    print("Associated Images : ")
    for key, val in slide.associated_images.items():
        print(key, " --> ", val)
    print("best level for downsample 20 : ", slide.get_best_level_for_downsample(20))
    im = slide.read_region((1000, 1000), 4, (1000, 1000))
    print(im.mode)
    im.show()
    im.close()
| 2.3125 | 2 |
import open3d as o3d
import os
import glob
import numpy as np
import json
class Open3DReconstructionDataset:
    """Accessor for an Open3D reconstruction-system dataset directory.

    Expects the standard layout: color/NNNNNN.jpg, depth/NNNNNN.png,
    scene/trajectory.log and camera_intrinsic.json under *root_dir*.

    Fixes over the original: file handles from json.load(open(...)) and
    open(...).readlines() were leaked; all reads now use context managers,
    and the triplicated intrinsics loading is factored into one helper.
    """
    def __init__(self, root_dir):
        self.root_dir = root_dir
        # Number of frames = number of color images on disk.
        self.len_frame = len(list(glob.glob(os.path.join(root_dir, "color/*.jpg"))))

    def _intrinsic_dict(self):
        """Load and return the raw contents of camera_intrinsic.json."""
        with open(os.path.join(self.root_dir, "camera_intrinsic.json")) as f:
            return json.load(f)

    def get_rgb_paths(self):
        """Return the ordered list of color image paths."""
        return [
            os.path.join(self.root_dir, "color", '{:06}.jpg'.format(i))
            for i in range(0, self.len_frame)
        ]

    def get_depth_paths(self):
        """Return the ordered list of depth image paths."""
        return [
            os.path.join(self.root_dir, "depth", '{:06}.png'.format(i))
            for i in range(0, self.len_frame)
        ]

    def get_trajectory(self):
        """Parse scene/trajectory.log into one 4x4 pose matrix per frame.

        The log stores 5 lines per frame: a header line followed by the
        four rows of the camera-to-world matrix.
        """
        with open(os.path.join(self.root_dir, "scene/trajectory.log"), 'r') as f:
            lines = f.readlines()
        mats = []
        for i in range(0, self.len_frame * 5, 5):
            rows = [
                [float(t) for t in lines[i + 1].split(" ")],
                [float(t) for t in lines[i + 2].split(" ")],
                [float(t) for t in lines[i + 3].split(" ")],
                [float(t) for t in lines[i + 4].split(" ")]
            ]
            mats.append(np.array(rows))
        return mats

    def get_intrinsic(self, type="raw"):
        """Return the camera intrinsics in one of three formats.

        :param type: "raw" (the JSON dict), "open3d"
            (o3d.camera.PinholeCameraIntrinsic), or "matrix" (3x3 ndarray).
            Any other value returns None, matching the original behavior.
        """
        if type == "raw":
            return self._intrinsic_dict()
        elif type == "open3d":
            intrinsics = self._intrinsic_dict()
            return o3d.camera.PinholeCameraIntrinsic(
                intrinsics["width"],
                intrinsics["height"],
                intrinsics["intrinsic_matrix"][0],
                intrinsics["intrinsic_matrix"][4],
                intrinsics["intrinsic_matrix"][6],
                intrinsics["intrinsic_matrix"][7],
            )
        elif type == "matrix":
            # The JSON stores the matrix column-major as a flat 9-list:
            # fx at [0], fy at [4], cx at [6], cy at [7].
            intrinsics = self._intrinsic_dict()
            intrinsic_matrix = np.zeros((3, 3), dtype=np.float64)
            fx = intrinsics["intrinsic_matrix"][0]
            fy = intrinsics["intrinsic_matrix"][4]
            cx = intrinsics["intrinsic_matrix"][6]
            cy = intrinsics["intrinsic_matrix"][7]
            intrinsic_matrix[0, 0] = fx
            intrinsic_matrix[0, 2] = cx
            intrinsic_matrix[1, 1] = fy
            intrinsic_matrix[1, 2] = cy
            intrinsic_matrix[2, 2] = 1
            return intrinsic_matrix
gecko/geckolib/automation/keypad.py | mmillmor/home_assistant-components | 0 | 12769809 | <reponame>mmillmor/home_assistant-components<gh_stars>0
""" Gecko Keypads """
from .base import GeckoAutomationBase
class GeckoKeypad(GeckoAutomationBase):
    """Keypad automation entity (placeholder — no behavior implemented yet)."""

    def __init__(self, facade):
        super().__init__(facade, "Keypad", "KEYPAD")

    def __str__(self):
        return f"{self.name}: Not implemented yet"
| 1.890625 | 2 |
import numpy as np

x = [0, 1, 2, 3, 4]
y = [5, 6, 7, 8, 9]

# Element-wise addition with plain Python.
z = [a + b for a, b in zip(x, y)]
print(z)

# The same addition via NumPy's built-in ufunc.
z = np.add(x, y)
print(z)

def my_add(a, b):
    return a + b

# Wrap the Python function as a ufunc taking 2 inputs, producing 1 output.
my_add = np.frompyfunc(my_add, 2, 1)
z = my_add(x, y)
print(z)

# Compare the types: a true ufunc, a plain function, and a frompyfunc ufunc.
print(type(np.add))
print(type(np.concatenate))
print(type(my_add))
| 3.28125 | 3 |
tests/workers/test_eval_cipher_suites_openssl1_0_2.py | timb-machine-mirrors/tlsmate | 0 | 12769811 | <reponame>timb-machine-mirrors/tlsmate
# -*- coding: utf-8 -*-
"""Implements a class to be used for unit testing.
"""
import pathlib
from tlsmate.workers.eval_cipher_suites import ScanCipherSuites
from tlsmate.tlssuite import TlsSuiteTester
from tlsmate.tlssuite import TlsLibrary
tls10_cs = [
"TLS_RSA_WITH_RC4_128_MD5",
"TLS_RSA_WITH_RC4_128_SHA",
"TLS_RSA_WITH_IDEA_CBC_SHA",
"TLS_RSA_WITH_3DES_EDE_CBC_SHA",
"TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA",
"TLS_DH_ANON_WITH_RC4_128_MD5",
"TLS_DH_ANON_WITH_3DES_EDE_CBC_SHA",
"TLS_RSA_WITH_AES_128_CBC_SHA",
"TLS_DHE_RSA_WITH_AES_128_CBC_SHA",
"TLS_DH_ANON_WITH_AES_128_CBC_SHA",
"TLS_RSA_WITH_AES_256_CBC_SHA",
"TLS_DHE_RSA_WITH_AES_256_CBC_SHA",
"TLS_DH_ANON_WITH_AES_256_CBC_SHA",
"TLS_RSA_WITH_CAMELLIA_128_CBC_SHA",
"TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA",
"TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA",
"TLS_RSA_WITH_CAMELLIA_256_CBC_SHA",
"TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA",
"TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA",
"TLS_RSA_WITH_SEED_CBC_SHA",
"TLS_DHE_RSA_WITH_SEED_CBC_SHA",
"TLS_DH_ANON_WITH_SEED_CBC_SHA",
"TLS_ECDH_ECDSA_WITH_RC4_128_SHA",
"TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA",
"TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA",
"TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA",
"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
"TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA",
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
"TLS_ECDHE_RSA_WITH_RC4_128_SHA",
"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
]
tls12_cs = [
"TLS_RSA_WITH_RC4_128_MD5",
"TLS_RSA_WITH_RC4_128_SHA",
"TLS_RSA_WITH_IDEA_CBC_SHA",
"TLS_RSA_WITH_3DES_EDE_CBC_SHA",
"TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA",
"TLS_DH_ANON_WITH_RC4_128_MD5",
"TLS_DH_ANON_WITH_3DES_EDE_CBC_SHA",
"TLS_RSA_WITH_AES_128_CBC_SHA",
"TLS_DHE_RSA_WITH_AES_128_CBC_SHA",
"TLS_DH_ANON_WITH_AES_128_CBC_SHA",
"TLS_RSA_WITH_AES_256_CBC_SHA",
"TLS_DHE_RSA_WITH_AES_256_CBC_SHA",
"TLS_DH_ANON_WITH_AES_256_CBC_SHA",
"TLS_RSA_WITH_AES_128_CBC_SHA256",
"TLS_RSA_WITH_AES_256_CBC_SHA256",
"TLS_RSA_WITH_CAMELLIA_128_CBC_SHA",
"TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA",
"TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA",
"TLS_DHE_RSA_WITH_AES_128_CBC_SHA256",
"TLS_DHE_RSA_WITH_AES_256_CBC_SHA256",
"TLS_DH_ANON_WITH_AES_128_CBC_SHA256",
"TLS_DH_ANON_WITH_AES_256_CBC_SHA256",
"TLS_RSA_WITH_CAMELLIA_256_CBC_SHA",
"TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA",
"TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA",
"TLS_RSA_WITH_SEED_CBC_SHA",
"TLS_DHE_RSA_WITH_SEED_CBC_SHA",
"TLS_DH_ANON_WITH_SEED_CBC_SHA",
"TLS_RSA_WITH_AES_128_GCM_SHA256",
"TLS_RSA_WITH_AES_256_GCM_SHA384",
"TLS_DHE_RSA_WITH_AES_128_GCM_SHA256",
"TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_DH_ANON_WITH_AES_128_GCM_SHA256",
"TLS_DH_ANON_WITH_AES_256_GCM_SHA384",
"TLS_ECDH_ECDSA_WITH_RC4_128_SHA",
"TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA",
"TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA",
"TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA",
"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
"TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA",
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
"TLS_ECDHE_RSA_WITH_RC4_128_SHA",
"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
]
class TestCase(TlsSuiteTester):
    """Scanner regression test against an OpenSSL 1.0.2 server.

    For more information refer to the documentation of the TcRecorder class.
    """

    sp_out_yaml = "profile_basic_openssl1_0_2"
    recorder_yaml = "recorder_eval_cipher_suites_openssl1_0_2"
    path = pathlib.Path(__file__)
    server_cmd = (
        "utils/start_openssl --version {library} --port {server_port} "
        "--cert1 server-rsa --cert2 server-ecdsa "
        "-- -www -cipher ALL"
    )
    library = TlsLibrary.openssl1_0_2
    server = "localhost"

    def check_cert_chain(self, cert_chain):
        """Both certificate chains must be present, ordered, and of length 2."""
        assert len(cert_chain) == 2
        for idx, entry in enumerate(cert_chain, start=1):
            assert entry["id"] == idx
            assert len(entry["cert_chain"]) == 2

    def check_versions(self, versions):
        """Verify version support flags, preference flags and cipher orders."""
        expected_support = [
            ("SSL20", "FALSE"),
            ("SSL30", "TRUE"),
            ("TLS10", "TRUE"),
            ("TLS11", "TRUE"),
            ("TLS12", "TRUE"),
            ("TLS13", "FALSE"),
        ]
        assert len(versions) == 6
        for entry, (name, support) in zip(versions, expected_support):
            assert entry["version"]["name"] == name
            assert entry["support"] == support
        # The server never enforces its own cipher order.
        for idx in (1, 2, 3, 4):
            assert versions[idx]["ciphers"]["server_preference"] == "FALSE"
        # SSL3.0/TLS1.0/TLS1.1 share one suite list; TLS1.2 has its own.
        for idx, expected_cs in ((1, tls10_cs), (2, tls10_cs),
                                 (3, tls10_cs), (4, tls12_cs)):
            for want, got in zip(expected_cs,
                                 versions[idx]["ciphers"]["cipher_suites"]):
                assert want == got["name"]

    def check_profile(self, profile):
        """Validate the serialized server profile."""
        self.check_cert_chain(profile["cert_chains"])
        self.check_versions(profile["versions"])

    def run(self, tlsmate, is_replaying):
        """Enable all protocol versions, run the scan and check the profile."""
        for vers in ("sslv2", "sslv3", "tls10", "tls11", "tls12", "tls13"):
            tlsmate.config.set(vers, True)
        server_profile = tlsmate.server_profile
        ScanCipherSuites(tlsmate).run()
        self.check_profile(server_profile.make_serializable())
if __name__ == "__main__":
    # Run the scenario live (not replaying a recorded session).
    TestCase().entry(is_replaying=False)
| 1.664063 | 2 |
neuralpp/inference/graphical_model/variable/variable.py | stefanwebb/neuralpp | 0 | 12769812 | from typing import Any
import torch
class Variable:
    """Abstract interface for a graphical-model variable.

    Concrete subclasses must provide equality/hashing, a representation,
    featurization of values into tensors, and a multivalue check; every
    method here simply raises ``NotImplementedError``.
    """

    def __eq__(self, other) -> bool:
        self._not_implemented("__eq__")

    def __hash__(self):
        self._not_implemented("__hash__")

    def __repr__(self) -> str:
        self._not_implemented("__repr__")

    def featurize(self, value) -> torch.Tensor:
        """Convert *value* into a tensor representation (abstract)."""
        self._not_implemented("featurize")

    def is_multivalue(self, value: Any) -> bool:
        """Whether *value* represents multiple values at once (abstract)."""
        self._not_implemented("is_multivalue")

    def _not_implemented(self, name):
        # Centralized raiser so subclasses get a uniform error message.
        raise NotImplementedError(f"{name} not implemented for {type(self)}")
| 2.734375 | 3 |
script/main.py | repen/SPyRender | 0 | 12769813 | """
Copyright 2021 <NAME> (<EMAIL>)
Licensed under the Apache License v2.0
http://www.apache.org/licenses/LICENSE-2.0
"""
from multiprocessing import Process, Lock
from tool import log as _log
import time, argparse
# Command-line interface; RawTextHelpFormatter preserves help-text
# formatting verbatim.
parser = argparse.ArgumentParser(
    formatter_class=argparse.RawTextHelpFormatter
)
# -head/--headless: run the browser without a visible window.
# (Help text is Russian for "headless browser mode".)
parser.add_argument('-head', '--headless', dest='headless',
                    default=False, action='store_true', help='режим безголового браузера')
# NOTE(review): arguments are parsed at import time, so importing this
# module from elsewhere would consume sys.argv -- confirm intended.
args = parser.parse_args()
# Module-level logger tagged "MAIN".
log = _log("MAIN")
def proc1():
    """Child-process target: build and run the browser worker.

    The import is kept local so the worker's dependencies are only loaded
    inside the spawned process.
    """
    from first_process import init
    worker = init(args.headless)
    worker()
def proc2():
    """Child-process target: start the Flask server (import kept local
    so it only loads inside the spawned process)."""
    from flasksrv import app_run
    app_run()
def main():
    """Launch both workers as daemon processes and wait on them.

    The first worker gets a 5-second head start before the second is
    launched; both are joined so the parent blocks until they exit.
    """
    browser_proc = Process(target=proc1, daemon=True)
    server_proc = Process(target=proc2, daemon=True)
    browser_proc.start()
    time.sleep(5)  # NOTE(review): presumably lets proc1 warm up first -- confirm
    server_proc.start()
    browser_proc.join()
    server_proc.join()
if __name__ == '__main__':
    # Script entry point.
    main()
mitmproxy/http.py | cifred98/mitmproxy | 1 | 12769814 | <filename>mitmproxy/http.py
import html
import time
from typing import Optional, Tuple
from mitmproxy import connections
from mitmproxy import flow
from mitmproxy import version
from mitmproxy.net import http
# Module-level aliases: this module re-exports the request/response types
# defined in mitmproxy.net.http under their historical names.
HTTPRequest = http.Request
HTTPResponse = http.Response
class HTTPFlow(flow.Flow):
    """
    An HTTPFlow is a collection of objects representing a single HTTP
    transaction.
    """
    request: HTTPRequest
    response: Optional[HTTPResponse] = None
    error: Optional[flow.Error] = None
    """
    Note that it's possible for a Flow to have both a response and an error
    object. This might happen, for instance, when a response was received
    from the server, but there was an error sending it back to the client.
    """
    server_conn: connections.ServerConnection
    client_conn: connections.ClientConnection
    intercepted: bool = False
    """ Is this flow currently being intercepted? """
    mode: str
    """ What mode was the proxy layer in when receiving this request? """

    def __init__(self, client_conn, server_conn, live=None, mode="regular"):
        super().__init__("http", client_conn, server_conn, live)
        self.mode = mode

    _stateobject_attributes = flow.Flow._stateobject_attributes.copy()
    # plain dict literal rather than dict(**kwargs) -- mypy can't type-check
    # update() with keyword arguments
    _stateobject_attributes.update({
        "request": HTTPRequest,
        "response": HTTPResponse,
        "mode": str,
    })

    def __repr__(self):
        pieces = ["<HTTPFlow"]
        for attr in ("request", "response", "error", "client_conn", "server_conn"):
            if getattr(self, attr, False):
                pieces.append("\r\n %s = {flow.%s}" % (attr, attr))
        pieces.append(">")
        # The placeholders reference attributes of this flow via str.format.
        return "".join(pieces).format(flow=self)

    @property
    def timestamp_start(self) -> float:
        """The request's start time doubles as the flow's start time."""
        return self.request.timestamp_start

    def copy(self):
        """Copy the flow, also copying the request/response payloads."""
        duplicate = super().copy()
        if self.request:
            duplicate.request = self.request.copy()
        if self.response:
            duplicate.response = self.response.copy()
        return duplicate
def make_error_response(
    status_code: int,
    message: str = "",
    headers: Optional[http.Headers] = None,
) -> HTTPResponse:
    """Build a minimal HTML error response for *status_code*.

    The *message* is HTML-escaped before being embedded in the page.
    When *headers* is falsy, a default set (close connection, text/html,
    correct Content-Length) is used instead.
    """
    reason = http.status_codes.RESPONSES.get(status_code, "Unknown")
    body: bytes = """
    <html>
        <head>
            <title>{status_code} {reason}</title>
        </head>
        <body>
            <h1>{status_code} {reason}</h1>
            <p>{message}</p>
        </body>
    </html>
    """.strip().format(
        status_code=status_code,
        reason=reason,
        message=html.escape(message),
    ).encode("utf8", "replace")
    if not headers:
        headers = http.Headers(
            Server=version.MITMPROXY,
            Connection="close",
            Content_Length=str(len(body)),
            Content_Type="text/html"
        )
    return HTTPResponse.make(status_code, body, headers)
def make_connect_request(address: Tuple[str, int]) -> HTTPRequest:
    """Synthesize an HTTP/1.1 CONNECT request targeting *address*.

    *address* is a ``(host, port)`` pair; it fills both the host/port
    fields and the authority pseudo-header.
    """
    host, port = address
    return HTTPRequest(
        host=host,
        port=port,
        method=b"CONNECT",
        scheme=b"",
        authority=f"{host}:{port}".encode(),
        path=b"",
        http_version=b"HTTP/1.1",
        headers=http.Headers(),
        content=b"",
        trailers=None,
        timestamp_start=time.time(),
        timestamp_end=time.time(),
    )
def make_connect_response(http_version):
    """Build the "200 Connection established" reply to a CONNECT request."""
    # Do not send any response headers as it breaks proxying non-80 ports on
    # Android emulators using the -http-proxy option.
    empty_headers = http.Headers()
    return HTTPResponse(
        http_version,
        200,
        b"Connection established",
        empty_headers,
        b"",
        None,
        time.time(),
        time.time(),
    )
def make_expect_continue_response():
    """Build the interim response sent for an ``Expect: 100-continue``."""
    interim_status = 100
    return HTTPResponse.make(interim_status)
| 2.953125 | 3 |
tests/file_handler/test_local.py | Gallaecio/spider-feeder | 0 | 12769815 | <reponame>Gallaecio/spider-feeder
from spider_feeder.file_handler import local
def test_open_local_file(mocker):
    """A bare filesystem path is forwarded to builtins.open unchanged."""
    open_mock = mocker.patch('spider_feeder.file_handler.local.builtins.open')
    local.open('/tmp/input_urls.txt', encoding='utf-8')
    open_mock.assert_called_once_with('/tmp/input_urls.txt', encoding='utf-8')
def test_open_local_file_with_scheme(mocker):
    """A file:// URI has its scheme stripped before builtins.open is called."""
    open_mock = mocker.patch('spider_feeder.file_handler.local.builtins.open')
    local.open('file:///tmp/input_urls.txt', encoding='latin-1')
    open_mock.assert_called_once_with('/tmp/input_urls.txt', encoding='latin-1')
| 2.0625 | 2 |
tests/test_connection_pooling.py | ckwang8128/pycassa | 64 | 12769816 | import threading
import unittest
import time
from nose.tools import assert_raises, assert_equal, assert_true
from pycassa import ColumnFamily, ConnectionPool, InvalidRequestError,\
NoConnectionAvailable, MaximumRetryException, AllServersUnavailable
from pycassa.logging.pool_stats_logger import StatsLogger
from pycassa.cassandra.ttypes import ColumnPath
from pycassa.cassandra.ttypes import InvalidRequestException
from pycassa.cassandra.ttypes import NotFoundException
# Login dict passed to every authenticated ConnectionPool in these tests.
# NOTE(review): '<PASSWORD>' looks like a scrubbed placeholder -- supply a
# real value when running against a live cluster.
_credentials = {'username': 'jsmith', 'password': '<PASSWORD>'}
def _get_list():
return ['foo:bar']
class PoolingCase(unittest.TestCase):
    """Integration tests for pycassa's ConnectionPool.

    These tests talk to a live Cassandra node on localhost:9160 and expect
    a 'PycassaTestKeyspace' keyspace with Standard1/Counter1 column
    families.  (Python 2 code -- note the print statement below.)
    """

    def tearDown(self):
        # Remove every row written to Standard1 by the test that just ran.
        pool = ConnectionPool('PycassaTestKeyspace')
        cf = ColumnFamily(pool, 'Standard1')
        for key, cols in cf.get_range():
            cf.remove(key)

    def test_basic_pools(self):
        """A pool can be created with credentials and used for an insert."""
        pool = ConnectionPool('PycassaTestKeyspace', credentials=_credentials)
        cf = ColumnFamily(pool, 'Standard1')
        cf.insert('key1', {'col': 'val'})
        pool.dispose()

    def test_empty_list(self):
        """An empty server list makes pool construction fail outright."""
        assert_raises(AllServersUnavailable, ConnectionPool, 'PycassaTestKeyspace', server_list=[])

    def test_server_list_func(self):
        """server_list may be a callable; its result is recorded and the
        'list' stat is bumped exactly once."""
        stats_logger = StatsLoggerWithListStorage()
        pool = ConnectionPool('PycassaTestKeyspace', server_list=_get_list,
                              listeners=[stats_logger], prefill=False)
        assert_equal(stats_logger.serv_list, ['foo:bar'])
        assert_equal(stats_logger.stats['list'], 1)
        pool.dispose()

    def test_queue_pool(self):
        """Checkout/checkin accounting for a prefilled pool with overflow
        (use_threadlocal=False): overflow conns are disposed on return."""
        stats_logger = StatsLoggerWithListStorage()
        pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                              prefill=True, pool_timeout=0.1, timeout=1,
                              keyspace='PycassaTestKeyspace', credentials=_credentials,
                              listeners=[stats_logger], use_threadlocal=False)
        conns = []
        for i in range(10):
            conns.append(pool.get())
        assert_equal(stats_logger.stats['created']['success'], 10)
        assert_equal(stats_logger.stats['checked_out'], 10)
        # Pool is maxed out now
        assert_raises(NoConnectionAvailable, pool.get)
        assert_equal(stats_logger.stats['created']['success'], 10)
        assert_equal(stats_logger.stats['at_max'], 1)
        # First five returns refill the pool, so nothing is disposed yet.
        for i in range(0, 5):
            pool.return_conn(conns[i])
        assert_equal(stats_logger.stats['disposed']['success'], 0)
        assert_equal(stats_logger.stats['checked_in'], 5)
        # The overflow five have nowhere to go and are disposed.
        for i in range(5, 10):
            pool.return_conn(conns[i])
        assert_equal(stats_logger.stats['disposed']['success'], 5)
        assert_equal(stats_logger.stats['checked_in'], 10)
        conns = []
        # These connections should come from the pool
        for i in range(5):
            conns.append(pool.get())
        assert_equal(stats_logger.stats['created']['success'], 10)
        assert_equal(stats_logger.stats['checked_out'], 15)
        # But these will need to be made
        for i in range(5):
            conns.append(pool.get())
        assert_equal(stats_logger.stats['created']['success'], 15)
        assert_equal(stats_logger.stats['checked_out'], 20)
        assert_equal(stats_logger.stats['disposed']['success'], 5)
        for i in range(10):
            conns[i].return_to_pool()
        assert_equal(stats_logger.stats['checked_in'], 20)
        assert_equal(stats_logger.stats['disposed']['success'], 10)
        # Double-returning a connection is rejected and changes no counts.
        assert_raises(InvalidRequestError, conns[0].return_to_pool)
        assert_equal(stats_logger.stats['checked_in'], 20)
        assert_equal(stats_logger.stats['disposed']['success'], 10)
        # NOTE(review): looks like leftover debug output -- consider removing.
        print "in test:", id(conns[-1])
        conns[-1].return_to_pool()
        assert_equal(stats_logger.stats['checked_in'], 20)
        assert_equal(stats_logger.stats['disposed']['success'], 10)
        pool.dispose()

    def test_queue_pool_threadlocal(self):
        """With use_threadlocal=True each thread sees one shared connection;
        repeated gets in one thread count as a single checkout."""
        stats_logger = StatsLoggerWithListStorage()
        pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                              prefill=True, pool_timeout=0.01, timeout=1,
                              keyspace='PycassaTestKeyspace', credentials=_credentials,
                              listeners=[stats_logger], use_threadlocal=True)
        conns = []
        assert_equal(stats_logger.stats['created']['success'], 5)
        # These connections should all be the same
        for i in range(10):
            conns.append(pool.get())
        assert_equal(stats_logger.stats['created']['success'], 5)
        assert_equal(stats_logger.stats['checked_out'], 1)
        for i in range(0, 5):
            pool.return_conn(conns[i])
        assert_equal(stats_logger.stats['checked_in'], 1)
        for i in range(5, 10):
            pool.return_conn(conns[i])
        assert_equal(stats_logger.stats['checked_in'], 1)
        conns = []
        assert_equal(stats_logger.stats['created']['success'], 5)
        # A single connection should come from the pool
        for i in range(5):
            conns.append(pool.get())
        assert_equal(stats_logger.stats['created']['success'], 5)
        assert_equal(stats_logger.stats['checked_out'], 2)
        for conn in conns:
            pool.return_conn(conn)
        conns = []
        threads = []
        stats_logger.reset()

        def checkout_return():
            # One checkout/checkin cycle per thread.
            conn = pool.get()
            time.sleep(1)
            pool.return_conn(conn)

        for i in range(5):
            threads.append(threading.Thread(target=checkout_return))
            threads[-1].start()
        for thread in threads:
            thread.join()
        assert_equal(stats_logger.stats['created']['success'], 0)  # Still 5 connections in pool
        assert_equal(stats_logger.stats['checked_out'], 5)
        assert_equal(stats_logger.stats['checked_in'], 5)
        # These should come from the pool
        threads = []
        for i in range(5):
            threads.append(threading.Thread(target=checkout_return))
            threads[-1].start()
        for thread in threads:
            thread.join()
        assert_equal(stats_logger.stats['created']['success'], 0)
        assert_equal(stats_logger.stats['checked_out'], 10)
        assert_equal(stats_logger.stats['checked_in'], 10)
        pool.dispose()

    def test_queue_pool_no_prefill(self):
        """Like test_queue_pool, but with lazy connection creation: each
        get() before the pool is warm creates a new connection."""
        stats_logger = StatsLoggerWithListStorage()
        pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                              prefill=False, pool_timeout=0.1, timeout=1,
                              keyspace='PycassaTestKeyspace', credentials=_credentials,
                              listeners=[stats_logger], use_threadlocal=False)
        conns = []
        for i in range(10):
            conns.append(pool.get())
            assert_equal(stats_logger.stats['created']['success'], i + 1)
            assert_equal(stats_logger.stats['checked_out'], i + 1)
        # Pool is maxed out now
        assert_raises(NoConnectionAvailable, pool.get)
        assert_equal(stats_logger.stats['created']['success'], 10)
        assert_equal(stats_logger.stats['at_max'], 1)
        for i in range(0, 5):
            pool.return_conn(conns[i])
            assert_equal(stats_logger.stats['checked_in'], i + 1)
            assert_equal(stats_logger.stats['disposed']['success'], 0)
        for i in range(5, 10):
            pool.return_conn(conns[i])
            assert_equal(stats_logger.stats['checked_in'], i + 1)
            assert_equal(stats_logger.stats['disposed']['success'], (i - 5) + 1)
        conns = []
        # These connections should come from the pool
        for i in range(5):
            conns.append(pool.get())
            assert_equal(stats_logger.stats['created']['success'], 10)
            assert_equal(stats_logger.stats['checked_out'], (i + 10) + 1)
        # But these will need to be made
        for i in range(5):
            conns.append(pool.get())
            assert_equal(stats_logger.stats['created']['success'], (i + 10) + 1)
            assert_equal(stats_logger.stats['checked_out'], (i + 15) + 1)
        assert_equal(stats_logger.stats['disposed']['success'], 5)
        for i in range(10):
            conns[i].return_to_pool()
            assert_equal(stats_logger.stats['checked_in'], (i + 10) + 1)
        assert_equal(stats_logger.stats['disposed']['success'], 10)
        # Make sure a double return doesn't change our counts
        assert_raises(InvalidRequestError, conns[0].return_to_pool)
        assert_equal(stats_logger.stats['checked_in'], 20)
        assert_equal(stats_logger.stats['disposed']['success'], 10)
        conns[-1].return_to_pool()
        assert_equal(stats_logger.stats['checked_in'], 20)
        assert_equal(stats_logger.stats['disposed']['success'], 10)
        pool.dispose()

    def test_queue_pool_recycle(self):
        """With recycle=1 every connection is recycled after one use, in
        both threadlocal modes."""
        stats_logger = StatsLoggerWithListStorage()
        pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=1,
                              prefill=True, pool_timeout=0.5, timeout=1,
                              keyspace='PycassaTestKeyspace', credentials=_credentials,
                              listeners=[stats_logger], use_threadlocal=False)
        cf = ColumnFamily(pool, 'Standard1')
        columns = {'col1': 'val', 'col2': 'val'}
        for i in range(10):
            cf.insert('key', columns)
        assert_equal(stats_logger.stats['recycled'], 5)
        pool.dispose()
        stats_logger.reset()
        # Try with threadlocal=True
        pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=1,
                              prefill=False, pool_timeout=0.5, timeout=1,
                              keyspace='PycassaTestKeyspace', credentials=_credentials,
                              listeners=[stats_logger], use_threadlocal=True)
        cf = ColumnFamily(pool, 'Standard1')
        for i in range(10):
            cf.insert('key', columns)
        pool.dispose()
        assert_equal(stats_logger.stats['recycled'], 5)

    def test_pool_connection_failure(self):
        """Connections to the bogus 'foobar:1' host fail and are counted;
        the exact count depends on where the bad host lands in the
        permuted server list."""
        stats_logger = StatsLoggerWithListStorage()

        def get_extra():
            """Make failure count adjustments based on whether or not
            the permuted list starts with a good host:port"""
            if stats_logger.serv_list[0] == 'localhost:9160':
                return 0
            else:
                return 1

        pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000, prefill=True,
                              keyspace='PycassaTestKeyspace', credentials=_credentials,
                              pool_timeout=0.01, timeout=0.05,
                              listeners=[stats_logger], use_threadlocal=False,
                              server_list=['localhost:9160', 'foobar:1'])
        assert_equal(stats_logger.stats['failed'], 4 + get_extra())
        for i in range(0, 7):
            pool.get()
        assert_equal(stats_logger.stats['failed'], 6 + get_extra())
        pool.dispose()
        stats_logger.reset()
        # Same scenario, but with threadlocal pools and one get() per thread.
        pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000, prefill=True,
                              keyspace='PycassaTestKeyspace', credentials=_credentials,
                              pool_timeout=0.01, timeout=0.05,
                              listeners=[stats_logger], use_threadlocal=True,
                              server_list=['localhost:9160', 'foobar:1'])
        assert_equal(stats_logger.stats['failed'], 4 + get_extra())
        threads = []
        for i in range(0, 7):
            threads.append(threading.Thread(target=pool.get))
            threads[-1].start()
        for thread in threads:
            thread.join()
        assert_equal(stats_logger.stats['failed'], 6 + get_extra())
        pool.dispose()

    def test_queue_failover(self):
        """A connection rigged to fail once triggers failover to the next
        server and the insert still succeeds."""
        for prefill in (True, False):
            stats_logger = StatsLoggerWithListStorage()
            pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                                  prefill=prefill, timeout=1,
                                  keyspace='PycassaTestKeyspace', credentials=_credentials,
                                  listeners=[stats_logger], use_threadlocal=False,
                                  server_list=['localhost:9160', 'localhost:9160'])
            cf = ColumnFamily(pool, 'Standard1')
            for i in range(1, 5):
                conn = pool.get()
                setattr(conn, 'send_batch_mutate', conn._fail_once)
                conn._should_fail = True
                conn.return_to_pool()
                # The first insert attempt should fail, but failover should occur
                # and the insert should succeed
                cf.insert('key', {'col': 'val%d' % i, 'col2': 'val'})
                assert_equal(stats_logger.stats['failed'], i)
                assert_equal(cf.get('key'), {'col': 'val%d' % i, 'col2': 'val'})
            pool.dispose()

    def test_queue_threadlocal_failover(self):
        """Failover works with threadlocal pools too, both single-threaded
        and with several threads inserting concurrently."""
        stats_logger = StatsLoggerWithListStorage()
        pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                              prefill=True, timeout=0.05,
                              keyspace='PycassaTestKeyspace', credentials=_credentials,
                              listeners=[stats_logger], use_threadlocal=True,
                              server_list=['localhost:9160', 'localhost:9160'])
        cf = ColumnFamily(pool, 'Standard1')
        for i in range(1, 5):
            conn = pool.get()
            setattr(conn, 'send_batch_mutate', conn._fail_once)
            conn._should_fail = True
            conn.return_to_pool()
            # The first insert attempt should fail, but failover should occur
            # and the insert should succeed
            cf.insert('key', {'col': 'val%d' % i, 'col2': 'val'})
            assert_equal(stats_logger.stats['failed'], i)
            assert_equal(cf.get('key'), {'col': 'val%d' % i, 'col2': 'val'})
        pool.dispose()
        stats_logger.reset()
        pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                              prefill=True, timeout=0.05,
                              keyspace='PycassaTestKeyspace', credentials=_credentials,
                              listeners=[stats_logger], use_threadlocal=True,
                              server_list=['localhost:9160', 'localhost:9160'])
        cf = ColumnFamily(pool, 'Standard1')
        # Rig every pooled connection to fail its first mutate.
        for i in range(5):
            conn = pool.get()
            setattr(conn, 'send_batch_mutate', conn._fail_once)
            conn._should_fail = True
            conn.return_to_pool()
        threads = []
        args = ('key', {'col': 'val', 'col2': 'val'})
        for i in range(5):
            threads.append(threading.Thread(target=cf.insert, args=args))
            threads[-1].start()
        for thread in threads:
            thread.join()
        assert_equal(stats_logger.stats['failed'], 5)
        pool.dispose()

    def test_queue_retry_limit(self):
        """After max_retries failures the operation surfaces as
        MaximumRetryException instead of retrying forever."""
        for prefill in (True, False):
            stats_logger = StatsLoggerWithListStorage()
            pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                                  prefill=prefill, max_retries=3,  # allow 3 retries
                                  keyspace='PycassaTestKeyspace', credentials=_credentials,
                                  listeners=[stats_logger], use_threadlocal=False,
                                  server_list=['localhost:9160', 'localhost:9160'])
            # Corrupt all of the connections
            for i in range(5):
                conn = pool.get()
                setattr(conn, 'send_batch_mutate', conn._fail_once)
                conn._should_fail = True
                conn.return_to_pool()
            cf = ColumnFamily(pool, 'Standard1')
            assert_raises(MaximumRetryException, cf.insert, 'key', {'col': 'val', 'col2': 'val'})
            assert_equal(stats_logger.stats['failed'], 4)  # On the 4th failure, didn't retry
            pool.dispose()

    def test_queue_failure_on_retry(self):
        """A failure while replacing a connection during retry is treated
        like any other connection failure."""
        stats_logger = StatsLoggerWithListStorage()
        pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                              prefill=True, max_retries=3,  # allow 3 retries
                              keyspace='PycassaTestKeyspace', credentials=_credentials,
                              listeners=[stats_logger], use_threadlocal=False,
                              server_list=['localhost:9160', 'localhost:9160'])

        def raiser():
            raise IOError

        # Replace wrapper will open a connection to get the version, so if it
        # fails we need to retry as with any other connection failure
        pool._replace_wrapper = raiser
        # Corrupt all of the connections
        for i in range(5):
            conn = pool.get()
            setattr(conn, 'send_batch_mutate', conn._fail_once)
            conn._should_fail = True
            conn.return_to_pool()
        cf = ColumnFamily(pool, 'Standard1')
        assert_raises(MaximumRetryException, cf.insert, 'key', {'col': 'val', 'col2': 'val'})
        assert_equal(stats_logger.stats['failed'], 4)  # On the 4th failure, didn't retry
        pool.dispose()

    def test_queue_threadlocal_retry_limit(self):
        """The retry limit also holds for threadlocal pools."""
        stats_logger = StatsLoggerWithListStorage()
        pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                              prefill=True, max_retries=3,  # allow 3 retries
                              keyspace='PycassaTestKeyspace', credentials=_credentials,
                              listeners=[stats_logger], use_threadlocal=True,
                              server_list=['localhost:9160', 'localhost:9160'])
        # Corrupt all of the connections
        for i in range(5):
            conn = pool.get()
            setattr(conn, 'send_batch_mutate', conn._fail_once)
            conn._should_fail = True
            conn.return_to_pool()
        cf = ColumnFamily(pool, 'Standard1')
        assert_raises(MaximumRetryException, cf.insert, 'key', {'col': 'val', 'col2': 'val'})
        assert_equal(stats_logger.stats['failed'], 4)  # On the 4th failure, didn't retry
        pool.dispose()

    def test_queue_failure_with_no_retries(self):
        """Counter mutations are not retried: one failure is final."""
        stats_logger = StatsLoggerWithListStorage()
        pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                              prefill=True, max_retries=3,  # allow 3 retries
                              keyspace='PycassaTestKeyspace', credentials=_credentials,
                              listeners=[stats_logger], use_threadlocal=False,
                              server_list=['localhost:9160', 'localhost:9160'])
        # Corrupt all of the connections
        for i in range(5):
            conn = pool.get()
            setattr(conn, 'send_batch_mutate', conn._fail_once)
            conn._should_fail = True
            conn.return_to_pool()
        cf = ColumnFamily(pool, 'Counter1')
        assert_raises(MaximumRetryException, cf.insert, 'key', {'col': 2, 'col2': 2})
        assert_equal(stats_logger.stats['failed'], 1)  # didn't retry at all
        pool.dispose()

    def test_failure_connection_info(self):
        """The failure event handed to listeners carries the request's
        method, args and kwargs for diagnostics."""
        stats_logger = StatsLoggerRequestInfo()
        pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                              prefill=True, max_retries=0,
                              keyspace='PycassaTestKeyspace', credentials=_credentials,
                              listeners=[stats_logger], use_threadlocal=True,
                              server_list=['localhost:9160'])
        cf = ColumnFamily(pool, 'Counter1')
        # Corrupt the connection
        conn = pool.get()
        setattr(conn, 'send_get', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()
        assert_raises(MaximumRetryException, cf.get, 'greunt', columns=['col'])
        assert_true('request' in stats_logger.failure_dict['connection'].info)
        request = stats_logger.failure_dict['connection'].info['request']
        assert_equal(request['method'], 'get')
        assert_equal(request['args'], ('greunt', ColumnPath('Counter1', None, 'col'), 1))
        assert_equal(request['kwargs'], {})

    def test_pool_invalid_request(self):
        """Application-level errors (invalid request, not found) pass
        through instead of being swallowed or retried."""
        stats_logger = StatsLoggerWithListStorage()
        pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                              prefill=True, max_retries=3,
                              keyspace='PycassaTestKeyspace',
                              credentials=_credentials,
                              listeners=[stats_logger], use_threadlocal=False,
                              server_list=['localhost:9160'])
        cf = ColumnFamily(pool, 'Standard1')
        # Make sure the pool doesn't hide and retries invalid requests
        assert_raises(InvalidRequestException, cf.add, 'key', 'col')
        assert_raises(NotFoundException, cf.get, 'none')
        pool.dispose()
class StatsLoggerWithListStorage(StatsLogger):
    """StatsLogger that additionally remembers the last server list seen."""

    def obtained_server_list(self, dic):
        """Do the normal bookkeeping, then stash the server list."""
        StatsLogger.obtained_server_list(self, dic)
        servers = dic.get('server_list')
        self.serv_list = servers
class StatsLoggerRequestInfo(StatsLogger):
    """StatsLogger that keeps the event dict of the last connection failure."""

    def connection_failed(self, dic):
        """Do the normal bookkeeping, then stash the failure event."""
        StatsLogger.connection_failed(self, dic)
        self.failure_dict = dic
| 2.15625 | 2 |
03 Queue/queue2Stacks.py | harshrajm/Python-Algo-DS | 0 | 12769817 | <filename>03 Queue/queue2Stacks.py
class Queue2Stacks:
    """FIFO queue built from two LIFO stacks.

    ``stack1`` receives new items; ``stack2`` holds items in reversed
    order ready to be dequeued.  Items migrate from stack1 to stack2
    lazily, only when stack2 runs dry, giving amortized O(1) operations.
    """

    def __init__(self):
        self.stack1 = []  # incoming side: newest item on top
        self.stack2 = []  # outgoing side: oldest item on top

    def size(self):
        """Return the number of items currently queued.

        Bug fix: the original counted only stack1, under-reporting the
        size whenever items had already migrated to stack2.
        """
        return len(self.stack1) + len(self.stack2)

    def isEmpty(self):
        """Return True if the queue holds no items.

        Bug fix: the original checked only stack1, so the queue looked
        empty while items were still waiting in stack2.
        """
        return self.stack1 == [] and self.stack2 == []

    def enqueue(self, item):
        """Append *item* to the back of the queue."""
        self.stack1.append(item)

    def dequeue(self):
        """Pop and return the front item.

        Returns the string "empty!!" when the queue is empty (kept for
        backward compatibility with the demo code below).
        """
        if self.stack2 != []:
            return self.stack2.pop()
        if self.stack1 == []:
            return "empty!!"
        # Reverse stack1 into stack2 so the oldest element ends up on top.
        for x in range(len(self.stack1)):
            self.stack2.append(self.stack1.pop())
        return self.stack2.pop()
#return out
# Command-line smoke test: enqueue/dequeue interleaved to exercise the
# lazy transfer between the two stacks.
q = Queue2Stacks()
q.enqueue("h")
q.enqueue("a")
q.enqueue("r")
q.enqueue("s")
q.enqueue("h")
print(q.size())
print(q.dequeue())
print(q.dequeue())
q.enqueue("r")
q.enqueue("a")
q.enqueue("j")
print(q.dequeue())
print(q.dequeue())
print(q.dequeue())
print(q.dequeue())
print(q.dequeue())
print(q.dequeue())
# Drained above; refill with ints and drain one past empty.
q.enqueue(1)
q.enqueue(2)
q.enqueue(3)
print(q.dequeue())
print(q.dequeue())
print(q.dequeue())
print(q.dequeue())
Iterator.py | AutogenSh/design_patterns | 0 | 12769818 | from abc import abstractmethod
'''
迭代器模式(Iterator Pattern)是 Java 和 .Net 编程环境中非常常用的设计模式。
这种模式用于顺序访问集合对象的元素,不需要知道集合对象的底层表示。
意图:提供一种方法顺序访问一个聚合对象中各个元素, 而又无须暴露该对象的内部表示。
主要解决:不同的方式来遍历整个整合对象。
何时使用:遍历一个聚合对象。
如何解决:把在元素之间游走的责任交给迭代器,而不是聚合对象。
关键代码:定义接口:hasNext, next。
'''
class Iterator(ABC):
    """Abstract iterator interface: hasNext/next, Java-style.

    Bug fix: ``@abstractmethod`` only takes effect when the class uses
    ``ABCMeta`` (e.g. by inheriting ``ABC``); the original plain class was
    silently instantiable with no-op abstract methods.
    """

    @abstractmethod
    def hasNext(self) -> bool:
        """Return True while there are more elements to visit."""
        pass

    @abstractmethod
    def next(self) -> object:
        """Return the next element and advance the cursor."""
        pass
class Container(ABC):
    """Abstract aggregate that can produce an Iterator over its elements.

    Bug fix: as with ``Iterator``, ``@abstractmethod`` was inert without
    an ``ABC`` base, so the "abstract" container was instantiable.
    """

    @abstractmethod
    def iter(self) -> Iterator:
        """Return a fresh Iterator positioned at the first element."""
        pass
class GeneralIterator(Iterator):
    """Concrete iterator that walks a list front to back.

    ``next()`` implicitly returns None once the list is exhausted.
    """

    def __init__(self, list) -> None:
        super().__init__()
        self.index = 0
        self.list = list

    def hasNext(self) -> bool:
        """True while the cursor has not passed the last element."""
        return True if self.index < len(self.list) else False

    def next(self) -> object:
        """Return the element under the cursor and advance, or None."""
        if self.hasNext():
            current = self.list[self.index]
            self.index += 1
            return current
class NameRepository(Container):
    """Demo container holding a fixed list of names."""

    def __init__(self) -> None:
        self.names = ['Robert', 'John', 'Julie', 'Lora']

    def iter(self) -> Iterator:
        """Return an iterator over the stored names."""
        return GeneralIterator(self.names)
if __name__ == '__main__':
    # Demo: print every name in the repository via the iterator protocol.
    repository = NameRepository()
    it = repository.iter()
    while (it.hasNext()):
        print('%s' % it.next())
| 4.21875 | 4 |
excel_magic/splitter.py | HJFG/excel-magic | 0 | 12769819 | <reponame>HJFG/excel-magic
import xlrd
import xlsxwriter
import os
class Pointer:
    """Mutable (row, col) cursor used while copying cells between sheets."""

    def __init__(self, row: int, col: int):
        self.row = row
        self.col = col

    def next_row(self, current_col=False):
        """Advance one row down; the column resets to 0 unless
        *current_col* is true."""
        self.row += 1
        if not current_col:
            self.col = 0

    def next_col(self):
        """Advance one column to the right."""
        self.col = self.col + 1
def split_sheets(path: str, out: str = '', out_prefix: str = ''):
    """Copy every sheet of the workbook at *path* into its own .xlsx file.

    Output files are named ``<out_prefix><sheet name>.xlsx`` inside *out*.
    Only cell values are copied; formatting is not carried over.
    """
    source_book = xlrd.open_workbook(path, formatting_info=True)
    sheet: xlrd.sheet.Sheet
    for sheet in source_book.sheets():
        target_path = os.path.join(out, out_prefix) + sheet.name + '.xlsx'
        target_book = xlsxwriter.Workbook(target_path)
        target_sheet = target_book.add_worksheet(sheet.name)
        for row_idx, row in enumerate(sheet.get_rows()):
            cell: xlrd.sheet.Cell
            for col_idx, cell in enumerate(row):
                target_sheet.write(row_idx, col_idx, cell.value)
        target_book.close()
class MultipleSheetsError(Exception):
    """Raised by split_rows when the source workbook has more than one sheet."""
def split_rows(path: str, row_count: int, out: str = '', out_prefix: str = ''):
    """Split a single-sheet workbook into numbered .xlsx files of
    *row_count* rows each (named ``<out_prefix>1.xlsx``, ``2.xlsx``, ...).

    Raises MultipleSheetsError if the source has more than one sheet.
    """
    file_counter = 1
    # NOTE(review): original_pointer is advanced in lockstep below but its
    # value is never read -- looks like dead state; confirm before removing.
    original_pointer = Pointer(0, 0)
    sheet_pointer = Pointer(0, 0)
    workbook = xlrd.open_workbook(path, formatting_info=True)
    if workbook.sheets().__len__() > 1:
        raise MultipleSheetsError('You have multiple sheets in this file')
    sheet = workbook.sheet_by_index(0)
    current_workbook = xlsxwriter.Workbook(os.path.join(out, out_prefix) + str(file_counter) + '.xlsx')
    current_sheet = current_workbook.add_worksheet(sheet.name)
    for row in sheet.get_rows():
        for cell in row:
            current_sheet.write(sheet_pointer.row, sheet_pointer.col, cell.value)
            sheet_pointer.next_col()
            original_pointer.next_col()
        # Current output file is full (rows 0..row_count-1 written):
        # close it and start a fresh workbook with a reset cursor.
        if sheet_pointer.row == row_count - 1:
            sheet_pointer = Pointer(0, 0)
            file_counter += 1
            current_workbook.close()
            current_workbook = xlsxwriter.Workbook(os.path.join(out, out_prefix) + str(file_counter) + '.xlsx')
            current_sheet = current_workbook.add_worksheet(sheet.name)
        else:
            sheet_pointer.next_row()
            original_pointer.next_row()
    current_workbook.close()
| 3.0625 | 3 |
setify/__init__.py | MinaGabriel/setify | 0 | 12769820 | from . import datasets
from . import utils
# Package version, exposed as setify.__version__.
__version__ = '0.9.3'
| 1.109375 | 1 |
bin/async_cnn_train.py | DwangoMediaVillage/marltas_core | 9 | 12769821 | """Asynchronized (distributed) cnn training."""
import os # noqa isort:skip
os.environ['OMP_NUM_THREADS'] = '1' # noqa isort:skip
import argparse
import logging
import pprint
import time
from dataclasses import asdict, dataclass
from functools import partial
from pathlib import Path
import numpy as np
from dqn.actor_manager import ActorManagerClient, run_actor_manager_server
from dqn.actor_runner import ActorRunner
from dqn.async_train import AsyncTrainerConfig, async_train
from dqn.cnn.config import CNNConfigBase
from dqn.cnn.datum import Batch
from dqn.cnn.evaluator import run_evaluator_server
from dqn.cnn.learner import Learner
from dqn.cnn.replay_buffer import ReplayBufferServer
from dqn.cnn.run_actor import run_actor
from dqn.evaluator import EvaluatorClient, EvaluatorServerRunner
from dqn.param_distributor import (ParamDistributorClient,
run_param_distributor_server)
from dqn.policy import PolicyParam
from dqn.subprocess_manager import SubprocessManager
from dqn.utils import init_log_dir, init_random_seed
@dataclass
class Config(CNNConfigBase):
    """Configuration of CNN asynchronized training."""
    # Settings for the asynchronous training loop itself; defaults apply
    # unless overridden (e.g. via a YAML file loaded by load_from_yaml).
    trainer: AsyncTrainerConfig = AsyncTrainerConfig()
def init_actor_runner(config: Config) -> ActorRunner:
    """Initialize actor runner.

    Workers run ``run_actor`` with a uniform initial policy: epsilon 1.0
    for every vectorized environment, gamma taken from the config.

    Args:
        config: Configuration of training.
    """
    env_count = config.actor.vector_env_size
    initial_param = PolicyParam(
        epsilon=np.ones(env_count),
        gamma=np.full(env_count, config.gamma),
    )
    return ActorRunner(
        n_processes=config.n_actor_process,
        run_actor_func=partial(run_actor, init_policy_param=initial_param, config=config),
    )
def main_run_actor(config: Config, logger: logging.Logger = logging.getLogger(__name__)) -> None:
    """Run actor forever.

    Polls the runner once per second and raises AssertionError as soon
    as any worker process dies; the runner is finalized on the way out.

    Args:
        config: Training configuration.
        logger: Logger object.
    """
    runner = init_actor_runner(config)
    logger.info("Actor runner initialized.")
    try:
        runner.start()
        logger.info("Actor runner start.")
        while True:
            assert runner.workers_alive, f"Actor runner's worker died."
            time.sleep(1)
    finally:
        logger.info(f"Finalize actor runner")
        runner.finalize()
def main(log_dir: Path, enable_actor: bool, config: Config,
         logger: logging.Logger = logging.getLogger(__name__)) -> None:
    """Initialize and kick all the components of asynchronized training.

    Args:
        log_dir: Directory to put log data.
        enable_actor: If False, no actor processes are spawned here;
            actors are expected to run in a separate process.
        config: Training configuration.
        logger: Logger object.
    """
    # show configuration
    logger.info(pprint.pformat(asdict(config)))
    # init config
    if not enable_actor:
        logger.warning('enable_actor is false. You should run actor in other process')
        config.n_actor_process = 0  # disable actor
    # NOTE: All child processes should be forked before init gRPC channel (https://github.com/grpc/grpc/issues/13873)
    subprocess_manager = SubprocessManager()
    # init actor manager
    subprocess_manager.append_worker(
        partial(run_actor_manager_server,
                url=config.actor_manager_url,
                gamma=config.gamma,
                config=config.trainer.actor_manager))
    # init param distributor
    subprocess_manager.append_worker(partial(run_param_distributor_server, url=config.param_distributor_url))
    # init evaluator
    evaluator_runner = EvaluatorServerRunner(run_evaluator_server_func=partial(run_evaluator_server, config=config))
    # may init actor (runs zero workers when enable_actor is false)
    actor_runner = init_actor_runner(config)
    # init replay buffer
    replay_buffer_server = ReplayBufferServer(config=config)
    # init learner
    learner = Learner(config=config)
    try:
        def check_subprocess_func():
            """Helper function to check child processes."""
            assert subprocess_manager.workers_alive, 'Subprocess manager worker has been dead'
            assert evaluator_runner.workers_alive, 'Evaluator runner worker has been dead'
            assert actor_runner.workers_alive, 'Actor runner worker has been dead'

        check_subprocess_func()
        # init gRPC clients (only after all child processes are forked)
        evaluator_runner.start()
        actor_runner.start()
        evaluator_client = EvaluatorClient(url=config.evaluator_url)
        param_distributor_client = ParamDistributorClient(url=config.param_distributor_url)
        actor_manager_client = ActorManagerClient(url=config.actor_manager_url)
        # run train
        async_train(log_dir=log_dir,
                    check_subprocess_func=check_subprocess_func,
                    actor_manager_client=actor_manager_client,
                    evaluator_client=evaluator_client,
                    param_distributor_client=param_distributor_client,
                    replay_buffer_server=replay_buffer_server,
                    learner=learner,
                    batch_from_sample=Batch.from_buffer_sample,
                    config=config.trainer)
    finally:
        # Tear everything down even when training aborts.
        replay_buffer_server.finalize()
        subprocess_manager.finalize()
        evaluator_runner.finalize()
        actor_runner.finalize()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Asynchronized CNN-DQN training.")
    parser.add_argument('log_dir', type=Path, help="Directory to put log and snapshots")
    parser.add_argument('--log_level',
                        type=str,
                        choices=('debug', 'info', 'error', 'critical'),
                        default='info',
                        help="Logging level")
    parser.add_argument('--disable_actor', action='store_true', help="Disable actor module or not.")
    parser.add_argument('--run_only_actor', action='store_true', help="Running only actor module or not.")
    parser.add_argument('--config', type=Path, help="Path of DQN configuration YAML file.")
    parser.add_argument('--seed', type=int, default=1, help="Random seed value.")
    args = parser.parse_args()
    # init configuration (defaults when no YAML file is given)
    config = Config.load_from_yaml(args.config) if args.config else Config()
    # init log_dir
    log_handlers = [logging.StreamHandler()]
    if not args.run_only_actor:
        # exist_ok=False: fail fast rather than overwrite a previous run's logs
        args.log_dir.mkdir(exist_ok=False, parents=False)
        init_log_dir(args.log_dir, config)
        log_handlers.append(logging.FileHandler(args.log_dir / 'main.log'))
    # init logger
    logging.basicConfig(level=getattr(logging, args.log_level.upper()),
                        format='[%(asctime)s %(name)s %(levelname)s] %(message)s',
                        datefmt='%Y/%m/%d %I:%M:%S',
                        handlers=log_handlers)
    # init random seed
    init_random_seed(args.seed)
    # start training or exploration
    if args.run_only_actor:
        assert not args.disable_actor, 'run_actor should be specified without disable_actor.'
        main_run_actor(config)
    else:
        main(args.log_dir, not args.disable_actor, config)
| 2.265625 | 2 |
Lib/site-packages/pylint/config/help_formatter.py | edupyter/EDUPYTER38 | 0 | 12769822 | <gh_stars>0
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
# Copyright (c) https://github.com/PyCQA/pylint/blob/main/CONTRIBUTORS.txt
from __future__ import annotations
import argparse
from pylint.config.callback_actions import _CallbackAction
from pylint.constants import DEFAULT_PYLINT_HOME, OLD_DEFAULT_PYLINT_HOME
class _HelpFormatter(argparse.RawDescriptionHelpFormatter):
    """Formatter for the help message emitted by argparse."""

    def _get_help_string(self, action: argparse.Action) -> str | None:
        """Copied from argparse.ArgumentDefaultsHelpFormatter."""
        assert action.help
        help_string = action.help
        # CallbackActions don't have a default
        if isinstance(action, _CallbackAction):
            return help_string
        # Append "(default: ...)" unless the text already interpolates the
        # default or the default is deliberately suppressed.
        if "%(default)" not in help_string:
            if action.default is not argparse.SUPPRESS:
                defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
                if action.option_strings or action.nargs in defaulting_nargs:
                    help_string += " (default: %(default)s)"
        return help_string

    @staticmethod
    def get_long_description() -> str:
        # Epilog for --help; raw layout is preserved by
        # RawDescriptionHelpFormatter.
        return f"""
Environment variables:
The following environment variables are used:
* PYLINTHOME Path to the directory where persistent data for the run will
be stored. If not found, it defaults to '{DEFAULT_PYLINT_HOME}'
or '{OLD_DEFAULT_PYLINT_HOME}' (in the current working directory).
* PYLINTRC Path to the configuration file. See the documentation for the method used
to search for configuration file.
Output:
Using the default text output, the message format is :
MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE
There are 5 kind of message types :
* (I) info, for informational messages
* (C) convention, for programming standard violation
* (R) refactor, for bad code smell
* (W) warning, for python specific problems
* (E) error, for probable bugs in the code
* (F) fatal, if an error occurred which prevented pylint from doing further processing.
Output status code:
Pylint should leave with following bitwise status codes:
* 0 if everything went fine
* 1 if a fatal message was issued
* 2 if an error message was issued
* 4 if a warning message was issued
* 8 if a refactor message was issued
* 16 if a convention message was issued
* 32 on usage error
"""
| 2.265625 | 2 |
tests/tests.py | granttremblay/HRCbus | 0 | 12769823 | #!/usr/bin/env python
# Set the path explicitly #
sys.path.insert(0, os.path.abspath(__file__+"/../.."))
import unittest
class BasicTestSuit(unittest.TestCase):
"""Basic test cases."""
def test_absolute_truth_and_meaning(self):
assert True
def test_check_environment(self):
hrcbus.check_environment()
pass
if __name__ == '__main__':
unittest.main()
| 2.671875 | 3 |
src/cluster/migrations/0006_auto_20180801_1521.py | mrc-rius/computational_marketing_master_thesis | 0 | 12769824 | <filename>src/cluster/migrations/0006_auto_20180801_1521.py
# Generated by Django 2.0.5 on 2018-08-01 13:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: replaces the single vehicle_power /
    # vehicle_price columns with min/max range columns plus a power unit.

    dependencies = [
        ('cluster', '0005_costs'),
    ]

    operations = [
        # drop the old scalar columns
        migrations.RemoveField(
            model_name='vehicle',
            name='vehicle_power',
        ),
        migrations.RemoveField(
            model_name='vehicle',
            name='vehicle_price',
        ),
        # add range columns (defaults keep existing rows valid)
        migrations.AddField(
            model_name='vehicle',
            name='vehicle_max_power',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='vehicle',
            name='vehicle_max_price',
            field=models.DecimalField(decimal_places=2, default=0, max_digits=10),
        ),
        migrations.AddField(
            model_name='vehicle',
            name='vehicle_min_power',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='vehicle',
            name='vehicle_min_price',
            field=models.DecimalField(decimal_places=2, default=0, max_digits=10),
        ),
        migrations.AddField(
            model_name='vehicle',
            name='vehicle_power_unit',
            field=models.CharField(default='KW', max_length=5),
        ),
    ]
| 1.515625 | 2 |
src/chapters/wall/hyperspace_helper/SceneManager.py | scottalmond/EscapeRoom | 1 | 12769825 |
# SceneManager: tracks the pod's flight through the hyperspace maze (segment
# queue, pod/camera placement, and intro/play/death/outro scene states).
import sys
import numpy as np
import math
import random
import time
from enum import Enum
from chapters.wall.hyperspace_helper.Segment import Segment
from chapters.wall.hyperspace_helper.AssetLibrary import AssetLibrary
from chapters.wall.hyperspace_helper.RingAssembly import RingAssembly
from chapters.wall.hyperspace_helper.Curve import Curve
from chapters.wall.hyperspace_helper.Maze import Maze
class SCENE_STATE(Enum):
    """High-level animation state of the hyperspace scene."""
    INTRO=0 #pod exiting space ship
    OUTRO=1 #pod entering white success
    DEATH=2 #pod entering black death
    PLAY=3 #normal operation (birth and hot controls)
#precon: first segment is id=0 (where to look in file) and is straight
# (ensure camera is lined up with pod after intro sequence)
#strongly advised: last segment before death and outro be straight to allow for extrapolation
#track the status of the pod through the maze
class SceneManager:
    """Tracks the pod through the maze: maintains the segment queue, the
    pod/camera orientation, user-driven pod offset, and scene state."""
    MAX_POD_DISPLACEMENT=3.5 #2.8 #maximum distance the pod can be from the center of the playfield
    # ~4x of Segment.BRANCH_DISPLACEMENT_DISTANCE
    POD_TRANSLATION_PER_SECOND=10.0 #rate of pod movement per second
    POD_ROTATION_DEGREES_PER_SECOND=70.0 #rate of rotation animatic when pod is translating
    POD_MAX_ROTATION=[6.0,12.0] #x-translation, y-translation, degrees
    INTRO_SECONDS=1 #number of seconds to wait on start for cut scene to play
    OUTRO_SECONDS=1 #duration of the success (white-out) cut scene, seconds
    DEATH_SECONDS=1 #duration of the death (black-out) cut scene, seconds
    CAMERA_LAG_DISTANCE=12 #pi3d distance unit between camera and pod
    def __init__(self):
        self.np=np #for some reason, Python forgets that np was imported...???? so it needs to be stored here for later use... idk/idc
    def clean(self,pi3d,display_3d,camera_3d):
        """Reset all per-run state and rebuild scene objects.

        Stores the pi3d handles, zeroes pod offset/rotation state, loads the
        asset library and maze definition, then runs a block of debug output.
        """
        #variables
        self.pi3d=pi3d
        #why is np appaear as a UnboundedLocalError? I imported it up above...
        self.pod_offset=self.np.array([0.0,0.0]) #x,y offset
        self.pod_offset_rate=self.np.array([0.0,0.0]) #Z,X rotation angles for translation animatic (rotate right to translate right)
        self.scene={'state':SCENE_STATE.INTRO,'start_seconds':0,'end_seconds':self.INTRO_SECONDS,'ratio':0.0}
        self.life=0
        self.level_start_time_seconds=0
        self.segment_list=[]
        self.pod_segment=None
        self.camera_segment=None
        self.last_key=-1 #delete from final program - used for smoothing pi3d keyboard inputs
        #playfield
        self.display = display_3d #self.pi3d.Display.create(background=(0.0, 0.0, 0.0, 0.0))
        self.camera = camera_3d #self.pi3d.Camera()
        self.light = self.pi3d.Light(lightpos=(10,-10,-7),lightcol=(0.75,0.75,0.45), lightamb=(0.1,0.1,0.42),is_point=False)
        #self.keys = self.pi3d.Keyboard() #TODO: remove later...
        #objects
        self.asset_library=AssetLibrary(self.pi3d)
        self.pod=self.asset_library.pod_frame.shallow_clone() #note: all children remain intact
        self.maze=Maze()
        #debug testing
        # NOTE(review): everything below is development-only debug output
        self.maze.clean()
        print(self.maze.getSegmentsBetweenNodes(100,91))
        print(self.maze.getSegmentsBetweenNodes(91,100))
        print(self.maze.getSegmentsBetweenNodes(91,91))
        #print(maze.linear_definition)
        #print(maze.branch_definition)
        #print(maze.segment_definition)
        #print(maze.debris_definition)
        segments=self.maze.getSegmentIdAfter(2,3)
        print("SceneManager.clean: Next segment: ",segments)
        segments=segments[0]
        temp2=self.maze.getPopulatedSegment(segments["segment_id"],segments["is_forward"],segments["is_branch"],self.asset_library,self.np.array([0,0,0]),self.np.eye(3),0)
        print("SceneManager.clean: populated: ",temp2)
        temp3=self.maze.getFirstPopulatedSegment(self.asset_library,0)
        print("SceneManager.clean: first segment: ",temp3)
def __getRingCount(self):
count=0
for segment in self.segment_list:
count+=len(segment.ring_assembly_list)
return count
    #update list of parameterized arcs
    def __updateSegmentQueue(self,level_elapsed_time_seconds):
        """Maintain the rolling window of track segments.

        Seeds the list on first call, appends successors to segments that end
        within the pre-render window, and disposes of segments the camera has
        fully passed.
        """
        #if any segments in list are u<0 for camera (already completely used), then dispose of segment
        #if any segment has no succesor and the [end time - current time] < queue_time_depth
        #	then get and append successor
        #initialization
        if(len(self.segment_list)==0):
            segment_joint=self.getSegmentAfter(None)
            first_segment=segment_joint['next_segment'][0]
            self.segment_list.append(first_segment)
            self.pod_segment=first_segment
            self.camera_segment=first_segment
        #append segments to end when the end is near
        segment_index=0
        while(segment_index<len(self.segment_list)): #keep adding segments to end when needed
            segment=self.segment_list[segment_index]
            end_time=segment.durationSeconds()+segment.start_time_seconds
            cut_off_time=level_elapsed_time_seconds+RingAssembly.PRE_RENDER_SECONDS
            #if(level_elapsed_time_seconds<7):
            #	print('query: '+str(end_time)+"<"+str(cut_off_time))
            #	print('size: '+str(len(self.segment_list)))
            if(end_time<cut_off_time and segment.hasTraceabilityTo(self.pod_segment)):
                # a branch segment owns two successors ([1] and [2]); a
                # straight segment owns one ([0])
                if(segment.is_branch):
                    if(segment.successor[1] is None):
                        segment_joint=self.getSegmentAfter(segment)
                        for itr in range(2):
                            seg_id=itr+1
                            self.segment_list.append(segment_joint['next_segment'][seg_id])
                            segment.successor[seg_id]=segment_joint['next_segment'][seg_id]
                            segment_joint['next_segment'][seg_id].predecessor=segment
                else:
                    if(segment.successor[0] is None):
                        segment_joint=self.getSegmentAfter(segment)
                        self.segment_list.append(segment_joint['next_segment'][0])
                        segment.successor[0]=segment_joint['next_segment'][0]
                        segment_joint['next_segment'][0].predecessor=segment
            segment_index+=1
        #remove old segments
        camera_time=self.__getCameraTime(level_elapsed_time_seconds)
        for segment_index in reversed(range(len(self.segment_list))): #traverse backward to allow for deletion
            segment=self.segment_list[segment_index]
            ratio=segment.getRatio(camera_time)
            if(ratio>1):
                if(not segment==self.camera_segment):
                    segment=self.segment_list.pop(segment_index) #delete stale segments
                    segment.dispose()
#update graphical rotation of rings, derbis, etc
def __updateSegments(self,level_elapsed_time_seconds):
for segment in self.segment_list:
segment.update(level_elapsed_time_seconds,self.light)
    #assumes input for 'k' as 4-element bool np.array
    # in the following order: [NORTH,WEST,SOUTH,EAST], where True is an active user input command
    def __updatePodPosition(self,k,delta_time):
        """Integrate the pod's in-plane offset and its banking animatic.

        k: 4-element bool array [NORTH, WEST, SOUTH, EAST] of active inputs
        delta_time: seconds since the previous frame
        """
        #position
        pod_target=np.array([0,0])
        is_x=False
        is_y=False
        IS_AIRPLANE_CONTROLS=True #True is up joystick means down motion
        #if(k==ord('a')):
        #pod_target[0]=-1
        #is_x=True
        #if(k==ord('d')):
        #pod_target[0]=1
        #is_x=True
        #if(k==ord('s')):
        #pod_target[1]=1
        #is_y=True
        #if(k==ord('w')):
        #pod_target[1]=-1
        #is_y=True
        if(k[1]):
            pod_target[0]=-1
            is_x=True
        if(k[3]):
            pod_target[0]=1
            is_x=True
        if(k[2]):
            if(IS_AIRPLANE_CONTROLS):
                pod_target[1]=-1
            else:
                pod_target[1]=1
            is_y=True
        if(k[0]):
            if(IS_AIRPLANE_CONTROLS):
                pod_target[1]=1
            else:
                pod_target[1]=-1
            is_y=True
        # 0.707 factor keeps diagonal movement at the same overall speed
        delta_pod=pod_target*self.POD_TRANSLATION_PER_SECOND*delta_time*(0.707 if (is_x and is_y) else 1.0)
        pod_pos=self.pod_offset+delta_pod
        scale=np.linalg.norm(pod_pos)
        if(scale>self.MAX_POD_DISPLACEMENT):
            # clamp the pod to a circle of radius MAX_POD_DISPLACEMENT
            pod_pos=pod_pos*self.MAX_POD_DISPLACEMENT/scale
        self.pod_offset=pod_pos
        #rotation animatic
        x_rate=self.pod_offset_rate[0] #x-translation, Z-rotation
        delta_x=self.POD_ROTATION_DEGREES_PER_SECOND*delta_time
        #if(k==ord('d')):#right
        #delta_x=-delta_x
        #elif(k==ord('a')):#left
        #pass
        if(k[3]):#right
            delta_x=-delta_x
        elif(k[1]):#left
            pass
        else:#neither, return to center
            if(x_rate<0): delta_x=min(-x_rate,delta_x)
            elif(x_rate>0): delta_x=max(-x_rate,-delta_x)
            else: delta_x=0
        self.pod_offset_rate[0]+=delta_x
        y_rate=self.pod_offset_rate[1] #y-translation, Y-rotation
        delta_y=self.POD_ROTATION_DEGREES_PER_SECOND*delta_time
        #if(k==ord('s')):#up
        #delta_y=-delta_y
        #elif(k==ord('w')):#down
        #pass
        if(k[0]):#up
            if(IS_AIRPLANE_CONTROLS):
                pass
            else:
                delta_y=-delta_y
        elif(k[2]):#down
            if(IS_AIRPLANE_CONTROLS):
                delta_y=-delta_y
            else:
                pass
        else:#neither, return to center
            if(y_rate<0): delta_y=min(-y_rate,delta_y)
            elif(y_rate>0): delta_y=max(-y_rate,-delta_y)
            else: delta_y=0
        self.pod_offset_rate[1]+=delta_y
        for itr in range(2): #bound rotation
            self.pod_offset_rate[itr]=max(self.pod_offset_rate[itr],-self.POD_MAX_ROTATION[itr])
            self.pod_offset_rate[itr]=min(self.pod_offset_rate[itr],self.POD_MAX_ROTATION[itr])
    def __updateProps(self,level_elapsed_time_seconds):
        """Apply the computed orientations to the light, pod and camera props."""
        prop_orientation=self.getPropOrientation(level_elapsed_time_seconds)
        #light
        light_pos=prop_orientation['light']['position']
        self.light.position((light_pos[0],light_pos[1],light_pos[2]))
        #pod
        pod_pos=prop_orientation['pod']['position']
        pod_rot=prop_orientation['pod']['rotation_euler']
        # banking animatic is applied to the pod's child node, not the frame
        self.pod.children[0].rotateToX(self.pod_offset_rate[1])
        self.pod.children[0].rotateToZ(self.pod_offset_rate[0])
        self.pod.position(pod_pos[0],pod_pos[1],pod_pos[2])
        self.pod.rotateToX(pod_rot[0])
        self.pod.rotateToY(pod_rot[1])
        self.pod.rotateToZ(pod_rot[2])
        self.pod.set_light(self.light)
        #TO DO make recursive set_light method for pod
        self.pod.children[0].set_light(self.light)
        self.pod.children[0].children[0].set_light(self.light)
        self.pod.children[0].children[0].children[0].set_light(self.light)
        #camera
        camera_pos=prop_orientation['camera']['position']
        camera_rot=prop_orientation['camera']['rotation_euler']
        self.camera.reset()
        self.camera.position(camera_pos)
        #	print("SceneManager.__updateProps: camera_pos:",camera_pos)
        self.camera.rotate(camera_rot[0],camera_rot[1],camera_rot[2])
def __drawSegments(self):
for segment in self.segment_list:
segment.draw()
    def __updatePodSegment(self,level_elapsed_time_seconds):
        """Advance the pod to its current segment and cache its orientation."""
        # getRatio>1 means the pod has passed this segment's end
        while(self.pod_segment.getRatio(level_elapsed_time_seconds)>1):
            self.pod_segment=self.pod_segment.getSuccessor()
            if(self.pod_segment.is_branch): #when entering a branch, decide which path to take
                # pod drifted left of center => take the left branch
                is_left=self.pod_offset[0]<0
                self.pod_segment.decideBranch(level_elapsed_time_seconds,is_left)
                #print('is_left: ',self.pod_segment.isLeft())
        self.pod_orientation=self.pod_segment.getOrientationAtTime(level_elapsed_time_seconds)
    def __updateCameraSegment(self,level_elapsed_time_seconds):
        """Advance the trailing camera to its segment and cache its orientation."""
        camera_time=self.__getCameraTime(level_elapsed_time_seconds)
        while(self.camera_segment.getRatio(camera_time)>1):
            self.camera_segment=self.camera_segment.getSuccessor()
        self.camera_orientation=self.camera_segment.getOrientationAtTime(camera_time)
def __getCameraTime(self,level_elapsed_time_seconds):
camera_lag_time=self.CAMERA_LAG_DISTANCE/(Segment.DISTANCE_BETWEEN_RINGS*Segment.RINGS_PER_SECOND)
camera_time=level_elapsed_time_seconds-camera_lag_time
return camera_time
def getSegmentAfter(self,prev_segment):
if(True): #create per config file
return self.getSegmentAfter_config(prev_segment)
else: #create randomly
return self.getSegmentAfter_random(prev_segment)
    #note: it is assumed the caller will populate the returned segment's predecessor
    def getSegmentAfter_config(self,prev_segment):
        """Build the successor segment(s) of prev_segment from the maze config file.

        Returns {'prev_segment': prev_segment, 'next_segment': [a, b, c]} where
        either slot 0 or slots 1-2 are populated (straight vs branch).
        """
        print("SceneManager.getSegmentAfter_config: prev_segment: ",prev_segment)
        if(prev_segment is None):
            next_segment=self.maze.getFirstPopulatedSegment(self.asset_library,0)#if no segment provided, return the first one
            #precon: time is measured in seconds from the start of the current life
            out_segment=[next_segment,None,None]
        else:
            end_point=prev_segment.getEndPoints()
            prev_id=prev_segment.segment_id
            prev2_id=-100 if prev_segment.predecessor is None else prev_segment.predecessor.segment_id #precon: the id of the segment before the first segment needs to be -100
            next_segment_ids=self.maze.getSegmentIdAfter(prev2_id,prev_id)
            print("SceneManager.getSegmentAfter_config: next_segment_ids: ",next_segment_ids)
            was_branch=len(next_segment_ids)>1
            out_segment=[None] if was_branch else [] #goal is to make either [None,Segment,Segment] for a branch, or [Segment,None,None] for straight
            for itr in range(2 if was_branch else 1):#precon: only two paths come out of any one branch node
                next_segment_def=next_segment_ids[itr]
                next_segment=self.maze.getPopulatedSegment(next_segment_def["segment_id"],
                    next_segment_def["is_forward"],next_segment_def["is_branch"],
                    self.asset_library,end_point[itr]["position"],
                    end_point[itr]["rotation_matrix"],end_point[itr]["timestamp_seconds"])
                out_segment.append(next_segment)
            if(not was_branch):
                out_segment.append(None)
                out_segment.append(None)
        return {'prev_segment':prev_segment,'next_segment':out_segment}
    #TODO: is currently a placeholder for Maze...
    #given a segment ID, return the parameters needed for the next segment
    #input:
    #Segment
    #output:
    #{'previous_segment':Segment,'next_segment':[Segment,Segment,Segment]}
    # where previous_segment is the input
    # and one of the following is True: 'next_segment'[0] is None OR 'next_segment'[1:2] is None
    def getSegmentAfter_random(self,segment):
        """Debug generator: produce random successor segment(s) for `segment`."""
        if(segment is None):
            #return first segment
            #TODO load from file
            previous_segment=None
            ring_count=7
            segment=Segment(self.asset_library,False,np.array([0,0,0]),np.identity(3),0,
                120,60,ring_count)
            for ring_id in range(ring_count):
                u=ring_id/ring_count
                segment.addRingAssembly(self.asset_library,u,
                    ring_rotation_rate=RingAssembly.RING_ROTATION_DEGREES_PER_SECOND,
                    debris_rotation_rate=RingAssembly.DEBRIS_ROTATION_DEGREES_PER_SECOND)
            next_segment=[segment,None,None]
        else:
            #this_segment_id=segment.segment_id
            previous_segment=segment
            # random geometry for the one or two follow-on segments
            ring_count=[2+random.randint(0,3),2+random.randint(0,3)]
            curvature=[random.randint(0,30),random.randint(0,30)]
            orientation=[random.randint(0,360),random.randint(0,360)]
            was_branch=segment.is_branch #input segment was a branch
            was_branch2=segment.predecessor is None or segment.predecessor.is_branch
            #print('was_branch: ',was_branch)
            is_branch=[random.randint(0,100)<20,random.randint(0,100)<20] #next segment is a branch
            if(was_branch or was_branch2):
                # never place two branches back to back
                is_branch=[False,False]
            #is_branch=[False,False]
            end_point=segment.getEndPoints()
            if(was_branch):
                next_segment=[None]
                for itr in range(2):
                    this_segment=Segment(self.asset_library,is_branch[itr],end_point[itr]['position'],
                        end_point[itr]['rotation_matrix'],end_point[itr]['timestamp_seconds'],
                        curvature[itr],orientation[itr],ring_count[itr])
                    next_segment.append(this_segment)
                    if(not is_branch[itr]):
                        for ring_id in range(ring_count[itr]):
                            u=ring_id/ring_count[itr]
                            this_segment.addRingAssembly(self.asset_library,u,
                                ring_rotation_rate=RingAssembly.RING_ROTATION_DEGREES_PER_SECOND,
                                debris_rotation_rate=RingAssembly.DEBRIS_ROTATION_DEGREES_PER_SECOND)
            else:
                next_segment=[]
                this_segment=Segment(self.asset_library,is_branch[0],end_point[0]['position'],
                    end_point[0]['rotation_matrix'],end_point[0]['timestamp_seconds'],
                    curvature[0],orientation[0],ring_count[0])
                next_segment.append(this_segment)
                next_segment.append(None)
                next_segment.append(None)
                if(not is_branch[0]):
                    for ring_id in range(ring_count[0]):
                        u=ring_id/ring_count[0]
                        this_segment.addRingAssembly(self.asset_library,u,
                            ring_rotation_rate=RingAssembly.RING_ROTATION_DEGREES_PER_SECOND,
                            debris_rotation_rate=RingAssembly.DEBRIS_ROTATION_DEGREES_PER_SECOND)
        #return next segment
        return {'prev_segment':previous_segment,'next_segment':next_segment}
    #return the start node, end node, progress and current segment_id
    #return a pointer to the segment where the pod is currently located
    #return: {"node_from":X,"node_to":Y,"ratio":Z} #ratio between nodes
    def getPodStatus(self):
        """Not yet implemented: intended to report the pod's maze position."""
        pass
    #return the segment where the camera is currently located
    def getCameraStatus(self):
        """Not yet implemented: intended to report the camera's maze position."""
        pass
    #dict with keys:
    #	pod
    #	camera
    #	light
    #	sub-keys:
    #		position
    #		rotation_matrix
    #		rotation_euler
    #note: rotations have not been implemented for light
    def getPropOrientation(self,level_elapsed_time_seconds):
        """Compute world-space placement of the pod, trailing camera and light."""
        #pod
        pod_orientation=self.pod_segment.getOrientationAtTime(level_elapsed_time_seconds)
        pod_position=pod_orientation["position"]
        x_axis=pod_orientation["rotation_matrix"][0,:]
        y_axis=pod_orientation["rotation_matrix"][1,:]
        # shift the pod off the track centerline by the player-driven offset
        pod_position+=x_axis*self.pod_offset[0]
        pod_position+=y_axis*self.pod_offset[1]
        pod_orientation["position"]=pod_position
        #camera
        camera_orientation=self.camera_segment.getOrientationAtTime(self.__getCameraTime(level_elapsed_time_seconds))
        x_axis=camera_orientation["rotation_matrix"][0,:]
        y_axis=camera_orientation["rotation_matrix"][1,:]
        position_camera=camera_orientation["position"]
        camera_movement_scale=0.5 #camera follows the pod's offset at half strength
        position_camera+=x_axis*self.pod_offset[0]*camera_movement_scale
        position_camera+=y_axis*self.pod_offset[1]*camera_movement_scale
        camera_orientation["position"]=position_camera
        # aim the camera's z-axis at the pod, keeping the track's y as 'up'
        camera_orientation_to_target=Curve.euler_angles_from_vectors(pod_position-position_camera,'z',y_axis,'y')
        camera_orientation["rotation_euler"]=camera_orientation_to_target["rotation_euler"]
        camera_orientation["rotation_matrix"]=camera_orientation_to_target["rotation_matrix"]
        #light
        light_vect=np.array([10,-10,7])
        light_vect = np.dot(camera_orientation["rotation_matrix"], light_vect) * [1.0, 1.0, -1.0] #https://github.com/tipam/pi3d/issues/220
        light_orientation={'position':light_vect}
        #laser...
        return {'pod':pod_orientation,'camera':camera_orientation,'light':light_orientation}
    #assumes inputs for navigation_joystick,camera_joystick,laser_joystick as 4-element bool np.arrays
    # in the following order: [NORTH,WEST,SOUTH,EAST], where True is an active user input command
    def update(self,this_frame_number,this_frame_elapsed_seconds,previous_frame_elapsed_seconds,packets,
               navigation_joystick,camera_joystick,laser_joystick,is_fire_laser):
        """Per-frame tick: advance the scene state machine and, in PLAY, the world."""
        scene_state=self.scene['state']
        level_elapsed_time_seconds=this_frame_elapsed_seconds-self.level_start_time_seconds
        scene_start=self.scene['start_seconds'] #seconds
        scene_end=self.scene['end_seconds']
        delta_time=this_frame_elapsed_seconds-previous_frame_elapsed_seconds #time between frames
        #advance from previous state to current state
        if(scene_end>=0 and level_elapsed_time_seconds>=scene_end):
            if(scene_state==SCENE_STATE.INTRO or scene_state==SCENE_STATE.DEATH):
                self.__setSceneState(SCENE_STATE.PLAY,this_frame_elapsed_seconds)
        #make decisions based on current state
        if(scene_end<=scene_start):
            ratio=0.0
        else:
            ratio=(level_elapsed_time_seconds-scene_start)/(scene_end-scene_start)
        self.scene['ratio']=ratio
        if(scene_state==SCENE_STATE.INTRO):
            pass #update pod, space ship, hyperspace effects
        elif(scene_state==SCENE_STATE.OUTRO): #when transitioning TO outro, fade out music
            if(ratio>=1):
                self.is_done=True #stop music in exitChapter()
            pass #update sphere of white
        elif(scene_state==SCENE_STATE.DEATH):
            pass #update sphere of black
        else: #CUT_SCENE.PLAY
            #if(this_frame_number%30==0):
            #	print('ring count: '+str(self.__getRingCount()))
            self.__updateSegmentQueue(level_elapsed_time_seconds)
            self.__updatePodSegment(level_elapsed_time_seconds)
            self.__updateCameraSegment(level_elapsed_time_seconds)
            #user input
            #buttons=[]
            #k=0
            #while k>=0:
            #k = sm.keys.read()
            #buttons.append(k)
            #k=max(buttons)
            #temp=k
            #is_smooth_motion_enabled=True
            #if(is_smooth_motion_enabled):
            #k=max(k,self.last_key)
            #self.last_key=temp
            k=-1 #temp disconnect from player controls
            self.__updatePodPosition(navigation_joystick,delta_time)
            self.__updateProps(level_elapsed_time_seconds)
            self.__updateSegments(level_elapsed_time_seconds)
            #if k==27:
            #	self.is_done=True
            #TODO collissions
    #update pod, camera, light, rings, branches, laser, asteroids...
def draw(self):
scene_state=self.scene['state']
ratio=self.scene['ratio']
if(scene_state==SCENE_STATE.INTRO):
self.pod.draw()
elif(scene_state==SCENE_STATE.OUTRO):
pass
elif(scene_state==SCENE_STATE.DEATH):
pass
else:
self.__drawSegments()#standard play scene
self.pod.draw()
    #supported state transitions:
    #intro to play
    #play to death
    #play to outro
    #death to play
    def __setSceneState(self,to_scene_state,this_frame_elapsed_seconds):
        """Transition the scene state machine; raises on an unsupported transition."""
        from_scene_state=self.scene['state']
        level_elapsed_seconds=this_frame_elapsed_seconds-self.level_start_time_seconds
        play_scene={'state':SCENE_STATE.PLAY,'start_seconds':level_elapsed_seconds,'end_seconds':-1,'ratio':0.0}
        out_scene=None
        if(to_scene_state==SCENE_STATE.PLAY):
            if(from_scene_state==SCENE_STATE.INTRO): #intro -> play
                out_scene=play_scene
                #fade in/start music
            elif(from_scene_state==SCENE_STATE.DEATH): #death -> play
                # respawn: reset the track and restart the level clock
                out_scene=play_scene
                self.segment_list=[] #clear segment list
                self.life+=1
                self.level_start_time_seconds=this_frame_elapsed_seconds
                self.pod_segment=None
                self.camera_segment=None
        elif(to_scene_state==SCENE_STATE.DEATH): #play -> death
            if(from_scene_state==SCENE_STATE.PLAY):
                out_scene={'state':SCENE_STATE.DEATH,'start_seconds':level_elapsed_seconds,'end_seconds':level_elapsed_seconds+self.DEATH_SECONDS,'ratio':0.0}
        elif(to_scene_state==SCENE_STATE.OUTRO):
            if(from_scene_state==SCENE_STATE.PLAY): #play -> outro
                out_scene={'state':SCENE_STATE.OUTRO,'start_seconds':level_elapsed_seconds,'end_seconds':level_elapsed_seconds+self.OUTRO_SECONDS,'ratio':0.0}
                #fade out music
        if(not out_scene is None):
            self.scene=out_scene
            return
        raise NotImplementedError('SceneManager.__setSceneState(): Unable to transition from scene state: '+str(from_scene_state)+', to scene state: '+str(to_scene_state))
| 2.328125 | 2 |
database.py | Mdsid7/Student_Result_Analyzer_using_OOPS_and_Database | 3 | 12769826 | <reponame>Mdsid7/Student_Result_Analyzer_using_OOPS_and_Database<gh_stars>1-10
import mysql.connector
import string
# Shared module-level MySQL connection and cursor used by every function below.
# NOTE(review): credentials are hard-coded here — consider loading them from
# environment variables or a config file instead.
mydb = mysql.connector.connect (
    host="localhost",
    user="root",
    password="<PASSWORD>",
    database="result"
)
mycursor=mydb.cursor()
def A_view():
    """Print every row of the A_section table, preceded by the column names."""
    mycursor.execute("SELECT * FROM A_section")
    headers = [col[0] for col in mycursor.description]
    print(headers)
    for row in mycursor.fetchall():
        print(row)
def B_view():
    """Print every row of the B_section table, preceded by the column names."""
    mycursor.execute("SELECT * FROM B_section")
    headers = [col[0] for col in mycursor.description]
    print(headers)
    for row in mycursor.fetchall():
        print(row)
def C_view():
    """Print every row of the C_section table, preceded by the column names."""
    mycursor.execute("SELECT * FROM C_section")
    headers = [col[0] for col in mycursor.description]
    print(headers)
    for row in mycursor.fetchall():
        print(row)
def results(USN, USN2, USN3):
    """Print the full result row for each of the three given USNs (one looked up per section)."""
    sql = "SELECT * FROM A_section where USN=%s UNION SELECT * FROM B_section where USN=%s UNION SELECT * FROM C_section WHERE USN=%s"
    mycursor.execute(sql, (USN, USN2, USN3,))
    headers = [col[0] for col in mycursor.description]
    print(headers)
    for row in mycursor.fetchall():
        print(row)
def insert_A_sec(Name,USN,Class,Section,DBMS,ADA,OOP,DSA,OS,SE,Average,Grade,SGPA):
    """Insert one student record into A_section and commit."""
    sql = "insert into A_section(Name,USN,Class,Section,DBMS,ADA,OOP,DSA,OS,SE,Average,Grade,SGPA) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
    mycursor.execute(sql, (Name, USN, Class, Section, DBMS, ADA, OOP, DSA, OS, SE, Average, Grade, SGPA))
    mydb.commit()
    print(mycursor.rowcount, "record inserted Successfully.")
def insert_B_sec(Name,USN,Class,Section,DBMS,ADA,OOP,DSA,OS,SE,Average,Grade,SGPA):
    """Insert one student record into B_section and commit."""
    sql = "insert into B_section(Name,USN,Class,Section,DBMS,ADA,OOP,DSA,OS,SE,Average,Grade,SGPA) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
    mycursor.execute(sql, (Name, USN, Class, Section, DBMS, ADA, OOP, DSA, OS, SE, Average, Grade, SGPA))
    mydb.commit()
    print(mycursor.rowcount, "record inserted Successfully.")
def insert_C_sec(Name,USN,Class,Section,DBMS,ADA,OOP,DSA,OS,SE,Average,Grade,SGPA):
    """Insert one student record into C_section and commit."""
    sql = "insert into C_section(Name,USN,Class,Section,DBMS,ADA,OOP,DSA,OS,SE,Average,Grade,SGPA) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
    mycursor.execute(sql, (Name, USN, Class, Section, DBMS, ADA, OOP, DSA, OS, SE, Average, Grade, SGPA))
    mydb.commit()
    print(mycursor.rowcount, "record inserted Successfully.")
def delete_A_sec(USN):
    """Delete the A_section row with the given USN and commit."""
    mycursor.execute("DELETE FROM A_section WHERE USN = %s", (USN,))
    mydb.commit()
    print(mycursor.rowcount, "record(s) deleted")
def delete_B_sec(USN):
    """Delete the B_section row with the given USN and commit."""
    mycursor.execute("DELETE FROM B_section WHERE USN = %s", (USN,))
    mydb.commit()
    print(mycursor.rowcount, "record(s) deleted")
def delete_C_sec(USN):
    """Delete the C_section row with the given USN and commit."""
    mycursor.execute("DELETE FROM C_section WHERE USN = %s", (USN,))
    mydb.commit()
    print(mycursor.rowcount, "record(s) deleted")
def A_avg():
    """Print the class-average percentage for A_section.

    SQL AVG over zero rows yields NULL, which mysql-connector returns as
    None; the original float('.'.join(str(ele)for ele in a)) would then
    raise ValueError on the string 'None'.  Report 0.0 instead.
    """
    mycursor.execute("Select AVG(Average) FROM A_section")
    row = mycursor.fetchone()
    res = float(row[0]) if row is not None and row[0] is not None else 0.0
    print("\nThe average percentage of A_section students is: " + str(res))
def B_avg():
    """Print the class-average percentage for B_section.

    Guards against an empty table (SQL AVG returns NULL -> None), which
    would make the original float('.'.join(...)) raise ValueError.
    """
    mycursor.execute("Select AVG(Average) FROM B_section")
    row = mycursor.fetchone()
    res = float(row[0]) if row is not None and row[0] is not None else 0.0
    print("\nThe average percentage of B_section students is: " + str(res))
def C_avg():
    """Print the class-average percentage for C_section.

    Guards against an empty table (SQL AVG returns NULL -> None), which
    would make the original float('.'.join(...)) raise ValueError.
    """
    mycursor.execute("Select AVG(Average) FROM C_section")
    row = mycursor.fetchone()
    res = float(row[0]) if row is not None and row[0] is not None else 0.0
    print("\nThe average percentage of C_section students is: " + str(res))
def A_top():
    """Print the A_section row(s) holding the maximum SGPA."""
    mycursor.execute("Select * FROM A_section WHERE SGPA = (SELECT MAX(SGPA) FROM A_section)")
    print("\n The Topper Of the Class is :\n")
    headers = [col[0] for col in mycursor.description]
    print(headers)
    for row in mycursor.fetchall():
        print(row)
def B_top():
    """Print the B_section row(s) holding the maximum SGPA."""
    mycursor.execute("Select * FROM B_section WHERE SGPA = (SELECT MAX(SGPA) FROM B_section)")
    print("\n The Topper Of the Class is :\n")
    headers = [col[0] for col in mycursor.description]
    print(headers)
    for row in mycursor.fetchall():
        print(row)
def C_top():
    """Print the C_section row(s) holding the maximum SGPA."""
    mycursor.execute("Select * FROM C_section WHERE SGPA = (SELECT MAX(SGPA) FROM C_section)")
    print("\n The Topper Of the Class is :\n")
    headers = [col[0] for col in mycursor.description]
    print(headers)
    for row in mycursor.fetchall():
        print(row)
def dbms_view():
    """Print Name/USN/Class/Section plus the DBMS mark for every student in all sections."""
    mycursor.execute("SELECT Name,USN,Class,Section,DBMS FROM A_section UNION SELECT Name,USN,Class,Section,DBMS FROM B_section UNION SELECT Name,USN,Class,Section,DBMS FROM C_section")
    headers = [col[0] for col in mycursor.description]
    print(headers)
    for row in mycursor.fetchall():
        print(row)
def ada_view():
mycursor.execute("SELECT Name,USN,Class,Section,ADA FROM A_section UNION SELECT Name,USN,Class,Section,ADA FROM B_section UNION SELECT Name,USN,Class,Section,ADA FROM C_section")
field_names = [i[0] for i in mycursor.description]
print(field_names)
myresult = mycursor.fetchall()
for x in myresult:
print(x)
def oop_view():
mycursor.execute("SELECT Name,USN,Class,Section,OOP FROM A_section UNION SELECT Name,USN,Class,Section,OOP FROM B_section UNION SELECT Name,USN,Class,Section,OOP FROM C_section")
field_names = [i[0] for i in mycursor.description]
print(field_names)
myresult = mycursor.fetchall()
for x in myresult:
print(x)
def dsa_view():
mycursor.execute("SELECT Name,USN,Class,Section,DSA FROM A_section UNION SELECT Name,USN,Class,Section,DSA FROM B_section UNION SELECT Name,USN,Class,Section,DSA FROM C_section")
field_names = [i[0] for i in mycursor.description]
print(field_names)
myresult = mycursor.fetchall()
for x in myresult:
print(x)
def os_view():
mycursor.execute("SELECT Name,USN,Class,Section,OS FROM A_section UNION SELECT Name,USN,Class,Section,OS FROM B_section UNION SELECT Name,USN,Class,Section,OS FROM C_section")
field_names = [i[0] for i in mycursor.description]
print(field_names)
myresult = mycursor.fetchall()
for x in myresult:
print(x)
def se_view():
mycursor.execute("SELECT Name,USN,Class,Section,SE FROM A_section UNION SELECT Name,USN,Class,Section,SE FROM B_section UNION SELECT Name,USN,Class,Section,SE FROM C_section")
field_names = [i[0] for i in mycursor.description]
print(field_names)
myresult = mycursor.fetchall()
for x in myresult:
print(x)
def _subject_avg(subject):
    """Print the average mark in *subject* across all three section tables.

    *subject* is one of the fixed column names (DBMS, ADA, OOP, DSA, OS, SE);
    identifiers cannot be bound as SQL parameters, so it is interpolated
    directly.
    """
    mycursor.execute(
        "SELECT AVG(%s) FROM (SELECT %s FROM A_section"
        " UNION ALL SELECT %s FROM B_section"
        " UNION ALL SELECT %s FROM C_section) s"
        % (subject, subject, subject, subject))
    row = mycursor.fetchone()
    if row is None or row[0] is None:
        # AVG over empty tables yields NULL; report that instead of crashing
        # inside float().
        print("\nNo records found for %s." % subject)
    else:
        # fetchone() returns a 1-tuple; convert its single value directly.
        print("\nThe average percentage in %s is: %s" % (subject, float(row[0])))

def dbms_avg():
    """Print the average DBMS mark across all sections."""
    _subject_avg('DBMS')

def ada_avg():
    """Print the average ADA mark across all sections."""
    _subject_avg('ADA')

def oop_avg():
    """Print the average OOP mark across all sections."""
    _subject_avg('OOP')

def dsa_avg():
    """Print the average DSA mark across all sections."""
    _subject_avg('DSA')

def os_avg():
    """Print the average OS mark across all sections."""
    _subject_avg('OS')

def se_avg():
    """Print the average SE mark across all sections."""
    _subject_avg('SE')
def _subject_top(subject):
    """Print the single highest scorer in *subject* across all three sections.

    Takes the per-section maximum from each table, UNIONs them, and keeps the
    overall best row. *subject* is one of the fixed column names; identifiers
    cannot be bound as SQL parameters, so it is interpolated directly.
    """
    per_section = []
    for table in ('A_section', 'B_section', 'C_section'):
        per_section.append(
            "SELECT Name,USN,Class,Section,%(c)s FROM %(t)s"
            " WHERE %(c)s = (SELECT MAX(%(c)s) FROM %(t)s)"
            % {'c': subject, 't': table})
    query = ("SELECT Name,USN,Class,Section,%s FROM (%s) s"
             " ORDER BY %s DESC LIMIT 1"
             % (subject, " UNION ".join(per_section), subject))
    mycursor.execute(query)
    print("\n The Topper Of the %s is :\n" % subject)
    print([column[0] for column in mycursor.description])
    for row in mycursor.fetchall():
        print(row)

def dbms_top():
    """Print the overall topper in DBMS."""
    _subject_top('DBMS')

def ada_top():
    """Print the overall topper in ADA."""
    _subject_top('ADA')

def oop_top():
    """Print the overall topper in OOP.

    Bug fix: the original printed "The Topper Of the ADA is" for this
    function; the header now correctly names OOP.
    """
    _subject_top('OOP')

def dsa_top():
    """Print the overall topper in DSA."""
    _subject_top('DSA')

def os_top():
    """Print the overall topper in OS."""
    _subject_top('OS')

def se_top():
    """Print the overall topper in SE."""
    _subject_top('SE')
def _update_by_usn(table, column, value, USN):
    """Set *column* to *value* in *table* for the row whose USN matches.

    Commits and reports the affected row count. *table* and *column* come
    from a fixed internal set, so interpolating them is safe (identifiers
    cannot be bound as SQL parameters); the data values are still bound as
    real parameters.
    """
    sql = "UPDATE %s SET %s = %%s WHERE USN = %%s" % (table, column)
    mycursor.execute(sql, (value, USN))
    mydb.commit()
    print(mycursor.rowcount, "record(s) updated")

def _update_usn_by_name(table, USN, Name):
    """Set USN in *table* for the row whose Name matches, commit and report."""
    sql = "UPDATE %s SET USN = %%s WHERE Name = %%s" % (table,)
    mycursor.execute(sql, (USN, Name))
    mydb.commit()
    print(mycursor.rowcount, "record(s) updated")

# --- Rename a student (looked up by USN) ---
def update_A_sectionName(Name, USN):
    _update_by_usn('A_section', 'Name', Name, USN)
def update_B_sectionName(Name, USN):
    _update_by_usn('B_section', 'Name', Name, USN)
def update_C_sectionName(Name, USN):
    _update_by_usn('C_section', 'Name', Name, USN)

# --- Change a student's USN (looked up by Name) ---
def update_A_sectionUSN(USN, Name):
    _update_usn_by_name('A_section', USN, Name)
def update_B_sectionUSN(USN, Name):
    _update_usn_by_name('B_section', USN, Name)
def update_C_sectionUSN(USN, Name):
    _update_usn_by_name('C_section', USN, Name)

# --- Change a student's Class ---
def update_A_sectionClass(Class, USN):
    _update_by_usn('A_section', 'Class', Class, USN)
def update_B_sectionClass(Class, USN):
    _update_by_usn('B_section', 'Class', Class, USN)
def update_C_sectionClass(Class, USN):
    _update_by_usn('C_section', 'Class', Class, USN)

# --- Change a student's Section ---
def update_A_sectionSection(Section, USN):
    _update_by_usn('A_section', 'Section', Section, USN)
def update_B_sectionSection(Section, USN):
    _update_by_usn('B_section', 'Section', Section, USN)
def update_C_sectionSection(Section, USN):
    _update_by_usn('C_section', 'Section', Section, USN)

# --- Per-subject mark updates ---
def update_A_sectionDBMS(DBMS, USN):
    _update_by_usn('A_section', 'DBMS', DBMS, USN)
def update_B_sectionDBMS(DBMS, USN):
    _update_by_usn('B_section', 'DBMS', DBMS, USN)
def update_C_sectionDBMS(DBMS, USN):
    _update_by_usn('C_section', 'DBMS', DBMS, USN)

def update_A_sectionADA(ADA, USN):
    _update_by_usn('A_section', 'ADA', ADA, USN)
def update_B_sectionADA(ADA, USN):
    _update_by_usn('B_section', 'ADA', ADA, USN)
def update_C_sectionADA(ADA, USN):
    _update_by_usn('C_section', 'ADA', ADA, USN)

# Bug fix: these originally wrote to a non-existent column "OOPS"; every
# other query in this file (views, averages, toppers) uses column "OOP",
# so the UPDATEs would fail with an unknown-column error. The public
# function names are kept for caller compatibility.
def update_A_sectionOOPS(OOPS, USN):
    _update_by_usn('A_section', 'OOP', OOPS, USN)
def update_B_sectionOOPS(OOPS, USN):
    _update_by_usn('B_section', 'OOP', OOPS, USN)
def update_C_sectionOOPS(OOPS, USN):
    _update_by_usn('C_section', 'OOP', OOPS, USN)

def update_A_sectionDSA(DSA, USN):
    _update_by_usn('A_section', 'DSA', DSA, USN)
def update_B_sectionDSA(DSA, USN):
    _update_by_usn('B_section', 'DSA', DSA, USN)
def update_C_sectionDSA(DSA, USN):
    _update_by_usn('C_section', 'DSA', DSA, USN)

def update_A_sectionOS(OS, USN):
    _update_by_usn('A_section', 'OS', OS, USN)
def update_B_sectionOS(OS, USN):
    _update_by_usn('B_section', 'OS', OS, USN)
def update_C_sectionOS(OS, USN):
    _update_by_usn('C_section', 'OS', OS, USN)

def update_A_sectionSE(SE, USN):
    _update_by_usn('A_section', 'SE', SE, USN)
def update_B_sectionSE(SE, USN):
    _update_by_usn('B_section', 'SE', SE, USN)
def update_C_sectionSE(SE, USN):
    _update_by_usn('C_section', 'SE', SE, USN)

# --- Derived-result updates (Average, Grade, SGPA) ---
def update_Average_A(Average, USN):
    _update_by_usn('A_section', 'Average', Average, USN)
def update_Average_B(Average, USN):
    _update_by_usn('B_section', 'Average', Average, USN)
def update_Average_C(Average, USN):
    _update_by_usn('C_section', 'Average', Average, USN)

def update_Grade_A(Grade, USN):
    _update_by_usn('A_section', 'Grade', Grade, USN)
def update_Grade_B(Grade, USN):
    _update_by_usn('B_section', 'Grade', Grade, USN)
def update_Grade_C(Grade, USN):
    _update_by_usn('C_section', 'Grade', Grade, USN)

def update_A_sectionSGPA(SGPA, USN):
    _update_by_usn('A_section', 'SGPA', SGPA, USN)
def update_B_sectionSGPA(SGPA, USN):
    _update_by_usn('B_section', 'SGPA', SGPA, USN)
def update_C_sectionSGPA(SGPA, USN):
    _update_by_usn('C_section', 'SGPA', SGPA, USN)
| 2.765625 | 3 |
python/pybool/chow_liu_trees.py | JohnReid/pybool | 5 | 12769827 | <reponame>JohnReid/pybool<filename>python/pybool/chow_liu_trees.py
#
# Copyright <NAME> 2011
#
"""
Chow-Liu Trees
==============
`Chow-Liu trees`_ were originally defined in <NAME>.; <NAME>. (1968),
"Approximating discrete probability distributions with dependence trees",
IEEE Transactions on Information Theory IT-14 (3): 462-467.
.. _Chow-Liu trees: http://en.wikipedia.org/wiki/Chow-Liu_tree
In this module, each data point is presented as a sequence of discrete-valued features. For example suppose we have data, X = {X},
where each x has n=4 features.
>>> X = [
... 'AACC',
... 'AAGC',
... 'AAGC',
... 'GCTC',
... 'ACTC',
... ]
>>> n = len(X[0])
We can calculate the marginal distribution of each feature
>>> import pybool.chow_liu_trees as CLT
>>> for u in xrange(n):
... print CLT.marginal_distribution(X, u)
defaultdict(<type 'float'>, {'A': 0.80000000000000004, 'G': 0.20000000000000001})
defaultdict(<type 'float'>, {'A': 0.60000000000000009, 'C': 0.40000000000000002})
defaultdict(<type 'float'>, {'C': 0.20000000000000001, 'T': 0.40000000000000002, 'G': 0.40000000000000002})
defaultdict(<type 'float'>, {'C': 1.0})
and also the marginal distribution of a pair of features
>>> print CLT.marginal_pair_distribution(X, 0, 1)
defaultdict(<type 'float'>, {('A', 'A'): 0.60000000000000009, ('G', 'C'): 0.20000000000000001, ('A', 'C'): 0.20000000000000001})
>>> print CLT.marginal_pair_distribution(X, 1, 2)
defaultdict(<type 'float'>, {('A', 'G'): 0.40000000000000002, ('C', 'T'): 0.40000000000000002, ('A', 'C'): 0.20000000000000001})
We can calculate the mutual infomation between all pairs of features
>>> for v in xrange(n):
... for u in xrange(v):
... print u, v, CLT.calculate_mutual_information(X, u, v)
0 1 0.223143551314
0 2 0.223143551314
1 2 0.673011667009
0 3 0.0
1 3 0.0
2 3 0.0
Finally we can build a Chow-Liu tree
>>> T = CLT.build_chow_liu_tree(X, n)
>>> print T.edges(data=True)
[(0, 1, {'weight': -0.22314355131420974}), (0, 3, {'weight': -0}), (1, 2, {'weight': -0.6730116670092563})]
"""
import numpy as N, networkx as nx
from collections import defaultdict
def marginal_distribution(X, u):
"""
Return the marginal distribution for the u'th features of the data points, X.
"""
values = defaultdict(float)
s = 1. / len(X)
for x in X:
values[x[u]] += s
return values
def marginal_pair_distribution(X, u, v):
"""
Return the marginal distribution for the u'th and v'th features of the data points, X.
"""
if u > v:
u, v = v, u
values = defaultdict(float)
s = 1. / len(X)
for x in X:
values[(x[u], x[v])] += s
return values
def calculate_mutual_information(X, u, v):
"""
X are the data points.
u and v are the indices of the features to calculate the mutual information for.
"""
if u > v:
u, v = v, u
marginal_u = marginal_distribution(X, u)
marginal_v = marginal_distribution(X, v)
marginal_uv = marginal_pair_distribution(X, u, v)
I = 0.
for x_u, p_x_u in marginal_u.iteritems():
for x_v, p_x_v in marginal_v.iteritems():
if (x_u, x_v) in marginal_uv:
p_x_uv = marginal_uv[(x_u, x_v)]
I += p_x_uv * (N.log(p_x_uv) - N.log(p_x_u) - N.log(p_x_v))
return I
def build_chow_liu_tree(X, n):
"""
Build a Chow-Liu tree from the data, X. n is the number of features. The weight on each edge is
the negative of the mutual information between those features. The tree is returned as a networkx
object.
"""
G = nx.Graph()
for v in xrange(n):
G.add_node(v)
for u in xrange(v):
G.add_edge(u, v, weight=-calculate_mutual_information(X, u, v))
T = nx.minimum_spanning_tree(G)
return T
if '__main__' == __name__:
import doctest
doctest.testmod()
| 3.078125 | 3 |
tensorflow_serving/servables/caffe/test_data/mnist_caffe_fetch.py | rayglover-ibm/serving-caffe | 48 | 12769828 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#!/usr/bin/python2.7
"""Functions for downloading and extracting pretrained MNIST caffe models."""
from __future__ import print_function
import argparse
import tarfile
import os
from six.moves import urllib
VERSION_FORMAT_SPECIFIER = "%08d"
SOURCE_URL = 'https://github.com/rayglover-ibm/serving-caffe/raw/pretrained-models/mnist_pretrained_caffe.tar'
OUT_FILE = 'mnist_pretrained_caffe.tar'
MODEL_FILES = ['classlabels.txt', 'deploy.prototxt', 'weights.caffemodel']
def maybe_download(url, filename, work_directory):
"""Download the data"""
if not os.path.exists(work_directory):
os.mkdir(work_directory)
filepath = os.path.join(work_directory, filename)
if not os.path.exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL, filepath)
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("export_path", help="location to download and extract the model")
parser.add_argument("--version", type=int, default=1, help="model version")
args = parser.parse_args()
export_dir = os.path.join(args.export_path, VERSION_FORMAT_SPECIFIER % args.version)
if os.path.exists(export_dir):
raise RuntimeError("Overwriting exports can cause corruption and are "
"not allowed. Duplicate export dir: %s" % export_dir)
os.makedirs(export_dir)
print('Downloading...', SOURCE_URL)
filename = maybe_download(SOURCE_URL, OUT_FILE, export_dir)
print('Extracting "%s" to "%s"' % (filename, export_dir))
with tarfile.open(filename) as tar:
tar.extractall(path=export_dir)
for p in MODEL_FILES:
if not os.path.isfile(os.path.join(export_dir, p)):
raise FileNotFoundError("Expected model file '%s'" % p)
| 1.992188 | 2 |
beginning-game-development/Chapter 3/3-1.py | CrtomirJuren/pygame-projects | 43 | 12769829 | <reponame>CrtomirJuren/pygame-projects
#!/usr/bin/env python
background_image_filename = 'sushiplate.jpg'
mouse_image_filename = 'fugu.png'
import pygame
from pygame.locals import *
from sys import exit
pygame.init()
screen = pygame.display.set_mode((640, 480), 0, 32)
pygame.display.set_caption("Hello, World!")
background = pygame.image.load(background_image_filename).convert()
mouse_cursor = pygame.image.load(mouse_image_filename).convert_alpha()
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
exit()
screen.blit(background, (0,0))
x, y = pygame.mouse.get_pos()
x-= mouse_cursor.get_width() / 2
y-= mouse_cursor.get_height() / 2
screen.blit(mouse_cursor, (x, y))
pygame.display.update()
| 3.28125 | 3 |
test.py | Joshuawadd/rlcard | 0 | 12769830 | import rlcard
rlcard.make('whist', config={'record_action': True}) | 1.320313 | 1 |
goutdotcom/profiles/tests/test_models.py | Spiewart/goutdotcom | 0 | 12769831 | <gh_stars>0
import pytest
from .factories import PatientProfileFactory
pytestmark = pytest.mark.django_db
| 1.046875 | 1 |
examples/sock/latency.py | iliar-rabet/dao-projection | 1 | 12769832 | <filename>examples/sock/latency.py
#!/usr/bin/python3
from datetime import datetime
import sys
import numpy
PKTS=300
slip=2
try:
if sys.argv[1]:
fileName = sys.argv[1]
except IndexError:
print("Using default file name.")
fileName = 'loglistener.txt'
f = open(fileName,"r")
#f.close()
def test():
list=[]
summ=0
first=0
txcounter=0.0
rxcounter=[0]*PKTS
min=1000000
max=0
for i in range(1,PKTS):
f.seek(0)
print(i)
dTime=0
for line in f.readlines():
hello = "hello " + str(i)
if hello in line:
if "sending "+hello +" from fdf8:f53e:61e4::18:7401:1" in line:
sTime = datetime.strptime(line[0:9], '%M:%S.%f')
txcounter+=1
print("add tx 1")
print (line)
if "ID:"+str(slip) in line and hello + " from fdf8:f53e:61e4::18:7401:1" in line:
if(first==0):
first=i
print("add rx 1")
print (line)
rTime = datetime.strptime(line[0:9], '%M:%S.%f')
dTime=rTime-sTime
dTime=dTime.seconds*1000000+dTime.microseconds
rxcounter[i]+=1
if(min>dTime):
min=dTime
if(max<dTime):
max=dTime
list.append(dTime)
print("delay:"+str(dTime)+"\n")
break
# if "sending "+hello+" from fdf8:f53e:61e4::18:7402:2" in line:
# sTime = datetime.strptime(line[0:9], '%M:%S.%f')
# txcounter+=1
# print("add tx 2")
# print (line)
# if "ID:"+str(slip) in line and hello + " from fdf8:f53e:61e4::18:7402:2" in line:
# if(first==0):
# first=i
# print("add rx 2")
# print (line)
# rTime = datetime.strptime(line[0:9], '%M:%S.%f')
# dTime=rTime-sTime
# dTime=dTime.seconds*1000000+dTime.microseconds
# rxcounter[i]+=1
# if(min>dTime):
# min=dTime
# if(max<dTime):
# max=dTime
# list.append(dTime)
# print("delay:"+str(dTime)+"\n")
# if "sending "+hello +" from fdf8:f53e:61e4::18:7403:3" in line:
# sTime = datetime.strptime(line[0:9], '%M:%S.%f')
# txcounter+=1
# print("add tx 3")
# print (line)
# if "ID:"+str(slip) in line and hello + " from fdf8:f53e:61e4::18:7403:3" in line:
# if(first==0):
# first=i
# print("add rx 3")
# print (line)
# rTime = datetime.strptime(line[0:9], '%M:%S.%f')
# dTime=rTime-sTime
# dTime=dTime.seconds*1000000+dTime.microseconds
# rxcounter[i]+=1
# if(min>dTime):
# min=dTime
# if(max<dTime):
# max=dTime
# list.append(dTime)
# print("delay:"+str(dTime)+"\n")
# break
summ=summ+int(dTime)
rx=0
for el in rxcounter:
rx+=el
print("avg="+str(summ/rx)+"\n")
print("max:"+str(max)+ " Min:"+str(min)," StdDev:"+str(numpy.std(list)))
print(rx)
print(txcounter)
print("PDR="+str((rx)/(txcounter))+"\n")
if __name__ == '__main__' :
test()
| 2.75 | 3 |
code/day06.py | nbhh1234/test1 | 0 | 12769833 | v0,v1,t=eval(raw_input('Enter v0,v1,and t:'))
a=(v1-v0)/t
print('The average acceleration is {}'.format(a))
| 3.453125 | 3 |
challenge-1.py | matt-kendall/python-challenge | 0 | 12769834 | """
Python Challenge Level 1.
An image of a notepad with what appear to be letter transformations:
K -> M
O -> Q
E -> G
And the following hint:
'g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr
gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle
qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj.''
"""
from string import ascii_lowercase
HINT = "g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc " \
"dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr " \
"gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. " \
"lmu ynnjw ml rfc spj."
ALPHA_SHIFT = 2 # The clue implies a caesar cipher of shift 2.
def shift_alphabet(string, shift):
"""
Shift an input strings letters through the alphabet by a specified shift.
"""
old_map = ascii_lowercase
new_map = old_map[shift:] + old_map[:shift]
return string.translate(str.maketrans(old_map, new_map))
print(shift_alphabet(HINT, ALPHA_SHIFT))
# 'i hope you didnt translate it by hand. thats what computers are for.
# doing it in by hand is inefficient and that's why this text is so long.
# using string.maketrans() is recommended. now apply on the url.''
# Performing the same substitution on the URL ('map'):
URL = 'map'
print(shift_alphabet(URL, ALPHA_SHIFT))
# 'ocr'
# http://www.pythonchallenge.com/pc/def/ocr.html is the next URL.
| 3.890625 | 4 |
site_config/migrations/0015_auto_20171107_1326.py | LaudateCorpus1/apostello | 69 | 12769835 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-07 13:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("site_config", "0014_auto_20171025_1053")]
operations = [
migrations.AlterField(
model_name="siteconfiguration",
name="email_from",
field=models.EmailField(
blank=True, help_text="Email will be sent from this address.", max_length=254, null=True
),
),
migrations.AlterField(
model_name="siteconfiguration",
name="email_host",
field=models.CharField(blank=True, help_text="Email host.", max_length=255, null=True),
),
migrations.AlterField(
model_name="siteconfiguration",
name="email_password",
field=models.CharField(blank=True, help_text="Email password.", max_length=255, null=True),
),
migrations.AlterField(
model_name="siteconfiguration",
name="email_port",
field=models.PositiveIntegerField(blank=True, help_text="Email host port.", null=True),
),
migrations.AlterField(
model_name="siteconfiguration",
name="email_username",
field=models.CharField(blank=True, help_text="Email user name.", max_length=255, null=True),
),
]
| 1.726563 | 2 |
beproud/django/commons/models/__init__.py | beproud/bpcommons | 2 | 12769836 | from __future__ import absolute_import
from .base import *
from .fields import *
from .utils import *
| 1.09375 | 1 |
code/Rek_UpdateBlacklist.py | aws-samples/amazon-rekognition-reviewing-user-content | 8 | 12769837 | from __future__ import print_function
import boto3
from decimal import Decimal
import json
import urllib
import os
print('Loading function')
rekognition = boto3.client('rekognition')
s3 = boto3.client('s3')
# --------------- Helper Functions to call Rekognition APIs ------------------
def add_image_to_Collection(bucket, key, prefix):
doesBlackListImagesExist = False
# Get all the collections
response = rekognition.list_collections(MaxResults=100)
for collectionId in response['CollectionIds']:
if(collectionId == 'BlackListImages'):
doesBlackListImagesExist = True
# Create a blacklist collection
if not doesBlackListImagesExist:
#print('Creating collection : BlackListImages')
rekognition.create_collection(CollectionId='BlackListImages')
# Since the collection did not exist, add the existing images from the blacklist bucket, to the blacklist image collection
#print('Adding BlackList Images')
imageList = s3.list_objects_v2(
Bucket=bucket, Prefix=prefix)
#print(json.dumps(imageList,indent=4, separators=(',', ': ')))
#print(imageList)
if imageList['Contents'] is not None:
for image in imageList['Contents']:
if(image['Size'] == 0):
continue
print('Adding ' + bucket + '/' + image['Key'])
rekognition.index_faces(CollectionId='BlackListImages', Image={"S3Object": {
"Bucket": bucket, "Name": image['Key']}})
else:
# Just add the image which fired the Lambda function.
print('Adding ' + bucket + '/' + key)
rekognition.index_faces(CollectionId='BlackListImages', Image={"S3Object": {
"Bucket": bucket, "Name": key}})
return None
# --------------- Main handler ------------------
def lambda_handler(event, context):
#print("Received event: " + json.dumps(event, indent=2))
# Get the object from the event
bucket = event['Records'][0]['s3']['bucket']['name']
key = urllib.unquote_plus(
event['Records'][0]['s3']['object']['key'].encode('utf8'))
prefix = key[:key.rfind('/')]
try:
# try:
# # delete collections. REMOVE IN PROD.
# rekognition.delete_collection(CollectionId ='BlackListImages')
# except Exception as e:
# print ('Error deleting collections.')
# Create image collections.
add_image_to_Collection(bucket, key, prefix)
# Print response to console.
# print(response)
return True
except Exception as e:
print(e)
print("Error processing object {} from bucket {}. ".format(key, bucket) +
"Make sure your object and bucket exist and your bucket is in the same region as this function.")
raise e
| 2.75 | 3 |
xsd-fu/python/generateDS/Demos/Xmlbehavior/po.py | jburel/ome-model | 11 | 12769838 | #!/usr/bin/env python
#
# Generated Fri Jul 2 13:32:06 2004 by generateDS.py.
#
import sys
import getopt
from xml.dom import minidom
from xml.dom import Node
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Support/utility functions.
#
def showIndent(outfile, level):
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
s1 = inStr
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('"', '"')
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
#
# Data representation classes.
#
class purchase_order:
    """Root data binding for a <purchase-order> document.

    Holds one customer, a date string, a list of line items and one
    shipper.  Generated-style class: accessors, XML export, Python-literal
    export, and a minidom-based build() method.
    """
    # Set this to a subclass to have factory() instantiate it instead.
    subclass = None
    def __init__(self, customer=None, date='', line_item=None, shipper=None):
        self.customer = customer
        self.date = date
        # Build a fresh list per instance (avoids a shared mutable default).
        if line_item is None:
            self.line_item = []
        else:
            self.line_item = line_item
        self.shipper = shipper
    def factory(*args_, **kwargs_):
        """Instantiate the registered subclass if any, else this class."""
        if purchase_order.subclass:
            return purchase_order.subclass(*args_, **kwargs_)
        else:
            return purchase_order(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Simple accessors for the four members.
    def getCustomer(self): return self.customer
    def setCustomer(self, customer): self.customer = customer
    def getDate(self): return self.date
    def setDate(self, date): self.date = date
    def getLine_item(self): return self.line_item
    def addLine_item(self, value): self.line_item.append(value)
    def setLine_item(self, index, value): self.line_item[index] = value
    def getShipper(self): return self.shipper
    def setShipper(self, shipper): self.shipper = shipper
    def export(self, outfile, level, name_='purchase-order'):
        """Write this object (and its children) as indented XML to *outfile*."""
        showIndent(outfile, level)
        outfile.write('<%s>\n' % name_)
        level += 1
        if self.customer:
            self.customer.export(outfile, level, name_='customer')
        showIndent(outfile, level)
        outfile.write('<date>%s</date>\n' % quote_xml(self.getDate()))
        for line_item in self.line_item:
            line_item.export(outfile, level, name_='line-item')
        if self.shipper:
            self.shipper.export(outfile, level, name_='shipper')
        level -= 1
        showIndent(outfile, level)
        outfile.write('</%s>\n' % name_)
    def exportLiteral(self, outfile, level, name_='purchase-order'):
        """Write this object as Python constructor-call source text."""
        level += 1
        if self.customer:
            showIndent(outfile, level)
            outfile.write('customer=po:customer(\n')
            self.customer.exportLiteral(outfile, level, name_='customer')
            showIndent(outfile, level)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('date=%s,\n' % quote_python(self.getDate()))
        showIndent(outfile, level)
        outfile.write('line_item=[\n')
        level += 1
        for line_item in self.line_item:
            showIndent(outfile, level)
            outfile.write('po:line-item(\n')
            line_item.exportLiteral(outfile, level, name_='line_item')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        if self.shipper:
            showIndent(outfile, level)
            outfile.write('shipper=po:shipper(\n')
            self.shipper.exportLiteral(outfile, level, name_='shipper')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
    def build(self, node_):
        """Populate this object from a minidom element *node_*."""
        attrs = node_.attributes
        for child in node_.childNodes:
            # Strip any namespace prefix from the element name.
            nodeName_ = child.nodeName.split(':')[-1]
            if child.nodeType == Node.ELEMENT_NODE and \
                nodeName_ == 'customer':
                obj = customer.factory()
                obj.build(child)
                self.setCustomer(obj)
            elif child.nodeType == Node.ELEMENT_NODE and \
                nodeName_ == 'date':
                # Concatenate all text children into the date string.
                date = ''
                for text_ in child.childNodes:
                    date += text_.nodeValue
                self.date = date
            elif child.nodeType == Node.ELEMENT_NODE and \
                nodeName_ == 'line-item':
                obj = line_item.factory()
                obj.build(child)
                self.line_item.append(obj)
            elif child.nodeType == Node.ELEMENT_NODE and \
                nodeName_ == 'shipper':
                obj = shipper.factory()
                obj.build(child)
                self.setShipper(obj)
# end class purchase_order
class customer:
    """Data binding for the <customer> element: a name and an address."""
    # Set this to a subclass to have factory() instantiate it instead.
    subclass = None
    def __init__(self, name='', address=''):
        self.name = name
        self.address = address
    def factory(*args_, **kwargs_):
        """Instantiate the registered subclass if any, else this class."""
        cls = customer.subclass or customer
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getName(self):
        return self.name
    def setName(self, name):
        self.name = name
    def getAddress(self):
        return self.address
    def setAddress(self, address):
        self.address = address
    def export(self, outfile, level, name_='customer'):
        """Write this object as indented XML to *outfile*."""
        showIndent(outfile, level)
        outfile.write('<%s>\n' % name_)
        inner = level + 1
        for tag, text in (('name', self.getName()), ('address', self.getAddress())):
            showIndent(outfile, inner)
            outfile.write('<%s>%s</%s>\n' % (tag, quote_xml(text), tag))
        showIndent(outfile, level)
        outfile.write('</%s>\n' % name_)
    def exportLiteral(self, outfile, level, name_='customer'):
        """Write this object as Python keyword-argument source text."""
        inner = level + 1
        showIndent(outfile, inner)
        outfile.write('name=%s,\n' % quote_python(self.getName()))
        showIndent(outfile, inner)
        outfile.write('address=%s,\n' % quote_python(self.getAddress()))
    def build(self, node_):
        """Populate this object from a minidom element *node_*."""
        for child in node_.childNodes:
            if child.nodeType != Node.ELEMENT_NODE:
                continue
            # Strip any namespace prefix from the element name.
            tag = child.nodeName.split(':')[-1]
            if tag == 'name':
                self.name = ''.join(text_.nodeValue for text_ in child.childNodes)
            elif tag == 'address':
                self.address = ''.join(text_.nodeValue for text_ in child.childNodes)
# end class customer
class line_item:
    """Data binding for a <line-item> element.

    Carries a text description plus numeric per-unit weight, price and
    quantity; quantity defaults to -1, meaning "not yet set".
    """
    # Set this to a subclass to have factory() instantiate it instead.
    subclass = None
    def __init__(self, description='', per_unit_ounces=0.0, price=0.0, quantity=-1):
        self.description = description
        self.per_unit_ounces = per_unit_ounces
        self.price = price
        self.quantity = quantity
    def factory(*args_, **kwargs_):
        """Instantiate the registered subclass if any, else this class."""
        if line_item.subclass:
            return line_item.subclass(*args_, **kwargs_)
        else:
            return line_item(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Simple accessors for the four members.
    def getDescription(self): return self.description
    def setDescription(self, description): self.description = description
    def getPer_unit_ounces(self): return self.per_unit_ounces
    def setPer_unit_ounces(self, per_unit_ounces): self.per_unit_ounces = per_unit_ounces
    def getPrice(self): return self.price
    def setPrice(self, price): self.price = price
    def getQuantity(self): return self.quantity
    def setQuantity(self, quantity): self.quantity = quantity
    def export(self, outfile, level, name_='line-item'):
        """Write this object as indented XML to *outfile*.

        Formats: per-unit-ounces fixed-point (%f), price scientific (%e),
        quantity integer (%d).
        """
        showIndent(outfile, level)
        outfile.write('<%s>\n' % name_)
        level += 1
        showIndent(outfile, level)
        outfile.write('<description>%s</description>\n' % quote_xml(self.getDescription()))
        showIndent(outfile, level)
        outfile.write('<per-unit-ounces>%f</per-unit-ounces>\n' % self.getPer_unit_ounces())
        showIndent(outfile, level)
        outfile.write('<price>%e</price>\n' % self.getPrice())
        showIndent(outfile, level)
        outfile.write('<quantity>%d</quantity>\n' % self.getQuantity())
        level -= 1
        showIndent(outfile, level)
        outfile.write('</%s>\n' % name_)
    def exportLiteral(self, outfile, level, name_='line-item'):
        """Write this object as Python keyword-argument source text."""
        level += 1
        showIndent(outfile, level)
        outfile.write('description=%s,\n' % quote_python(self.getDescription()))
        showIndent(outfile, level)
        outfile.write('per_unit_ounces=%f,\n' % self.getPer_unit_ounces())
        showIndent(outfile, level)
        outfile.write('price=%e,\n' % self.getPrice())
        showIndent(outfile, level)
        outfile.write('quantity=%d,\n' % self.getQuantity())
        level -= 1
    def build(self, node_):
        """Populate this object from a minidom element *node_*.

        Raises ValueError when a numeric child element does not parse as
        the required type.
        """
        attrs = node_.attributes
        for child in node_.childNodes:
            # Strip any namespace prefix from the element name.
            nodeName_ = child.nodeName.split(':')[-1]
            if child.nodeType == Node.ELEMENT_NODE and \
                nodeName_ == 'description':
                description = ''
                for text_ in child.childNodes:
                    description += text_.nodeValue
                self.description = description
            elif child.nodeType == Node.ELEMENT_NODE and \
                nodeName_ == 'per-unit-ounces':
                if child.firstChild:
                    sval = child.firstChild.nodeValue
                    try:
                        fval = float(sval)
                    except ValueError:
                        raise ValueError('requires float (or double) -- %s' % child.toxml())
                    self.per_unit_ounces = fval
            elif child.nodeType == Node.ELEMENT_NODE and \
                nodeName_ == 'price':
                if child.firstChild:
                    sval = child.firstChild.nodeValue
                    try:
                        fval = float(sval)
                    except ValueError:
                        raise ValueError('requires float (or double) -- %s' % child.toxml())
                    self.price = fval
            elif child.nodeType == Node.ELEMENT_NODE and \
                nodeName_ == 'quantity':
                if child.firstChild:
                    sval = child.firstChild.nodeValue
                    try:
                        ival = int(sval)
                    except ValueError:
                        raise ValueError('requires integer -- %s' % child.toxml())
                    self.quantity = ival
# end class line_item
class shipper:
    """Data binding for the <shipper> element: a name and a per-ounce rate."""
    # Set this to a subclass to have factory() instantiate it instead.
    subclass = None
    def __init__(self, name='', per_ounce_rate=0.0):
        self.name = name
        self.per_ounce_rate = per_ounce_rate
    def factory(*args_, **kwargs_):
        """Instantiate the registered subclass if any, else this class."""
        cls = shipper.subclass or shipper
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getName(self):
        return self.name
    def setName(self, name):
        self.name = name
    def getPer_ounce_rate(self):
        return self.per_ounce_rate
    def setPer_ounce_rate(self, per_ounce_rate):
        self.per_ounce_rate = per_ounce_rate
    def export(self, outfile, level, name_='shipper'):
        """Write this object as indented XML to *outfile*."""
        showIndent(outfile, level)
        outfile.write('<%s>\n' % name_)
        inner = level + 1
        showIndent(outfile, inner)
        outfile.write('<name>%s</name>\n' % quote_xml(self.getName()))
        showIndent(outfile, inner)
        outfile.write('<per-ounce-rate>%f</per-ounce-rate>\n' % self.getPer_ounce_rate())
        showIndent(outfile, level)
        outfile.write('</%s>\n' % name_)
    def exportLiteral(self, outfile, level, name_='shipper'):
        """Write this object as Python keyword-argument source text."""
        inner = level + 1
        showIndent(outfile, inner)
        outfile.write('name=%s,\n' % quote_python(self.getName()))
        showIndent(outfile, inner)
        outfile.write('per_ounce_rate=%f,\n' % self.getPer_ounce_rate())
    def build(self, node_):
        """Populate this object from a minidom element *node_*.

        Raises ValueError when per-ounce-rate does not parse as a float.
        """
        for child in node_.childNodes:
            if child.nodeType != Node.ELEMENT_NODE:
                continue
            # Strip any namespace prefix from the element name.
            tag = child.nodeName.split(':')[-1]
            if tag == 'name':
                collected = ''
                for text_ in child.childNodes:
                    collected += text_.nodeValue
                self.name = collected
            elif tag == 'per-ounce-rate':
                if child.firstChild:
                    raw = child.firstChild.nodeValue
                    try:
                        self.per_ounce_rate = float(raw)
                    except ValueError:
                        raise ValueError('requires float (or double) -- %s' % child.toxml())
# end class shipper
from xml.sax import handler, make_parser
class SaxStackElement:
    """One frame of the SAX parse stack.

    Tracks the open element's name, the data object being built for it
    (None for simple text elements), and the character data accumulated
    so far.
    """
    def __init__(self, name='', obj=None):
        self.name = name        # XML element name of this frame
        self.obj = obj          # data object under construction, if any
        self.content = ''       # accumulated character data
#
# SAX handler
#
class SaxPurchase_orderHandler(handler.ContentHandler):
    """SAX content handler that assembles a purchase_order object tree.

    A stack of SaxStackElement frames mirrors the currently open XML
    elements: startElement pushes a frame (creating a data object for
    container elements), characters() accumulates text into the top
    frame, and endElement pops the frame and attaches its result to the
    parent frame's object.
    """
    def __init__(self):
        self.stack = []
        self.root = None
    def getRoot(self):
        """Return the completed purchase_order (None until parsing finishes)."""
        return self.root
    def setDocumentLocator(self, locator):
        self.locator = locator
    def showError(self, msg):
        """Print an error message and exit with a failure status."""
        # print() with a formatted single argument behaves identically on
        # Python 2 and 3 (the original used a Python-2-only print statement).
        print('*** (showError): %s' % msg)
        sys.exit(-1)
    def startElement(self, name, attrs):
        """Push a stack frame for *name*; container elements get an object."""
        done = 0
        if name == 'purchase-order':
            # Fixed: the original read "purchase-order.factory()", which
            # parses as a subtraction and raised NameError at runtime.
            obj = purchase_order.factory()
            stackObj = SaxStackElement('purchase-order', obj)
            self.stack.append(stackObj)
            done = 1
        elif name == 'customer':
            # Fixed: the original referenced the undefined name po_customer.
            obj = customer.factory()
            stackObj = SaxStackElement('customer', obj)
            self.stack.append(stackObj)
            done = 1
        elif name == 'date':
            stackObj = SaxStackElement('date', None)
            self.stack.append(stackObj)
            done = 1
        elif name == 'line-item':
            # Fixed: the original referenced the undefined name po_line_item.
            obj = line_item.factory()
            stackObj = SaxStackElement('line_item', obj)
            self.stack.append(stackObj)
            done = 1
        elif name == 'shipper':
            # Fixed: the original referenced the undefined name po_shipper.
            obj = shipper.factory()
            stackObj = SaxStackElement('shipper', obj)
            self.stack.append(stackObj)
            done = 1
        elif name == 'name':
            stackObj = SaxStackElement('name', None)
            self.stack.append(stackObj)
            done = 1
        elif name == 'address':
            stackObj = SaxStackElement('address', None)
            self.stack.append(stackObj)
            done = 1
        elif name == 'description':
            stackObj = SaxStackElement('description', None)
            self.stack.append(stackObj)
            done = 1
        elif name == 'per-unit-ounces':
            stackObj = SaxStackElement('per_unit_ounces', None)
            self.stack.append(stackObj)
            done = 1
        elif name == 'price':
            stackObj = SaxStackElement('price', None)
            self.stack.append(stackObj)
            done = 1
        elif name == 'quantity':
            stackObj = SaxStackElement('quantity', None)
            self.stack.append(stackObj)
            done = 1
        elif name == 'per-ounce-rate':
            stackObj = SaxStackElement('per_ounce_rate', None)
            self.stack.append(stackObj)
            done = 1
        if not done:
            self.reportError('"%s" element not allowed here.' % name)
    def endElement(self, name):
        """Pop the frame for *name* and attach its value to the parent."""
        done = 0
        if name == 'purchase-order':
            if len(self.stack) == 1:
                self.root = self.stack[-1].obj
                self.stack.pop()
                done = 1
        elif name == 'customer':
            if len(self.stack) >= 2:
                self.stack[-2].obj.setCustomer(self.stack[-1].obj)
                self.stack.pop()
                done = 1
        elif name == 'date':
            if len(self.stack) >= 2:
                content = self.stack[-1].content
                self.stack[-2].obj.setDate(content)
                self.stack.pop()
                done = 1
        elif name == 'line-item':
            if len(self.stack) >= 2:
                self.stack[-2].obj.addLine_item(self.stack[-1].obj)
                self.stack.pop()
                done = 1
        elif name == 'shipper':
            if len(self.stack) >= 2:
                self.stack[-2].obj.setShipper(self.stack[-1].obj)
                self.stack.pop()
                done = 1
        elif name == 'name':
            if len(self.stack) >= 2:
                content = self.stack[-1].content
                self.stack[-2].obj.setName(content)
                self.stack.pop()
                done = 1
        elif name == 'address':
            if len(self.stack) >= 2:
                content = self.stack[-1].content
                self.stack[-2].obj.setAddress(content)
                self.stack.pop()
                done = 1
        elif name == 'description':
            if len(self.stack) >= 2:
                content = self.stack[-1].content
                self.stack[-2].obj.setDescription(content)
                self.stack.pop()
                done = 1
        elif name == 'per-unit-ounces':
            if len(self.stack) >= 2:
                content = self.stack[-1].content
                if content:
                    try:
                        content = float(content)
                    except ValueError:
                        self.reportError('"per-unit-ounces" must be float -- content: %s' % content)
                else:
                    content = -1
                self.stack[-2].obj.setPer_unit_ounces(content)
                self.stack.pop()
                done = 1
        elif name == 'price':
            if len(self.stack) >= 2:
                content = self.stack[-1].content
                if content:
                    try:
                        content = float(content)
                    except ValueError:
                        self.reportError('"price" must be float -- content: %s' % content)
                else:
                    content = -1
                self.stack[-2].obj.setPrice(content)
                self.stack.pop()
                done = 1
        elif name == 'quantity':
            if len(self.stack) >= 2:
                content = self.stack[-1].content
                if content:
                    try:
                        content = int(content)
                    except ValueError:
                        self.reportError('"quantity" must be integer -- content: %s' % content)
                else:
                    content = -1
                self.stack[-2].obj.setQuantity(content)
                self.stack.pop()
                done = 1
        elif name == 'per-ounce-rate':
            if len(self.stack) >= 2:
                content = self.stack[-1].content
                if content:
                    try:
                        content = float(content)
                    except ValueError:
                        self.reportError('"per-ounce-rate" must be float -- content: %s' % content)
                else:
                    content = -1
                self.stack[-2].obj.setPer_ounce_rate(content)
                self.stack.pop()
                done = 1
        if not done:
            self.reportError('"%s" element not allowed here.' % name)
    def characters(self, chrs, start=0, end=None):
        """Accumulate character data into the top stack frame.

        xml.sax (SAX2) invokes this callback with a single ``content``
        argument, so *start*/*end* now default to the whole string; the
        old three-argument call style still works.
        """
        if len(self.stack) > 0:
            self.stack[-1].content += chrs[start:end]
    def reportError(self, mesg):
        """Print a parse error with the current document position and exit."""
        locator = self.locator
        sys.stderr.write('Doc: %s Line: %d Column: %d\n' % \
            (locator.getSystemId(), locator.getLineNumber(),
            locator.getColumnNumber() + 1))
        sys.stderr.write(mesg)
        sys.stderr.write('\n')
        sys.exit(-1)
        #raise RuntimeError
# Help text shown when the command line is malformed.
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
Options:
    -s        Use the SAX parser, not the minidom parser.
"""

def usage():
    """Print the usage message and exit with a failure status."""
    # print() with a single argument behaves identically on Python 2 and 3;
    # the original used a Python-2-only print statement.
    print(USAGE_TEXT)
    sys.exit(-1)
def saxParse(inFileName):
    """Parse *inFileName* with SAX, echo the tree as XML, return the root."""
    parser = make_parser()
    documentHandler = SaxPurchase_orderHandler()
    # Fixed: xml.sax readers register SAX2 handlers via setContentHandler;
    # setDocumentHandler does not exist and raised AttributeError.
    parser.setContentHandler(documentHandler)
    parser.parse('file:%s' % inFileName)
    root = documentHandler.getRoot()
    sys.stdout.write('<?xml version="1.0" ?>\n')
    root.export(sys.stdout, 0)
    return root
def saxParseString(inString):
    """Parse the XML document in *inString* with SAX and return the root."""
    parser = make_parser()
    documentHandler = SaxPurchase_orderHandler()
    # Fixed: xml.sax readers register SAX2 handlers via setContentHandler;
    # setDocumentHandler does not exist and raised AttributeError.
    parser.setContentHandler(documentHandler)
    # Incremental feed/close so the whole string is parsed in one shot.
    parser.feed(inString)
    parser.close()
    rootObj = documentHandler.getRoot()
    #sys.stdout.write('<?xml version="1.0" ?>\n')
    #rootObj.export(sys.stdout, 0)
    return rootObj
def parse(inFileName):
    """Parse *inFileName* with minidom, echo the tree as XML, return the root."""
    doc = minidom.parse(inFileName)
    rootNode = doc.childNodes[0]
    rootObj = purchase_order.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    rootObj.export(sys.stdout, 0)
    return rootObj
def parseString(inString):
    """Like parse(), but reads the XML document from the string *inString*."""
    doc = minidom.parseString(inString)
    rootNode = doc.childNodes[0]
    rootObj = purchase_order.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    rootObj.export(sys.stdout, 0)
    return rootObj
def parseLiteral(inFileName):
    """Parse *inFileName* and print equivalent Python constructor source."""
    doc = minidom.parse(inFileName)
    rootNode = doc.childNodes[0]
    rootObj = purchase_order.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('from po import *\n\n')
    sys.stdout.write('rootObj = purchase_order(\n')
    rootObj.exportLiteral(sys.stdout, 0)
    sys.stdout.write(')\n')
    return rootObj
def main():
    """Command-line entry point.

    ``-s <file>`` parses with SAX and echoes XML; a bare ``<file>`` parses
    with minidom and prints the Python-literal form; anything else shows
    the usage message and exits.
    """
    args = sys.argv[1:]
    if len(args) == 2 and args[0] == '-s':
        saxParse(args[1])
    elif len(args) == 1:
        parseLiteral(args[0])
    else:
        usage()
if __name__ == '__main__':
    main()
#import pdb
#pdb.run('main()')
| 2.6875 | 3 |
ChamarScriptLinux/Main.py | LeandroTeodoroRJ/ExemplosPython | 1 | 12769839 | <filename>ChamarScriptLinux/Main.py
######################################################
# Main Python program: calls a Linux shell script,
# passing it one positional parameter.
######################################################
import os  # standard-library module used to run shell commands
var="Param1"  # parameter that will be passed to the shell script
# Invokes ./LinuxScript.sh with *var* as its argument; assumes the script
# exists in the current working directory and is executable.
os.system("./LinuxScript.sh %s" %var)  # call the script with the parameter
| 2.296875 | 2 |
MI_analyzer.py | DanielMartinAlarcon/influenza_RNA_structure | 0 | 12769840 | <filename>MI_analyzer.py
import pandas
import matplotlib.pyplot as plt
from sklearn.metrics import mutual_info_score as mi
import numpy as np
def Calculate_MI(numpy_array):
    """
    Takes in a numpy array (which has been presumably parsed from an
    alignment) and computes pairwise mutual information between columns.

    Returns a symmetric (n_columns x n_columns) numpy matrix of MI scores;
    the diagonal is left at zero, as in the original implementation.
    """
    pandas_vector = pandas.DataFrame(numpy_array)
    row_length = len(numpy_array[0])
    mi_vector = np.zeros((row_length, row_length))  # upper triangle of MI scores
    # Only compute the upper triangle (i < j); MI is symmetric.
    for i in range(row_length):
        for j in range(i + 1, row_length):
            # DataFrame.ix was removed in pandas 1.0; use positional .iloc.
            mi_vector[i, j] = mi(pandas_vector.iloc[:, i], pandas_vector.iloc[:, j])
    # Mirror the upper triangle so the returned matrix is symmetric.
    mi_vector_trans = mi_vector.T
    mi_vector_square = mi_vector + mi_vector_trans
    return mi_vector_square
def Plot_Binary(mi_vector, plot_filename, cmap='binary'):
    """Render *mi_vector* as a heatmap, save it, and return the figure.

    Parameters:
        mi_vector: 2-D array of pairwise mutual-information scores.
        plot_filename: path the figure is written to (the image format is
            inferred by matplotlib from the file extension).
        cmap: matplotlib colormap name; defaults to black-and-white 'binary'.
    """
    plt.pcolormesh(mi_vector, cmap=cmap)
    # Optional colorbar, left disabled in the original:
    # cb = plt.colorbar(orientation='horizontal')
    # cb.set_label('Mutual Information')
    fig = plt.gcf()  # get current figure
    fig.set_size_inches(6, 6)  # set figure size to 6 inches by 6 inches
    plt.savefig(plot_filename)
    # NOTE(review): plt.show() blocks with interactive backends until the
    # window is closed.
    plt.show()
    return fig
niworkflows/workflows/epi/tests/test_refmap.py | notZaki/niworkflows | 0 | 12769841 | <reponame>notZaki/niworkflows
"""Check the refmap module."""
import os
from ..refmap import init_epi_reference_wf
def test_reference(tmpdir, ds000030_dir, workdir, outdir):
    """Exercise the EPI reference workflow."""
    tmpdir.chdir()
    # Build the workflow with all available cores; auto_bold_nss presumably
    # enables automatic non-steady-state volume detection -- confirm in refmap.
    wf = init_epi_reference_wf(omp_nthreads=os.cpu_count(), auto_bold_nss=True)
    if workdir:
        # Persist intermediate results under the caller-provided work dir.
        wf.base_dir = str(workdir)
    # Feed every BOLD run of sub-10228 from the ds000030 test dataset.
    wf.inputs.inputnode.in_files = [
        str(f) for f in (ds000030_dir / "sub-10228" / "func").glob("*_bold.nii.gz")
    ]
    # Reportlet-generation wiring, kept disabled for future use:
    # if outdir:
    #     out_path = outdir / "masks" / folder.split("/")[-1]
    #     out_path.mkdir(exist_ok=True, parents=True)
    #     report = pe.Node(SimpleShowMaskRPT(), name="report")
    #     report.interface._always_run = True
    #     def _report_name(fname, out_path):
    #         from pathlib import Path
    #         return str(
    #             out_path
    #             / Path(fname)
    #             .name.replace(".nii", "_mask.svg")
    #             .replace("_magnitude", "_desc-magnitude")
    #             .replace(".gz", "")
    #         )
    # # fmt: off
    # wf.connect([
    #     (inputnode, report, [(("in_file", _report_name, out_path), "out_report")]),
    #     (brainmask_wf, report, [("outputnode.out_mask", "mask_file"),
    #                             ("outputnode.out_file", "background_file")]),
    # ])
    # # fmt: on
    wf.run()
| 1.820313 | 2 |
server/models/User.py | surbina/devocion-ar-server | 0 | 12769842 | """This module contains the class definition for Users."""
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import relationship
from server.models.Base import BaseModel
from server.models.Devotional import DevotionalModel
from server.models.Comment import CommentModel
class UserModel(BaseModel):
    """SQLAlchemy model for a user (table ``users``)."""
    __tablename__ = 'users'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    email = Column(String(50))
    first_name = Column(String(50))
    last_name = Column(String(50))
    # One-to-many collections; each related model gains an 'author'
    # back-reference pointing at this user.
    devotionals = relationship(DevotionalModel, backref='author')
    comments = relationship(CommentModel, backref='author')
| 3.21875 | 3 |
djangobmf/contrib/quotation/workflows.py | dmatthes/django-bmf | 1 | 12769843 | #!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from djangobmf.workflows import Workflow, State, Transition
import datetime
class QuotationWorkflow(Workflow):
    """Lifecycle of a quotation: draft -> send -> accepted -> invoiced,
    with cancellation possible from every pre-invoice state."""
    class States:
        # update/delete flags control which operations are allowed per state.
        draft = State(_(u"Draft"), True, delete=False)
        send = State(_(u"Send"), update=False, delete=False)
        accepted = State(_(u"Accepted"), update=False, delete=False)
        invoiced = State(_(u"Done"), update=False, delete=False)
        cancelled = State(_(u"Cancelled"), update=False, delete=True)
    class Transitions:
        send = Transition(_("Send to customer"), ["draft"], "send")
        accept = Transition(_("Quotation accepted by customer"), 'send', 'accepted')
        reopen = Transition(_("Reopen this quotation"), 'cancelled', 'draft')
        invoice = Transition(_("Generate invoice"), 'accepted', 'invoiced')
        revise = Transition(_("Revise this quotation"), ('send', 'accepted'), 'draft')
        cancel = Transition(_("Cancel"), ('draft', 'send', 'accepted'), 'cancelled', validate=False)
    # def revise(self, instance, user):
    #     print instance
    #     print user
    #     return True
    def invoice(self):
        """Create an invoice from this quotation (if none exists yet) and
        copy all quotation line items onto it."""
        if not self.instance.invoice:
            # Resolve the invoice model class through the quotation's FK.
            invoice_mdl = self.instance._meta.model.invoice.field.related_field.model
            products = invoice_mdl.products.through
            invoice = invoice_mdl(
                customer=self.instance.customer,
                project=self.instance.project,
                employee=self.instance.employee,
                shipping_address=self.instance.shipping_address,
                invoice_address=self.instance.invoice_address,
                notes=self.instance.notes,
                net=self.instance.net,
                term_of_payment=self.instance.term_of_payment,
                date=datetime.datetime.now().date(),
                created_by=self.user,
                modified_by=self.user,
            )
            invoice.save()
            # save the items from the quotation to the invoice
            for item in self.instance.quotation_products.select_related('product'):
                invoice_item = products(
                    invoice=invoice,
                    product=item.product,
                    amount=item.amount,
                    price=item.price,
                    name=item.name,
                    description=item.description,
                )
                invoice_item.save()
            self.instance.invoice = invoice
            # self.instance.save()
| 2.15625 | 2 |
graditudelib/heatmap.py | foerstner-lab/GRADitude | 1 | 12769844 | import pandas as pd
import holoviews as hv
import matplotlib.pyplot as plt
hv.extension('bokeh', width=90)
def plot_heatmap(feature_count_table, feature_count_start_column,
                 feature_count_end_column, y_label_, output_file):
    """Read a tab-separated count table and plot selected columns as a heatmap.

    The table must contain a 'Gene' column, which becomes the row index.
    Columns between the (1-based) start and end positions are extracted and
    rendered by heatmaps(), which writes the figure to *output_file*.
    """
    table = pd.read_table(feature_count_table, sep="\t")
    table.set_index('Gene', inplace=True)
    value_matrix = _extract_value_matrix(table, feature_count_start_column,
                                         feature_count_end_column)
    heatmaps(value_matrix, y_label_, output_file)
def _extract_value_matrix(feature_count_table_df,
feature_count_start_column, feature_count_end_column):
return feature_count_table_df.iloc[:, int(feature_count_start_column) - 1:feature_count_end_column - 1]
def heatmaps(value_matrix, y_label_, output_file):
    """Draw *value_matrix* as a green-scale heatmap and save it.

    Rows are labelled with the DataFrame index (gene names) and columns
    with fraction numbers; the figure is written to *output_file*.
    """
    fig, axes_x = plt.subplots(figsize=(100, 100))
    imshow_ = axes_x.imshow(value_matrix, cmap="Greens")
    fig.colorbar(imshow_, ax=axes_x)
    axes_x.set_xticks(range(len(value_matrix.columns)))
    axes_x.set_yticks(range(len(value_matrix.index)))
    # NOTE(review): labels are hard-coded to fractions 1..20; this only
    # lines up when the matrix has exactly 20 columns -- confirm upstream.
    axes_x.set_xticklabels(range(1, 21))
    axes_x.set_yticklabels(value_matrix.index)
    axes_x.set_xlabel("Fractions", fontsize=18)
    axes_x.set_ylabel(y_label_, fontsize=10)
    plt.savefig(output_file)
    plt.show()
| 2.625 | 3 |
scons/mara_options.py | svn2github/Escript | 0 | 12769845 |
##############################################################################
#
# Copyright (c) 2003-2018 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
#
##############################################################################
from templates.sid_py3_mpi_options import *
# Host-specific SCons build options; values here override the defaults
# pulled in from templates.sid_py3_mpi_options above.
escript_opts_version = 203
#cuda = True
# Compiler flag sets for optimized and debug builds.
cc_optim = '-O3 -march=native'
cc_debug = "-g3 -O0 -DDOASSERT -DDOPROF -DBOUNDS_CHECK -D_GLIBCXX_DEBUG -fno-omit-frame-pointer" #-fsanitize=address
cxx_extra = '-Wextra -Wno-unused-parameter -Wno-deprecated-declarations -g -fdiagnostics-color'
nvccflags = "-arch=sm_30 -DBOOST_NOINLINE='__attribute__((noinline))'"
#ld_extra = ''
#werror = False
#debug = True
#ld_extra = '-fsanitize=address'
verbose = True
# Third-party solver/library toggles and their install prefixes.
parmetis = True
trilinos = True
trilinos_prefix = '/opt/trilinos_hybrid_eti'
umfpack = True
lapack_prefix = ['/usr/include/atlas', '/usr/lib/atlas-base']
silo = True
visit = False
visit_prefix = '/opt/visit/2.7.0b/linux-x86_64/libsim/V2'
#visit_libs = ['simV2']
# MPI launcher template (placeholders presumably substituted by the escript
# run scripts: %t threads per rank, %N ranks, %b binary -- confirm there).
launcher = "mpirun ${AGENTOVERRIDE} ${EE} --map-by node:pe=%t -bind-to none -np %N %b"
#longindices = True
#cxx_extra += ' -Wconversion'
#lapack = 'none'
#parmetis = False
vulnscan_parser/models/nmap/host.py | happyc0ding/vulnscan-parser | 17 | 12769846 | from vulnscan_parser.models.vshost import VSHost
class NmapHost(VSHost):
    """Host record produced by the nmap parser.

    Exposes ``address`` as an alias for the inherited ``ip`` attribute,
    matching nmap's XML terminology.
    """
    def __init__(self):
        super().__init__()
        # Keep 'scripts' out of the generic attribute-to-dict export
        # (presumably handled by the base class via ignored_dict_props).
        self.ignored_dict_props.append('scripts')
        # Presumably NSE script results attached to this host -- TODO confirm.
        self.scripts = set()
    @property
    def address(self):
        # nmap calls this field 'address'; delegate to the generic 'ip'.
        return self.ip
    @address.setter
    def address(self, value):
        self.ip = value
| 2.265625 | 2 |
dashboard/migrations/0027_rename_psetsubmission_pset.py | ankanb240/otis-web | 15 | 12769847 | <filename>dashboard/migrations/0027_rename_psetsubmission_pset.py
# Generated by Django 3.2.5 on 2021-08-04 19:25
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``PSetSubmission`` model to ``PSet``."""
    dependencies = [
        ('roster', '0061_alter_registrationcontainer_allowed_tracks'),
        ('core', '0027_alter_semester_uses_legacy_pset_system'),
        ('dashboard', '0026_rename_number_level_threshold'),
    ]
    operations = [
        # RenameModel preserves existing rows while renaming the model
        # (and its underlying table / related references).
        migrations.RenameModel(
            old_name='PSetSubmission',
            new_name='PSet',
        ),
    ]
| 1.5625 | 2 |
generator_counter.py | volhadounar/Python_tricks | 0 | 12769848 | <reponame>volhadounar/Python_tricks
def gen_counter(cnt=None):
    """Infinite counter generator starting at 0.

    Yields 0 first, then advances by *cnt* per step (by 1 when *cnt*
    is None).
    """
    step = 1 if cnt is None else cnt
    value = 0
    while True:
        yield value
        value += step
if __name__ == '__main__':
    # Demo: default counter (step 1) prints 0..4 ...
    counter = gen_counter()
    for _ in range(5):
        print(next(counter))
    # ... then a counter with an explicit step of 5 prints 0,5,10,15,20.
    counter = gen_counter(5)
    for _ in range(5):
        print(next(counter))
| 3.5625 | 4 |
ultraopt/hdl/_hdl2cs.py | auto-flow/oxygen | 90 | 12769849 | import re
from collections import defaultdict
from copy import deepcopy
from typing import Dict, List
from ConfigSpace import CategoricalHyperparameter, Constant
from ConfigSpace import ConfigurationSpace, Configuration
from ConfigSpace import ForbiddenInClause, ForbiddenEqualsClause, ForbiddenAndConjunction
from ConfigSpace import InCondition, EqualsCondition
from ultraopt.hdl import hp_def
from ultraopt.hdl.utils import is_hdl_bottom
from ultraopt.utils.logging_ import get_logger
class HDL2CS():
    """Converter from an HDL (hyperparameter description language) dict to
    a ConfigSpace ``ConfigurationSpace``.

    Nested dicts become sub-spaces; keys suffixed ``(choice)`` introduce a
    ``__choice__`` categorical; ``__condition`` / ``__activate`` /
    ``__forbidden`` keys attach conditions and forbidden clauses.
    """
    def __init__(self):
        self.logger = get_logger(__name__)
    def __call__(self, hdl: Dict):
        """Convert *hdl* and return the resulting ConfigurationSpace."""
        cs = self.recursion(hdl)
        return cs
    def __condition(self, item: Dict, store: Dict):
        """Build an In/EqualsCondition from an item with _child/_parent/_values.

        Hyperparameter names are resolved through *store*; a single-element
        value list collapses to an EqualsCondition.
        """
        child = item["_child"]
        child = store[child]
        parent = item["_parent"]
        parent = store[parent]
        value = (item["_values"])
        if (isinstance(value, list) and len(value) == 1):
            value = value[0]
        if isinstance(value, list):
            cond = InCondition(child, parent, list(map(hp_def._encode, value)))
        else:
            cond = EqualsCondition(child, parent, hp_def._encode(value))
        return cond
    def __forbidden(self, value: List, store: Dict, cs: ConfigurationSpace):
        """Add one ForbiddenAndConjunction per dict in *value* to *cs*."""
        assert isinstance(value, list)
        for item in value:
            assert isinstance(item, dict)
            clauses = []
            for name, forbidden_values in item.items():
                # A single-element list collapses to an equals-clause.
                if isinstance(forbidden_values, list) and len(forbidden_values) == 1:
                    forbidden_values = forbidden_values[0]
                if isinstance(forbidden_values, list):
                    clauses.append(ForbiddenInClause(store[name], list(map(hp_def._encode, forbidden_values))))
                else:
                    clauses.append(ForbiddenEqualsClause(store[name], hp_def._encode(forbidden_values)))
            cs.add_forbidden_clause(ForbiddenAndConjunction(*clauses))
    def reverse_dict(self, dict_: Dict):
        """Invert a mapping: each value (or list member) maps back to the
        de-duplicated list of keys that referenced it."""
        reversed_dict = defaultdict(list)
        for key, value in dict_.items():
            if isinstance(value, list):
                for v in value:
                    reversed_dict[v].append(key)
            else:
                reversed_dict[value].append(key)
        reversed_dict = dict(reversed_dict)
        for key, value in reversed_dict.items():
            reversed_dict[key] = list(set(value))
        return reversed_dict
    def pop_covered_item(self, dict_: Dict, length: int):
        """Return a copy of *dict_* without entries whose value list covers
        all *length* keys (such conditions would always hold)."""
        dict_ = deepcopy(dict_)
        should_pop = []
        for key, value in dict_.items():
            assert isinstance(value, list)
            if len(value) > length:
                self.logger.warning("len(value) > length")
                should_pop.append(key)
            elif len(value) == length:
                should_pop.append(key)
        for key in should_pop:
            dict_.pop(key)
        return dict_
    def __activate(self, value: Dict, store: Dict, cs: ConfigurationSpace):
        """Translate an __activate mapping into per-child conditions on *cs*."""
        assert isinstance(value, dict)
        for k, v in value.items():
            assert isinstance(v, dict)
            reversed_dict = self.reverse_dict(v)
            reversed_dict = self.pop_covered_item(reversed_dict, len(v))
            for sk, sv in reversed_dict.items():
                cond = self.__condition(
                    {
                        "_child": sk,
                        "_values": sv,
                        "_parent": k
                    },
                    store,
                )
                cs.add_condition(cond)
    def eliminate_suffix(self, key: str):
        """Strip a trailing "(choice)" marker from *key*, if present."""
        s = "(choice)"
        if key.endswith(s):
            key = key[:-len(s)]
        return key
    def add_configuration_space(
            self, cs: ConfigurationSpace, cs_name: str, hdl_value: dict, is_choice: bool,
            option_hp: Configuration, children_is_choice=False):
        """Recurse into *hdl_value* and attach it to *cs* as a sub-space,
        conditioned on *option_hp* == *cs_name* when inside a choice."""
        if is_choice:
            cs.add_configuration_space(cs_name, self.recursion(hdl_value, children_is_choice),
                                       parent_hyperparameter={"parent": option_hp, "value": cs_name})
        else:
            cs.add_configuration_space(cs_name, self.recursion(hdl_value, children_is_choice))
    def recursion(self, hdl, is_choice=False):
        """Recursively build a ConfigurationSpace from the HDL dict *hdl*.

        *is_choice* marks that this level's sub-spaces are alternatives
        selected via a ``__choice__`` categorical hyperparameter.
        """
        ############ Declare ConfigurationSpace variables ###################
        cs = ConfigurationSpace()
        ####### Fill placeholder to empty ConfigurationSpace ################
        key_list = list(hdl.keys())
        if len(key_list) == 0:
            cs.add_hyperparameter(Constant("placeholder", "placeholder"))
            return cs
        ###################### Declare common variables #####################
        option_hp = None
        pattern = re.compile(r"(.*)\((.*)\)")
        store = {}
        conditions_dict = {}
        ########### If parent is choice configuration_space #################
        if is_choice:
            choices = []
            for k, v in hdl.items():
                if not is_hdl_bottom(k, v) and isinstance(v, dict):
                    k = self.eliminate_suffix(k)
                    choices.append(self.eliminate_suffix(k))
            option_hp = CategoricalHyperparameter('__choice__', choices)
            cs.add_hyperparameter(option_hp)
        #### Travel key,value in hdl items, if value is dict(hdl), do recursion ######
        # fixme: 'option_hp' maybe reference without define ?
        for hdl_key, hdl_value in hdl.items():
            mat = pattern.match(hdl_key)
            # add_configuration_space (choice)
            if mat and isinstance(hdl_value, dict):
                groups = mat.groups()
                assert len(groups) == 2, ValueError(f"Invalid hdl_key {hdl_key}")
                cs_name, method = groups
                assert method == "choice", ValueError(f"Invalid suffix {method}")
                self.add_configuration_space(cs, cs_name, hdl_value, is_choice, option_hp, True)
            elif is_hdl_bottom(hdl_key, hdl_value):
                # Dunder keys collect condition/forbidden specs for later;
                # everything else is a leaf hyperparameter.
                if hdl_key.startswith("__"):
                    conditions_dict[hdl_key] = hdl_value
                else:
                    hp = self.__parse_dict_to_config(hdl_key, hdl_value)
                    cs.add_hyperparameter(hp)
                    store[hdl_key] = hp
            # add_configuration_space
            elif isinstance(hdl_value, dict):
                cs_name = hdl_key
                self.add_configuration_space(cs, cs_name, hdl_value, is_choice, option_hp)
            else:
                raise NotImplementedError
        ########### Processing conditional hyperparameters #################
        for key, value in conditions_dict.items():
            condition_indicator = key
            if condition_indicator == "__condition":
                assert isinstance(value, list)
                for item in value:
                    cond = self.__condition(item, store)
                    cs.add_condition(cond)
            elif condition_indicator == "__activate":
                self.__activate(value, store, cs)
            elif condition_indicator == "__forbidden":
                self.__forbidden(value, store, cs)
            else:
                self.logger.warning(f"Invalid condition_indicator: {condition_indicator}")
        # fixme: remove 'rely_model'
        return cs
    # add_hyperparameter
    def __parse_dict_to_config(self, key, value):
        """Turn a leaf HDL value into a hyperparameter object.

        Dicts with _type/_value/_default dispatch to the matching hp_def
        constructor; plain values become Constants.
        NOTE(review): eval() builds the call from the _type string, so a
        malicious HDL could execute code -- consider getattr(hp_def, _type).
        """
        if isinstance(value, dict):
            _type = value.get("_type")
            _value = value.get("_value")
            _default = value.get("_default")
            assert _value is not None
            if _type in ("choice", "ordinal"):
                return eval(f"hp_def.{_type}(key, _value, _default)")
            else:
                return eval(f'''hp_def.{_type}("{key}",*_value,default=_default)''')
        else:
            return Constant(key, hp_def._encode(value))
def hdl2cs(hdl: dict) -> ConfigurationSpace:
    """Convert the HDL dict *hdl* to a ConfigurationSpace via a throwaway
    :class:`HDL2CS` converter."""
    converter = HDL2CS()
    return converter(hdl)
| 2.21875 | 2 |
boozo/utils/timestamp.py | skillplot/pyboozo | 0 | 12769850 | <reponame>skillplot/pyboozo
## Copyright (c) 2020 mangalbhaskar
"""Common date and timestamp utilities and inter-conversion of them.
Dependencies: arrow
References:
* http://zetcode.com/python/arrow/
"""
__author__ = 'mangalbhaskar'
import os
import arrow
from boozo.boot._log_ import log
from .typeformats import *
def now():
    """Return the current local date/time formatted with ``_date_format_``.

    Intended for building serializable objects (e.g. records stored in the
    database) so all stored dates share one consistent timezone-aware format.
    """
    # NOTE(review): the local name shadows this function's own name;
    # harmless here, but renaming it would aid readability.
    now = arrow.now()
    date_time_zone = now.format(_date_format_)
    return date_time_zone
def ts():
    """Return the current timestamp rendered with ``_timestamp_format_``."""
    import datetime
    # `_timestamp_format_` comes from .typeformats (star import above).
    ts = (_timestamp_format_).format(datetime.datetime.now())
    return ts
def timestamp():
    """Alias for :func:`ts`, kept as a friendlier public name."""
    return ts()
def modified_on(filepath, ts=False):
    """Return *filepath*'s last-modification time formatted with ``_date_format_``.

    When *ts* is true, the formatted date is further converted to the
    ``_timestamp_format_`` representation via :func:`ts_from_datestring`.

    References:
    * https://stackoverflow.com/questions/237079/how-to-get-file-creation-modification-date-times-in-python
    """
    modified_on = arrow.Arrow.fromtimestamp(os.stat(filepath).st_mtime).format(_date_format_)
    if ts:
        modified_on = ts_from_datestring(modified_on)
    return modified_on
def date_from_ts(ts):
    """Return a ``datetime.date`` parsed from a date string in ``_date_format_``.

    Todo:
        some warning to the call to get function for api change in the future release
    """
    ar = arrow.get(ts, _date_format_)
    dt = ar.date()
    return dt
def ts_from_datestring(dt):
    """Return a ``_timestamp_format_`` timestamp built from a date string
    given in the ``_date_format_`` format.

    Todo:
        some warning to the call to get function for api change in the future release
    """
    ar = arrow.get(dt, _date_format_)
    ts = (_timestamp_format_).format(ar.datetime)
    return ts
| 2.53125 | 3 |